prompt (string, 15 to 655k chars) | completion (string, 3 to 32.4k chars) | api (string, 8 to 52 chars) |
---|---|---|
import os,warnings
warnings.filterwarnings("ignore")
import numpy as np
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.ERROR)
import tensorflow.contrib.slim as slim
import scipy.io as sio
from sklearn.utils import shuffle
from util import gpusession,nzr,print_n_txt,remove_file_if_exists,create_gradient_clipping
import matplotlib.pyplot as plt
class cn_reg_class(object): # ChoiceNet
def __init__(self,_name='cn_reg',_x_dim=1,_y_dim=1,_h_dims=[64,64]
,_k_mix=5,_actv=tf.nn.relu,_bn=slim.batch_norm
,_rho_ref_train=0.95,_tau_inv=1e-2,_var_eps=1e-2
,_pi1_bias=0.0,_log_sigma_Z_val=0
,_kl_reg_coef=1e-5,_l2_reg_coef=1e-5
,_SCHEDULE_MDN_REG=False
,_GPU_ID=0,_VERBOSE=True):
self.name = _name
self.x_dim = _x_dim
self.y_dim = _y_dim
self.h_dims = _h_dims
self.k_mix = _k_mix
self.actv = _actv
self.bn = _bn # slim.batch_norm / None
self.rho_ref_train = _rho_ref_train # Rho for training
self.tau_inv = _tau_inv
self.var_eps = _var_eps # This will be used for the loss function (var+var_eps)
self.pi1_bias = _pi1_bias
self.log_sigma_Z_val = _log_sigma_Z_val
self.kl_reg_coef = _kl_reg_coef
self.l2_reg_coef = _l2_reg_coef # L2 regularizer
self.SCHEDULE_MDN_REG = _SCHEDULE_MDN_REG
self.GPU_ID = _GPU_ID
self.VERBOSE = _VERBOSE
if _GPU_ID < 0: # with CPU only (no GPU)
# Build model
self.build_model()
# Build graph
self.build_graph()
# Check params
self.check_params()
else: # with GPU
with tf.device('/device:GPU:%d' % (self.GPU_ID)):
# Build model
self.build_model()
# Build graph
self.build_graph()
# Check params
self.check_params()
# Build model
def build_model(self):
# Placeholders
self.x = tf.placeholder(dtype=tf.float32,shape=[None,self.x_dim],name='x') # Input [None x xdim]
self.t = tf.placeholder(dtype=tf.float32,shape=[None,self.y_dim],name='t') # Output [None x ydim]
self.kp = tf.placeholder(dtype=tf.float32,shape=[],name='kp') # Keep probability
self.lr = tf.placeholder(dtype=tf.float32,shape=[],name='lr') # Learning rate
self.is_training = tf.placeholder(dtype=tf.bool,shape=[]) # Training flag
        self.rho_ref = tf.placeholder(dtype=tf.float32,shape=[],name='rho_ref') # Reference correlation for the first mixture
self.train_rate = tf.placeholder(dtype=tf.float32,shape=[],name='train_rate') # from 0.0~1.0
# Initializers
trni = tf.random_normal_initializer
tci = tf.constant_initializer
self.fully_init = trni(stddev=0.01)
self.bias_init = tci(0.)
self.bn_init = {'beta':tci(0.),'gamma':trni(1.,0.01)}
self.bn_params = {'is_training':self.is_training,'decay':0.9,'epsilon':1e-5,
'param_initializers':self.bn_init,'updates_collections':None}
# Build graph
with tf.variable_scope(self.name,reuse=False) as scope:
with slim.arg_scope([slim.fully_connected],activation_fn=self.actv,
weights_initializer=self.fully_init,biases_initializer=self.bias_init,
normalizer_fn=self.bn,normalizer_params=self.bn_params,
weights_regularizer=None):
_net = self.x # Now we have an input
                self.N = tf.shape(self.x)[0] # Batch size
for h_idx in range(len(self.h_dims)): # Loop over hidden layers
_hdim = self.h_dims[h_idx]
_net = slim.fully_connected(_net,_hdim,scope='lin'+str(h_idx))
_net = slim.dropout(_net,keep_prob=self.kp,is_training=self.is_training
,scope='dr'+str(h_idx))
self.feat = _net # Feature [N x Q]
self.Q = self.feat.get_shape().as_list()[1] # Feature dimension
# Feature to K rhos (NO ACTIVATION !!!)
_rho_raw = slim.fully_connected(self.feat,self.k_mix,activation_fn=None
,scope='rho_raw')
# self.rho_temp = tf.nn.tanh(_rho_raw) # [N x K] between -1.0~1.0 for regression
self.rho_temp = tf.nn.sigmoid(_rho_raw) # [N x K] between 0.0~1.0 for classification
                # Make sure the first mixture has 'self.rho_ref' correlation
self.rho = tf.concat([self.rho_temp[:,0:1]*0.0+self.rho_ref,self.rho_temp[:,1:]]
,axis=1) # [N x K]
                # Variables for the sampler
self.muW = tf.get_variable(name='muW',shape=[self.Q,self.y_dim],
initializer=tf.random_normal_initializer(stddev=0.1),
dtype=tf.float32) # [Q x D]
self.logSigmaW = tf.get_variable(name='logSigmaW'
,shape=[self.Q,self.y_dim]
,initializer=tf.constant_initializer(-2.0)
,dtype=tf.float32) # [Q x D]
self.muZ = tf.constant(np.zeros((self.Q,self.y_dim))
,name='muZ',dtype=tf.float32) # [Q x D]
self.logSigmaZ = tf.constant(self.log_sigma_Z_val*np.ones((self.Q,self.y_dim))
,name='logSigmaZ',dtype=tf.float32) # [Q x D]
# Reparametrization track (THIS PART IS COMPLICATED, I KNOW)
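                # Tile the weight posterior (muW, SigmaW) and the zero-mean reference
                # distribution (muZ, SigmaZ) over the batch so every input can draw
                # its own correlated weight sample in the per-mixture loop below.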
_muW_tile = tf.tile(self.muW[tf.newaxis,:,:]
,multiples=[self.N,1,1]) # [N x Q x D]
_sigmaW_tile = tf.exp(tf.tile(self.logSigmaW[tf.newaxis,:,:]
,multiples=[self.N,1,1])) # [N x Q x D]
_muZ_tile = tf.tile(self.muZ[tf.newaxis,:,:]
,multiples=[self.N,1,1]) # [N x Q x D]
_sigmaZ_tile = tf.exp(tf.tile(self.logSigmaZ[tf.newaxis,:,:]
,multiples=[self.N,1,1])) # [N x Q x D]
_samplerList = []
for jIdx in range(self.k_mix): # For all K mixtures
_rho_j = self.rho[:,jIdx:jIdx+1] # [N x 1]
_rho_tile = tf.tile(_rho_j[:,:,tf.newaxis]
,multiples=[1,self.Q,self.y_dim]) # [N x Q x D]
_epsW = tf.random_normal(shape=[self.N,self.Q,self.y_dim],mean=0,stddev=1
,dtype=tf.float32) # [N x Q x D]
_W = _muW_tile + tf.sqrt(_sigmaW_tile)*_epsW # [N x Q x D]
_epsZ = tf.random_normal(shape=[self.N,self.Q,self.y_dim]
,mean=0,stddev=1,dtype=tf.float32) # [N x Q x D]
_Z = _muZ_tile + tf.sqrt(_sigmaZ_tile)*_epsZ # [N x Q x D]
_Y = _rho_tile*_muW_tile + (1.0-_rho_tile**2) \
*(_rho_tile*tf.sqrt(_sigmaZ_tile)/tf.sqrt(_sigmaW_tile) \
*(_W-_muW_tile)+tf.sqrt(1-_rho_tile**2)*_Z)
_samplerList.append(_Y) # Append
WlistConcat = tf.convert_to_tensor(_samplerList) # K*[N x Q x D] => [K x N x Q x D]
self.wSample = tf.transpose(WlistConcat,perm=[1,3,0,2]) # [N x D x K x Q]
# K mean mixtures [N x D x K]
_wTemp = tf.reshape(self.wSample
,shape=[self.N,self.k_mix*self.y_dim,self.Q]) # [N x KD x Q]
_featRsh = tf.reshape(self.feat,shape=[self.N,self.Q,1]) # [N x Q x 1]
_mu = tf.matmul(_wTemp,_featRsh) # [N x KD x Q] x [N x Q x 1] => [N x KD x 1]
self.mu = tf.reshape(_mu,shape=[self.N,self.y_dim,self.k_mix]) # [N x D x K]
# K variance mixtures [N x D x K]
_logvar_raw = slim.fully_connected(self.feat,self.y_dim,scope='var_raw') # [N x D]
_var_raw = tf.exp(_logvar_raw) # [N x D]
_var_tile = tf.tile(_var_raw[:,:,tf.newaxis]
,multiples=[1,1,self.k_mix]) # [N x D x K]
_rho_tile = tf.tile(self.rho[:,tf.newaxis,:]
,multiples=[1,self.y_dim,1]) # [N x D x K]
_tau_inv = self.tau_inv
self.var = (1.0-_rho_tile**2)*_var_tile + _tau_inv # [N x D x K]
# Weight allocation probability pi [N x K]
_pi_logits = slim.fully_connected(self.feat,self.k_mix
,scope='pi_logits') # [N x K]
self.pi_temp = tf.nn.softmax(_pi_logits,dim=1) # [N x K]
# Some heuristics to ensure that pi_1(x) is high enough
if self.pi1_bias != 0:
self.pi_temp = tf.concat([self.pi_temp[:,0:1]+self.pi1_bias
,self.pi_temp[:,1:]],axis=1) # [N x K]
self.pi = tf.nn.softmax(self.pi_temp,dim=1) # [N x K]
else: self.pi = self.pi_temp # [N x K]
# Build graph
def build_graph(self):
# Parse
_M = tf.shape(self.x)[0] # Current batch size
t,pi,mu,var = self.t,self.pi,self.mu,self.var
# Mixture density network loss
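        # Per-sample Gaussian-mixture log-likelihood:
        #   log p(t|x) = logsumexp_k [ log pi_k
        #                              - 0.5*sum_d (t_d-mu_{k,d})^2/(var_{k,d}+var_eps)
        #                              - 0.5*sum_d log(var_{k,d}+var_eps) ]
        # (var_eps stabilizes the division; the -0.5*D*log(2*pi) constant is dropped below).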
trepeat = tf.tile(t[:,:,tf.newaxis],[1,1,self.k_mix]) # (N x D x K)
self.quadratics = -0.5*tf.reduce_sum(((trepeat-mu)**2)/(var+self.var_eps),axis=1) # (N x K)
self.logdet = -0.5*tf.reduce_sum(tf.log(var+self.var_eps),axis=1) # (N x K)
self.logconstant = - 0.5*self.y_dim*tf.log(2*np.pi) # (1)
self.logpi = tf.log(pi) # (N x K)
self.exponents = self.quadratics + self.logdet + self.logpi # + self.logconstant
self.logprobs = tf.reduce_logsumexp(self.exponents,axis=1) # (N)
self.gmm_prob = tf.exp(self.logprobs) # (N)
self.gmm_nll = -tf.reduce_mean(self.logprobs) # (1)
# Regression loss
maxIdx = tf.argmax(input=pi,axis=1, output_type=tf.int32) # Argmax Index [N]
maxIdx = 0*tf.ones_like(maxIdx)
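        # Override the argmax so the regression MSE always uses the first mixture
        # component (the one whose correlation is pinned to rho_ref).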
coords = tf.stack([tf.transpose(gv) for gv in tf.meshgrid(tf.range(self.N),tf.range(self.y_dim))] +
[tf.reshape(tf.tile(maxIdx[:,tf.newaxis],[1,self.y_dim]),shape=(self.N,self.y_dim))]
,axis=2) # [N x D x 3]
self.mu_bar = tf.gather_nd(mu,coords) # [N x D]
fit_mse_coef = 1e-2
self.fit_mse = fit_mse_coef*tf.maximum((1.0-2.0*self.train_rate),0.0) \
*tf.reduce_sum(tf.pow(self.mu_bar-self.t,2))/(tf.cast(self.N,tf.float32)) # (1)
# KL-divergence
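        # KL-style penalty between the shifted correlations (rho+1) and the mixture
        # weights pi; it pushes pi_k up for components with high correlation.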
_eps = 1e-2
self.rho_pos = self.rho+1.0 # Make it positive
self._kl_reg = self.kl_reg_coef*tf.reduce_sum(-self.rho_pos
*(tf.log(self.pi+_eps)-tf.log(self.rho_pos+_eps)),axis=1) # (N)
self.kl_reg = tf.reduce_mean(self._kl_reg) # (1)
# Weight decay
_g_vars = tf.trainable_variables()
self.c_vars = [var for var in _g_vars if '%s/'%(self.name) in var.name]
self.l2_reg = self.l2_reg_coef*tf.reduce_sum(tf.stack([tf.nn.l2_loss(v) for v in self.c_vars])) # [1]
# Schedule MDN loss and regression loss
if self.SCHEDULE_MDN_REG:
self.gmm_nll = tf.minimum((2.0*self.train_rate+0.1),1.0)*self.gmm_nll
self.fit_mse = tf.maximum((1.0-2.0*self.train_rate),0.0)*self.fit_mse
self.loss_total = self.gmm_nll+self.kl_reg+self.l2_reg+self.fit_mse # [1]
else:
self.gmm_nll = self.gmm_nll
self.fit_mse = tf.constant(0.0)
self.loss_total = self.gmm_nll+self.kl_reg+self.l2_reg
# Optimizer
USE_ADAM = True
GRAD_CLIP = True
if GRAD_CLIP: # Gradient clipping
if USE_ADAM:
_optm = tf.train.AdamOptimizer(learning_rate=self.lr
,beta1=0.9,beta2=0.999,epsilon=1e-1) # 1e-4
else:
_optm = tf.train.MomentumOptimizer(learning_rate=self.lr,momentum=0.0)
self.optm = create_gradient_clipping(self.loss_total
,_optm,tf.trainable_variables(),clipVal=1.0)
else:
if USE_ADAM:
self.optm = tf.train.AdamOptimizer(learning_rate=self.lr
,beta1=0.9,beta2=0.999,epsilon=1e-1).minimize(self.loss_total)
else:
self.optm = tf.train.MomentumOptimizer(learning_rate=self.lr
,momentum=0.0).minimize(self.loss_total)
# Check parameters
def check_params(self):
_g_vars = tf.global_variables()
self.g_vars = [var for var in _g_vars if '%s/'%(self.name) in var.name]
if self.VERBOSE:
print ("==== Global Variables ====")
for i in range(len(self.g_vars)):
w_name = self.g_vars[i].name
w_shape = self.g_vars[i].get_shape().as_list()
if self.VERBOSE:
print (" [%02d] Name:[%s] Shape:[%s]" % (i,w_name,w_shape))
# Sampler
def sampler(self,_sess,_x,n_samples=1,_deterministic=True):
pi, mu, var = _sess.run([self.pi, self.mu, self.var],
feed_dict={self.x:_x,self.kp:1.0,self.is_training:False
,self.rho_ref:1.0}) #
n_points = _x.shape[0]
_y_sampled = np.zeros([n_points,self.y_dim,n_samples])
for i in range(n_points):
for j in range(n_samples):
if _deterministic:
k = 0
else:
k = np.random.choice(self.k_mix,p=pi[i,:])
_y_sampled[i,:,j] = mu[i,:,k] # + np.random.randn(1,self.y_dim)*np.sqrt(var[i,:,k])
return _y_sampled
def save2npz(self,_sess,_save_name=None):
""" Save name """
if _save_name==None:
_save_name='net/net_%s.npz'%(self.name)
""" Get global variables """
self.g_wnames,self.g_wvals,self.g_wshapes = [],[],[]
for i in range(len(self.g_vars)):
curr_wname = self.g_vars[i].name
curr_wvar = [v for v in tf.global_variables() if v.name==curr_wname][0]
curr_wval = _sess.run(curr_wvar)
curr_wval_sqz = curr_wval.squeeze()
self.g_wnames.append(curr_wname)
self.g_wvals.append(curr_wval_sqz)
self.g_wshapes.append(curr_wval.shape)
""" Save """
np.savez(_save_name,g_wnames=self.g_wnames,g_wvals=self.g_wvals,g_wshapes=self.g_wshapes)
if self.VERBOSE:
print ("[%s] saved. Size is [%.4f]MB" %
(_save_name,os.path.getsize(_save_name)/1000./1000.))
def restore_from_npz(self,_sess,_loadname=None):
if _loadname==None:
_loadname='net/net_%s_final.npz'%(self.name)
l = np.load(_loadname)
g_wnames = l['g_wnames']
g_wvals = l['g_wvals']
g_wshapes = l['g_wshapes']
for widx,wname in enumerate(g_wnames):
curr_wvar = [v for v in tf.global_variables() if v.name==wname][0]
_sess.run(tf.assign(curr_wvar,g_wvals[widx].reshape(g_wshapes[widx])))
if self.VERBOSE:
print ("Weight restored from [%s] Size is [%.4f]MB" %
(_loadname,os.path.getsize(_loadname)/1000./1000.))
def save2mat_from_npz(self,_x_train='',_y_train='',_save_name=None,_npz_path=None):
# Save weights to mat file so that MATLAB can use it.
if _npz_path == None:
_npz_path = 'net/net_%s.npz'%(self.name)
l = np.load(_npz_path)
g_wnames = l['g_wnames']
g_wvals = l['g_wvals']
g_wshapes = l['g_wshapes']
D = {}
for w_idx,w_name in enumerate(g_wnames):
curr_name = w_name.replace(':0','')
curr_name = curr_name.replace(self.name+'/','')
curr_name = curr_name.replace('/','_')
curr_val = g_wvals[w_idx].reshape(g_wshapes[w_idx])
D[curr_name] = curr_val
# Save train data
if _x_train!='': D['x_train'] = _x_train
if _y_train!='': D['y_train'] = _y_train
# Save dictionary D to the mat file
if _save_name == None:
_save_name = 'net/net_%s.mat'%(self.name)
sio.savemat(_save_name,D)
if self.VERBOSE:
print ("[%s] saved. Size is [%.4f]MB" %
(_save_name,os.path.getsize(_save_name)/1000./1000.))
# Train
def train(self,_sess,_x_train,_y_train,_lr=1e-3,_batch_size=512,_max_epoch=1e4,_kp=1.0
,_LR_SCHEDULE=True,_PRINT_EVERY=20,_PLOT_EVERY=20
,_SAVE_TXT=True,_SAVE_BEST_NET=True,_SAVE_FINAL=True,_REMOVE_PREVS=True
,_x_dim4plot=0,_x_name4plot=None):
self.x_dim4plot = _x_dim4plot
self.x_name4plot = _x_name4plot
# Remove existing files
if _REMOVE_PREVS:
remove_file_if_exists('net/net_%s_best.npz'%(self.name),_VERBOSE=self.VERBOSE)
remove_file_if_exists('net/net_%s_best.mat'%(self.name),_VERBOSE=self.VERBOSE)
remove_file_if_exists('net/net_%s_final.npz'%(self.name),_VERBOSE=self.VERBOSE)
remove_file_if_exists('net/net_%s_final.mat'%(self.name),_VERBOSE=self.VERBOSE)
remove_file_if_exists('res/res_%s.txt'%(self.name),_VERBOSE=self.VERBOSE)
# Reference training data
x_train,y_train = _x_train,_y_train
if len(np.shape(y_train)) == 1: # if y is a vector
y_train = np.reshape(y_train,newshape=[-1,1]) # make it rank two
self.nzr_x,self.nzr_y = nzr(x_train),nzr(y_train) # get normalizer
# Iterate
if _PRINT_EVERY == 0: print_period = 0
else: print_period = _max_epoch//_PRINT_EVERY
if _PLOT_EVERY == 0: plot_period = 0
else: plot_period = _max_epoch//_PLOT_EVERY
max_iter = max(x_train.shape[0]//_batch_size, 1)
best_loss_val = np.inf
if _SAVE_TXT:
txt_name = ('res/res_%s.txt'%(self.name));f = open(txt_name,'w') # Open txt file
print_n_txt(_f=f,_chars='Text name: '+txt_name,_DO_PRINT=self.VERBOSE)
for epoch in range((int)(_max_epoch)+1): # For every epoch
train_rate = (float)(epoch/_max_epoch)
x_train,y_train = shuffle(x_train,y_train)
nzd_x_train,nzd_y_train = self.nzr_x.get_nzdval(x_train),self.nzr_y.get_nzdval(y_train)
for iter in range(max_iter): # For every iteration
start,end = iter*_batch_size,(iter+1)*_batch_size
if _LR_SCHEDULE:
if epoch < 0.5*_max_epoch:
lr_use = _lr
elif epoch < 0.75*_max_epoch:
lr_use = _lr/5.
else:
lr_use = _lr/10.
else:
lr_use = _lr
feeds = {self.x:nzd_x_train[start:end,:],self.t:nzd_y_train[start:end,:]
,self.kp:_kp,self.lr:lr_use,self.train_rate:(float)(epoch/_max_epoch)
,self.rho_ref:self.rho_ref_train,self.is_training:True}
# Optimize
_sess.run(self.optm,feeds)
# Track the Best result
BEST_FLAG = False
check_period = _max_epoch//100
if (epoch%check_period)==0:
feeds = {self.x:nzd_x_train,self.t:nzd_y_train,self.kp:1.0,self.train_rate:train_rate
,self.rho_ref:self.rho_ref_train,self.is_training:False}
opers = [self.loss_total,self.gmm_nll,self.kl_reg,self.l2_reg,self.fit_mse]
loss_val,gmm_nll,kl_reg,l2_reg,fit_mse = _sess.run(opers,feeds)
if (loss_val < best_loss_val) & (train_rate >= 0.5):
best_loss_val = loss_val
BEST_FLAG = True
if _SAVE_BEST_NET:
if self.VERBOSE:
print ("Epoch:[%d] saving current network (best loss:[%.3f])"%(epoch,best_loss_val))
self.save2npz(_sess,'net/net_%s_best.npz'%(self.name)) # Save the current best model
self.save2mat_from_npz(_x_train=x_train,_y_train=y_train,
_save_name='net/net_%s_best.mat'%(self.name),
_npz_path='net/net_%s_best.npz'%(self.name))
# Print current result
if (print_period!=0) and ((epoch%print_period)==0 or (epoch==(_max_epoch-1))): # Print
# Feed total dataset
feeds = {self.x:nzd_x_train,self.t:nzd_y_train,self.kp:1.0,self.train_rate:(float)(epoch/_max_epoch)
,self.rho_ref:self.rho_ref_train,self.is_training:False}
opers = [self.loss_total,self.gmm_nll,self.kl_reg,self.l2_reg,self.fit_mse]
loss_val,gmm_nll,kl_reg,l2_reg,fit_mse = _sess.run(opers,feeds)
if _SAVE_TXT:
strTemp = ("[%d/%d] loss:%.3f(gmm:%.3f+kl:%.3f+l2:%.3f+fit:%.3f) bestLoss:%.3f"
%(epoch,_max_epoch,loss_val,gmm_nll,kl_reg,l2_reg,fit_mse,best_loss_val))
print_n_txt(_f=f,_chars=strTemp,_DO_PRINT=self.VERBOSE)
else:
if self.VERBOSE:
print ("[%d/%d] loss:%.3f(gmm:%.3f+kl:%.3f+l2:%.3f+fit:%.3f) bestLoss:%.3f"
%(epoch,_max_epoch,loss_val,gmm_nll,kl_reg,l2_reg,fit_mse,best_loss_val))
# Plot current result
if (plot_period!=0) and ((epoch%plot_period)==0 or (epoch==(_max_epoch-1))): # Plot
# Get loss values
feeds = {self.x:nzd_x_train,self.t:nzd_y_train,self.kp:1.0,self.train_rate:(float)(epoch/_max_epoch)
,self.rho_ref:self.rho_ref_train,self.is_training:False}
opers = [self.loss_total,self.gmm_nll,self.kl_reg,self.l2_reg,self.fit_mse]
loss_val,gmm_nll,kl_reg,l2_reg,fit_mse = _sess.run(opers,feeds)
# Sampling
n_sample = 1
nzd_y_pred = self.sampler(_sess=_sess,_x=nzd_x_train,n_samples=n_sample)
# Plot first dimensions of both input and output
                x_plot,y_plot = x_train[:,self.x_dim4plot],y_train[:,0] # Training data
plt.figure(figsize=(8,4))
plt.axis([np.min(x_plot),np.max(x_plot),np.min(y_plot)-0.1,np.max(y_plot)+0.1])
h_tr,=plt.plot(x_plot,y_plot,'k.') # Plot training data
for i in range(n_sample):
ith_nzd_y_pred = nzd_y_pred[:,0:1,i]
h_pr,=plt.plot(x_plot,self.nzr_y.get_orgval(ith_nzd_y_pred),'b.') # Plot prediction
plt.title("[%d/%d] name:[%s] loss_val:[%.3e]"%(epoch,_max_epoch,self.name,loss_val))
plt.legend([h_tr,h_pr],['Train data','Predictions'],fontsize=13,loc='upper left')
if self.x_name4plot != None:
plt.xlabel(self.x_name4plot,fontsize=13)
plt.show()
# Save final weights
if _SAVE_FINAL:
self.save2npz(_sess,'net/net_%s_final.npz'%(self.name)) # Save the current best model
self.save2mat_from_npz(_x_train=x_train,_y_train=y_train,
_save_name='net/net_%s_final.mat'%(self.name),
_npz_path='net/net_%s_final.npz'%(self.name))
# Test
def test(self,_sess,_x_train,_y_train,_x_test=None,_y_test=None,
_title_str4data=None,_title_str4test=None,
_PLOT_TRAIN=False,_PLOT_TEST=False,_SAVE_FIG=False,
_x_dim4plot=0,_x_name4plot=None):
self.x_dim4plot = _x_dim4plot
self.x_name4plot = _x_name4plot
# Get normalizer
if len(np.shape(_y_train)) == 1: # if y is a vector
_y_train = np.reshape(_y_train,newshape=[-1,1]) # make it rank two
self.nzr_x,self.nzr_y = nzr(_x_train),nzr(_y_train) # get normalizer
# Plot train data and predictions
if _PLOT_TRAIN:
if len(np.shape(_y_train)) == 1: # if y is a vector
_y_train = np.reshape(_y_train,newshape=[-1,1]) # make it rank two
            x_train4plot,y_train4plot = _x_train[:,self.x_dim4plot],_y_train[:,0] # training data
nzd_y_pred = self.sampler(_sess=_sess,_x=self.nzr_x.get_nzdval(_x_train))
y_pred_train = self.nzr_y.get_orgval(nzd_y_pred)[:,0]
plt.figure(figsize=(8,4))
plt.axis([np.min(x_train4plot),np.max(x_train4plot),np.min(y_train4plot)-0.1,np.max(y_train4plot)+0.1])
h_tr,=plt.plot(x_train4plot,y_train4plot,'k.') # plot train data
h_pr,=plt.plot(x_train4plot,y_pred_train,'b.') # plot prediction for train data
plt.legend([h_tr,h_pr],['Train data','Train predictions'],fontsize=13,loc='upper left')
if self.x_name4plot != None:
plt.xlabel(self.x_name4plot,fontsize=13)
plt.ylabel('Output',fontsize=13)
if _title_str4data != None:
plt.title(_title_str4data,fontsize=15);
if _SAVE_FIG:
plt.savefig('fig/fig_%s_data.png'%(self.name))
plt.show()
# Plot test data and predictions
if len(np.shape(_y_train)) == 1: # if y is a vector
_y_train = np.reshape(_y_train,newshape=[-1,1]) # make it rank two
if len(np.shape(_y_test)) == 1: # if y is a vector
_y_test = np.reshape(_y_test,newshape=[-1,1]) # make it rank two
        x_data4plot,y_data4plot = _x_train[:,self.x_dim4plot],_y_train[:,0] # training data
x_test4plot,y_test4plot = _x_test[:,self.x_dim4plot],_y_test[:,0] # test data
nzd_y_pred = self.sampler(_sess=_sess,_x=self.nzr_x.get_nzdval(_x_test))
y_pred_test = self.nzr_y.get_orgval(nzd_y_pred[:,0:1,0])
if _PLOT_TEST:
fig = plt.figure(figsize=(8,4))
plt.axis([np.min(x_data4plot),np.max(x_data4plot),np.min(y_data4plot)-0.1,
|
np.max(y_data4plot)
|
numpy.max
|
import numpy as np
import nnabla as nn
import nnabla.functions as F
import nnabla.parametric_functions as PF
import nnabla.solvers as S
import nnabla.random as random
import math
import argparse
import gym
from nnabla.ext_utils import get_extension_context
from nnabla.parameter import get_parameter_or_create
from nnabla.initializer import ConstantInitializer
from common.buffer import ReplayBuffer
from common.log import prepare_monitor
from common.experiment import evaluate, train
from common.exploration import EmptyNoise
from sac import q_network, policy_network
from sac import _squash_action
class SAC:
def __init__(self,
obs_shape,
action_size,
batch_size,
critic_lr,
actor_lr,
temp_lr,
tau,
gamma):
self.obs_shape = obs_shape
self.action_size = action_size
self.batch_size = batch_size
self.critic_lr = critic_lr
self.actor_lr = actor_lr
self.temp_lr = temp_lr
self.gamma = gamma
self.tau = tau
self._build()
def _build(self):
# inference graph
self.infer_obs_t = nn.Variable((1,) + self.obs_shape)
with nn.parameter_scope('trainable'):
infer_dist = policy_network(self.infer_obs_t, self.action_size,
'actor')
self.infer_act_t, _ = _squash_action(infer_dist)
self.deterministic_act_t = infer_dist.mean()
# training graph
self.obss_t = nn.Variable((self.batch_size,) + self.obs_shape)
self.acts_t = nn.Variable((self.batch_size, self.action_size))
self.rews_tp1 = nn.Variable((self.batch_size, 1))
self.obss_tp1 = nn.Variable((self.batch_size,) + self.obs_shape)
self.ters_tp1 = nn.Variable((self.batch_size, 1))
with nn.parameter_scope('trainable'):
self.log_temp = get_parameter_or_create('temp', [1, 1],
ConstantInitializer(0.0))
dist_t = policy_network(self.obss_t, self.action_size, 'actor')
dist_tp1 = policy_network(self.obss_tp1, self.action_size, 'actor')
squashed_act_t, log_prob_t = _squash_action(dist_t)
squashed_act_tp1, log_prob_tp1 = _squash_action(dist_tp1)
q1_t = q_network(self.obss_t, self.acts_t, 'critic/1')
q2_t = q_network(self.obss_t, self.acts_t, 'critic/2')
q1_t_with_actor = q_network(self.obss_t, squashed_act_t, 'critic/1')
q2_t_with_actor = q_network(self.obss_t, squashed_act_t, 'critic/2')
with nn.parameter_scope('target'):
q1_tp1 = q_network(self.obss_tp1, squashed_act_tp1, 'critic/1')
q2_tp1 = q_network(self.obss_tp1, squashed_act_tp1, 'critic/2')
# q function loss
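        # Clipped double-Q soft Bellman target:
        #   r + gamma * (min(Q1', Q2') - alpha * log pi(a'|s')) * (1 - done)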
q_tp1 = F.minimum2(q1_tp1, q2_tp1)
entropy_tp1 = F.exp(self.log_temp) * log_prob_tp1
mask = (1.0 - self.ters_tp1)
q_target = self.rews_tp1 + self.gamma * (q_tp1 - entropy_tp1) * mask
q_target.need_grad = False
q1_loss = 0.5 * F.mean(F.squared_error(q1_t, q_target))
q2_loss = 0.5 * F.mean(F.squared_error(q2_t, q_target))
self.critic_loss = q1_loss + q2_loss
# policy function loss
q_t = F.minimum2(q1_t_with_actor, q2_t_with_actor)
entropy_t = F.exp(self.log_temp) * log_prob_t
self.actor_loss = F.mean(entropy_t - q_t)
# temperature loss
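        # Automatic temperature tuning toward a target entropy of -action_size
        # (the standard SAC heuristic): alpha grows when the policy entropy drops
        # below the target and shrinks when it rises above it.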
temp_target = log_prob_t - self.action_size
temp_target.need_grad = False
self.temp_loss = -F.mean(F.exp(self.log_temp) * temp_target)
# trainable parameters
with nn.parameter_scope('trainable'):
with nn.parameter_scope('critic'):
critic_params = nn.get_parameters()
with nn.parameter_scope('actor'):
actor_params = nn.get_parameters()
# target parameters
with nn.parameter_scope('target/critic'):
target_params = nn.get_parameters()
# target update
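        # Polyak (soft) target update dst <- (1 - tau)*dst + tau*src, plus a
        # hard-copy op used to synchronize the target critic at the start.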
update_targets = []
sync_targets = []
for key, src in critic_params.items():
dst = target_params[key]
updated_dst = (1.0 - self.tau) * dst + self.tau * src
update_targets.append(F.assign(dst, updated_dst))
sync_targets.append(F.assign(dst, src))
self.update_target_expr = F.sink(*update_targets)
self.sync_target_expr = F.sink(*sync_targets)
# setup solvers
self.critic_solver = S.Adam(self.critic_lr)
self.critic_solver.set_parameters(critic_params)
self.actor_solver = S.Adam(self.actor_lr)
self.actor_solver.set_parameters(actor_params)
self.temp_solver = S.Adam(self.temp_lr)
self.temp_solver.set_parameters({'temp': self.log_temp})
def infer(self, obs_t):
self.infer_obs_t.d = np.array([obs_t])
self.infer_act_t.forward(clear_buffer=True)
return np.clip(self.infer_act_t.d[0], -1.0, 1.0)
def evaluate(self, obs_t):
self.infer_obs_t.d =
|
np.array([obs_t])
|
numpy.array
|
from pystan import StanModel
import statistics
import numpy as np
import os
#load and compile model (may take a few minutes)
sm = StanModel(file="xy_model.stan")
#set out file
filename = "energy_data.csv"
#write file header if necessary
if not os.path.exists(filename):
    with open(filename, 'w') as f:
        f.write("temp,dim_x,dim_y,energy,energy_var,c,vortex_density,rhat,n_eff\n")
#basic parameters
chains = 4
vorticity_samples = 1000
iterations = 5000
dim_x = dim_y = 4
#create the initial state for the lowest temperature calculation
unit_vect_array =
|
np.zeros((dim_x,dim_y,2),dtype='float')
|
numpy.zeros
|
import argparse
import os
import time
import h5py
import numpy as np
import pymesh
import trimesh
from joblib import Parallel, delayed
from scipy.interpolate import RegularGridInterpolator
import create_file_lst
CUR_PATH = os.path.dirname(os.path.realpath(__file__))
parser = argparse.ArgumentParser()
parser.add_argument('--thread_num', type=int, default=9, help='how many objs are created at the same time')
parser.add_argument('--category', type=str, default="all", help='Which single class to generate on [default: all, can '
'be chair or plane, etc.]')
FLAGS = parser.parse_args()
def get_sdf_value(sdf_pt, sdf_params_ph, sdf_ph, sdf_res):
x = np.linspace(sdf_params_ph[0], sdf_params_ph[3], num=sdf_res + 1)
y = np.linspace(sdf_params_ph[1], sdf_params_ph[4], num=sdf_res + 1)
z = np.linspace(sdf_params_ph[2], sdf_params_ph[5], num=sdf_res + 1)
my_interpolating_function = RegularGridInterpolator((z, y, x), sdf_ph)
sdf_value = my_interpolating_function(sdf_pt)
print("sdf_value:", sdf_value.shape)
return np.expand_dims(sdf_value, axis=1)
def get_sdf(sdf_file, sdf_res):
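    # Binary layout: 3 x int32 grid resolution (the first entry stored negated),
    # 6 x float64 bounding-box coordinates, then float32 SDF samples reshaped
    # to a (sdf_res+1)^3 grid.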
intsize = 4
floatsize = 8
sdf = {
"param": [],
"value": []
}
with open(sdf_file, "rb") as f:
try:
bytes = f.read()
ress = np.frombuffer(bytes[:intsize * 3], dtype=np.int32)
if -1 * ress[0] != sdf_res or ress[1] != sdf_res or ress[2] != sdf_res:
raise Exception(sdf_file, "res not consistent with ", str(sdf_res))
positions = np.frombuffer(bytes[intsize * 3:intsize * 3 + floatsize * 6], dtype=np.float64)
# bottom left corner, x,y,z and top right corner, x, y, z
sdf["param"] = [positions[0], positions[1], positions[2], positions[3], positions[4], positions[5]]
sdf["param"] = np.float32(sdf["param"])
sdf["value"] = np.frombuffer(bytes[intsize * 3 + floatsize * 6:], dtype=np.float32)
sdf["value"] = np.reshape(sdf["value"], (sdf_res + 1, sdf_res + 1, sdf_res + 1))
finally:
f.close()
return sdf
def get_offset_ball(num, bandwidth):
u = np.random.normal(0, 1, size=(num, 1))
v = np.random.normal(0, 1, size=(num, 1))
w = np.random.normal(0, 1, size=(num, 1))
r = np.random.uniform(0, 1, size=(num, 1)) ** (1. / 3) * bandwidth
norm = np.linalg.norm(np.concatenate([u, v, w], axis=1), axis=1, keepdims=1)
# print("u.shape",u.shape)
# print("norm.shape",norm.shape)
# print("r.shape",r.shape)
(x, y, z) = r * (u, v, w) / norm
return np.concatenate([x, y, z], axis=1)
def get_offset_cube(num, bandwidth):
u = np.random.normal(0, 1, size=(num, 1))
v =
|
np.random.normal(0, 1, size=(num, 1))
|
numpy.random.normal
|
import numpy as np
import os
from notears import utils, linear, noleaks
def read_tsv(filename, priv_size, pub_size):
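    # Read a tab-separated file with quoted column headers, map each discrete value
    # to an integer code while counting per-column frequencies, convert the codes to
    # continuous values via discrete2continuous, center the columns, and split the
    # rows into a private block of size priv_size and an optional public block.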
with open(filename) as f:
headers = [h[1:-1] for h in f.readline().strip().split('\t')]
discrete2num = {h:{} for h in headers}
discrete2idx = {h:0 for h in headers}
frequencies = {h:{} for h in headers}
lines = f.readlines()
X = []
for line in lines:
point = line.strip().split('\t')
num_point = []
for idx, val in enumerate(point):
col = headers[idx]
if val not in discrete2num[col]:
mapping = np.ceil(discrete2idx[col] / 2)
if discrete2idx[col] % 2 == 0:
mapping *= -1
discrete2num[col][val] = discrete2idx[col]
discrete2idx[col] += 1
num_val = discrete2num[col][val]
if num_val not in frequencies[col]:
frequencies[col][num_val] = 1
else:
frequencies[col][num_val] += 1
num_point.append(num_val)
X.append(num_point)
    X = np.array(X, dtype=float)  # np.float alias removed in recent NumPy
X = discrete2continuous(X, headers, frequencies) * 20
X = X - np.mean(X, axis=0, keepdims=True)
priv_X = X[:priv_size]
if pub_size != 0: pub_X = X[priv_size:priv_size+pub_size]
else: pub_X = None
return priv_X, pub_X, headers
def discrete2continuous(X, headers, frequencies):
data_size = X.shape[0]
models = {h:create_model(frequencies[h]) for h in headers}
for row_idx in range(data_size):
for col_idx, h in enumerate(headers):
val = models[h](X[row_idx][col_idx])
X[row_idx][col_idx] = val
return X
def normal(x, mu, sig):
return 1. / (np.sqrt(2 * np.pi) * sig) * np.exp(-0.5 *
|
np.square(x - mu)
|
numpy.square
|
"""
Use info from TEF sections to calculate values for the
Geyer and MacCready 2014 parameter space diagram.
"""
# imports
import matplotlib.pyplot as plt
import numpy as np
import pickle
import pandas as pd
from datetime import datetime, timedelta
import netCDF4 as nc
import os; import sys
sys.path.append(os.path.abspath('../alpha'))
import Lfun
import zrfun
import zfun
import tef_fun
import flux_fun
from time import time
from warnings import filterwarnings
filterwarnings('ignore') # skip some warning messages
# associated with lines like QQp[QQ<=0] = np.nan
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-g', '--gridname', type=str, default='cas6')
parser.add_argument('-t', '--tag', type=str, default='v3')
parser.add_argument('-x', '--ex_name', type=str, default='lo8b')
parser.add_argument('-y', '--year', type=int, default=2017)
args = parser.parse_args()
year_str = str(args.year)
# Get Ldir
Ldir = Lfun.Lstart(args.gridname, args.tag)
gtagex = args.gridname + '_' + args.tag + '_' + args.ex_name
# select input/output location
run_name = gtagex+'_'+year_str+'.01.01_'+year_str+'.12.31'
indir00 = Ldir['LOo'] + 'tef2/'
indir0 = indir00 + run_name + '/'
indir = indir0 + 'flux/'
x_indir = indir0 + 'extractions/'
outdir = indir00 + 'sill_dyn_plots/'
Lfun.make_dir(outdir)
# get section definitions
sect_df = tef_fun.get_sect_df()
testing = False
if testing:
sect_list = ['ai1']
else:
sect_list = list(sect_df.index)
# initialize DataFrame
q_df = pd.DataFrame(index=sect_list,
columns=['Ut', 'Qprism','H', 'Qr','Ur', 'M', 'Fr', 'Qe', 'Ue', 'DS', 'Sbar', 'c', 'Ue_non', 'DS_non'])
# get Socn
jdf1_tef_df, jdf1_in_sign = flux_fun.get_fluxes(indir0, 'jdf1')
Socn = jdf1_tef_df['Sin'].max()
print('Socn = %0.2f' % (Socn))
# constants
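# Cd: bottom drag coefficient, om: M2 tidal frequency [s-1],
# beta: haline contraction coefficient [psu-1], g: gravitational acceleration [m s-2]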
Cd = 2.5e-3
om = 1.4e-4
beta = 7.7e-4
g = 9.8
for sect_name in sect_list:
tef_df, in_sign = flux_fun.get_fluxes(indir0, sect_name)
Qe = (tef_df['Qin'] - tef_df['Qout']).mean()/2
DS = (tef_df['Sin'] - tef_df['Sout']).mean()
Sbar = (tef_df['Sin'] + tef_df['Sout']).mean()/2
if False:
if sect_name in ['jdf1', 'sog5']:
# check out mean net salt flux through a couple of sections
print(sect_name)
print((tef_df['Qin']*tef_df['Sin']).mean()
+ (tef_df['Qout']*tef_df['Sout']).mean())
ds = nc.Dataset(x_indir + sect_name + '.nc')
H = ds['h'][:].max() # max depth [m]
A = ds['DA0'][:].sum() # cross-sectional area [m2]
N0 = np.sqrt(beta*g*Socn/H)
c = np.sqrt(beta*g*Socn*H)
Qprism = tef_df['Qtide'].mean() / 2
Ut = (np.pi/2) * tef_df['Qtide'].mean() / A
# use Freshwater Flux as an alternate way to calculate Qr and Ur
Qr = -( tef_df['Qin']*(Socn-tef_df['Sin']) + tef_df['Qout']*(Socn-tef_df['Sout']) ).mean()/Socn
do_sect = True
if Qr < 1:
        print('Dropping Section: Qr < 1 for ' + sect_name)
do_sect = False
q_df = q_df.drop(sect_name)
if do_sect:
q_df.loc[sect_name,'Qprism'] = Qprism
Ur = np.abs(Qr/A)
if Qe < 0:
print('Qe negative for ' + sect_name)
Ue = np.abs(Qe/A) # should we use A/2?
q_df.loc[sect_name,'H'] = H
q_df.loc[sect_name,'Ut'] = Ut
q_df.loc[sect_name,'Qe'] = Qe
q_df.loc[sect_name,'Qr'] = Qr
q_df.loc[sect_name,'Ur'] = Ur
q_df.loc[sect_name,'Ue'] = Ue
q_df.loc[sect_name,'DS'] = DS
q_df.loc[sect_name,'Sbar'] = Sbar
# derived quantities
M2 = (Cd*Ut*Ut)/(om*N0*H*H)
M =
|
np.sqrt(M2)
|
numpy.sqrt
|
import numpy as np
import sys
import os
import h5py
import pandas as pd
from constants import *
from scipy.spatial import cKDTree
import time
import copy
class Snapshots:
def __init__(self, path, snaplist, partType=1, useIDs=True, conversions=[1, 1, 1, 1], softeningLength = 0.002, bigFile=False, physical_units=False):
self.snapshot = {}
for i in snaplist:
self.snapshot[i] = Snapshot(path, i, partType=partType, useIDs=useIDs, conversions=conversions, softeningLength=softeningLength, bigFile=bigFile,
physical_units=physical_units)
class Snapshot:
def __init__(self, path, nsnap, partType=1, useIDs=True, conversions=[1, 1, 1, 1], softeningLength = 0.002, bigFile=False, physical_units=False, read_only_coords=False):
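        # conversions = [length -> Mpc, velocity -> km/s, density -> Mpc^-3, mass -> 1e10 Msun] unit factors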
self.lengte_to_Mpc = conversions[0]
self.snelheid_to_kms = conversions[1]
self.dichtheid_to_Mpc3 = conversions[2]
self.massa_to_10msun = conversions[3]
self.softeningLength = softeningLength
self.bigFile=bigFile
self.physical_units = physical_units
self.snappath = path
self.nsnap = nsnap
self.partType = partType
self.useIDs = useIDs
self.dataopen = self.open_snap(self.snappath, nsnap)
Header = self.dataopen['Header']
self.time = Header.attrs['Time']
self.boxsize = Header.attrs['BoxSize']*self.lengte_to_Mpc
self.npart = Header.attrs['NumPart_ThisFile'][:]
self.redshift = Header.attrs['Redshift']
self.tree = []
self.mass = Header.attrs['MassTable'][:]*self.massa_to_10msun
self.temphalo = {}
self.temphalo['exists'] = False
if not self.bigFile:
if partType < 6:
if not read_only_coords:
self.velocities = self.dataopen['PartType{}/Velocities'.format(int(partType))][:,:]*self.snelheid_to_kms*np.sqrt(1./(1.+self.redshift))
#self.densities = dataopen['PartType{}/Density'.format(int(partType))][:]
self.IDs = self.dataopen['PartType{}/ParticleIDs'.format(int(partType))][:]
self.masses = np.ones(len(self.IDs))*self.mass[self.partType]
if partType == 5:
self.masses = self.dataopen['PartType5/Masses'][:]*self.massa_to_10msun
if partType == 0:
self.internalEnergy = self.dataopen['PartType0/InternalEnergy'][:]*self.massa_to_10msun*self.snelheid_to_kms**2
self.density = self.dataopen['PartType0/Density'][:]*self.dichtheid_to_Mpc3
self.coordinates = self.dataopen['PartType{}/Coordinates'.format(int(self.partType))][:,:]*self.lengte_to_Mpc
elif partType == 6:
if not read_only_coords:
self.mass = Header.attrs['MassTable'][:]*self.massa_to_10msun
self.velocities = self.dataopen['PartType1/Velocities'][:,:]
self.IDs = self.dataopen['PartType1/ParticleIDs'][:]
self.masses = self.mass[1]*np.ones(len(self.IDs))
self.velocities = np.append(self.velocities, self.dataopen['PartType5/Velocities'][:,:], axis=0)*self.snelheid_to_kms*np.sqrt(1./(1.+self.redshift))
self.IDs = np.append(self.IDs, self.dataopen['PartType5/ParticleIDs'][:])
self.masses = np.append(self.masses, self.dataopen['PartType5/Masses'][:]*self.massa_to_10msun)
self.coordinates = self.dataopen['PartType1/Coordinates'][:,:]
self.coordinates = np.append(self.coordinates, self.dataopen['PartType5/Coordinates'][:,:], axis=0)*self.lengte_to_Mpc
elif partType == 7:
if not read_only_coords:
self.mass = Header.attrs['MassTable'][:]*self.massa_to_10msun
self.velocities = self.dataopen['PartType1/Velocities'][:,:]
self.IDs = self.dataopen['PartType1/ParticleIDs'][:]
self.masses = self.mass[1]*np.ones(len(self.IDs))
self.internalEnergy = np.zeros_like(self.masses)
self.velocities = np.append(self.velocities, self.dataopen['PartType0/Velocities'][:,:], axis=0)*self.snelheid_to_kms*np.sqrt(1./(1.+self.redshift))
self.IDs = np.append(self.IDs, self.dataopen['PartType0/ParticleIDs'][:])
self.masses = np.append(self.masses, self.mass[0]*np.ones(len(self.dataopen['PartType0/ParticleIDs'][:])))
self.internalEnergy = np.append(self.internalEnergy, self.dataopen['PartType0/InternalEnergy'][:])
self.temperature = self.internalEnergy*80.8
self.coordinates = self.dataopen['PartType1/Coordinates'][:,:]
self.coordinates = np.append(self.coordinates, self.dataopen['PartType0/Coordinates'][:,:], axis=0)*self.lengte_to_Mpc
self.dataopen.close()
def open_property(self, prop):
if self.partType < 6:
return self.dataopen['PartType{}/'.format(int(self.partType))+prop][:]
elif self.partType == 6:
een = self.dataopen['PartType1/'+prop][:]
return np.append(een, self.dataopen['PartType5/'+prop][:], axis=0)
elif self.partType == 7:
een = self.dataopen['PartType1/'+prop][:]
return np.append(een, self.dataopen['PartType0/'+prop][:], axis=0)
def get_IDs(self):
if self.bigFile:
return self.open_property('ParticleIDs')
else:
return self.IDs
def get_coordinates(self):
if self.bigFile:
return self.open_property('Coordinates')*self.lengte_to_Mpc
else:
return self.coordinates
def get_velocities(self):
if self.bigFile:
return self.open_property('Velocities')*self.snelheid_to_kms
else:
return self.velocities
def get_internalEnergy(self):
if self.partType == 0 or self.partType == 7:
return self.internalEnergy
# elif self.partType == 7:
# iE = np.zeros(np.sum(self.npart))
# iE[self.npart[1]:] = self.dataopen['PartType0/InternalEnergy'][:]
# return iE
else:
return []
def get_temperature(self):
if self.partType == 0 or self.partType == 7:
return self.temperature
else:
return []
def get_density(self):
if self.partType == 0:
return self.density
elif self.partType == 7:
den = np.zeros(np.sum(self.npart))
den[self.npart[1]:] = self.dataopen['PartType0/Density'][:]
return den
else:
return []
def get_masses(self):
if self.bigFile:
if self.partType < 6:
if self.mass[self.partType] == 0:
return self.open_property('Masses')*self.massa_to_10msun
else:
return np.ones(self.get_number_of_particles())*self.mass[self.partType]
else:
if self.partType == 6:
if self.mass[1] == 0:
een = self.dataopen['PartType1/Masses'][:]*self.massa_to_10msun
else:
een = np.ones(self.get_number_of_particles())*self.mass[1]
if self.mass[5] == 0:
return np.append(een, self.dataopen['PartType5/Masses'][:]*self.massa_to_10msun)
else:
                    return np.append(een, np.ones(self.get_number_of_particles())*self.mass[5])
if self.partType == 7:
if self.mass[1] == 0:
een = self.dataopen['PartType1/Masses'][:]*self.massa_to_10msun
else:
een = np.ones(self.get_number_of_particles())*self.mass[1]
if self.mass[0] == 0:
return np.append(een, self.dataopen['PartType0/Masses'][:]*self.massa_to_10msun)
else:
return np.append(een, np.ones(self.get_number_of_particles())*self.mass[0])
else:
return self.masses
def get_masscenter_temphalo(self, particles):
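        # Iteratively refine the centre of mass: recompute it from the nearest half
        # of the current particle subset (at most 5000 particles) until the estimate
        # shifts by less than softeningLength/20.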
coords = self.get_coordinates()[particles]
mass = self.get_masses()[particles]
comtemp = 1./np.sum(mass)*np.sum((mass*coords.T).T, axis=0)
tree = cKDTree(coords, boxsize=self.boxsize)
particles2 = copy.deepcopy(particles)
comnew = copy.deepcopy(comtemp)
comtemp *= 2
while (np.sum(comtemp - comnew)/3. > self.softeningLength/20.):
print(comnew)
dist, ind = tree.query([comnew], k=int(np.min([int(len(particles2)/2), 5000])))
print(np.sum(dist[0])/len(dist[0]))
particles2 = particles[ind[0]]
coords2 = coords[ind[0]]
print(np.sum(np.sqrt((coords2[:, 0]-comnew[0])**2 + (coords2[:, 1]-comnew[1])**2 + (coords2[:, 2]-comnew[2])**2))/len(dist[0]))
mass2 = mass[ind[0]]
comtemp = copy.deepcopy(comnew)
comnew = 1./np.sum(mass2)*np.sum((mass2*coords2.T).T, axis=0)
print(comnew)
return comnew
def get_temphalo(self, coord, radius, fixedRadius=np.logspace(-3, 0, 60), r200fac=1, partlim=200, satellite=False):
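        # Select particles within r200fac*radius of coord (requires a cKDTree in
        # self.tree, see makeCoordTree), sort them by distance, build an enclosed
        # density profile and, unless satellite, estimate R200/M200; results are
        # cached in self.temphalo.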
if not isinstance(self.tree, cKDTree):
sys.exit("Error: no KDTree present")
massa = self.get_masses()
c = constant(redshift=self.redshift)
c.change_constants(self.redshift)
comoving_rhocrit200 = deltaVir*c.rhocrit_Ms_Mpci3*h/(h*(1+self.redshift))**3
self.temphalo['BinMiddleRadius'] = fixedRadius
self.temphalo['MaxRadIndex'] = np.abs(fixedRadius - r200fac*radius).argmin()
self.temphalo['Radius'] = np.logspace(np.log10(fixedRadius[0]) -
0.5*(np.log10(fixedRadius[-1])-np.log10(fixedRadius[0]))/len(fixedRadius),
np.log10(fixedRadius[-1]) - 0.5*(np.log10(fixedRadius[-1])-np.log10(fixedRadius[0]))/len(fixedRadius), len(fixedRadius))
self.temphalo['indices'] = np.array(self.tree.query_ball_point(coord, r=np.min([r200fac*radius, self.temphalo['Radius'][-1]])))
if len(self.temphalo['indices']) < partlim:
self.temphalo['Npart'] = 0
return 0
self.temphalo['distance'] = self.get_radius(point=coord, coords=self.get_coordinates()[self.temphalo['indices']])
sortorder = np.argsort(self.temphalo['distance']).astype(int)
self.temphalo['indices'] = self.temphalo['indices'][sortorder]
self.temphalo['distance'] = self.temphalo['distance'][sortorder]
if self.temphalo['distance'][0] == 0.0:
self.temphalo['distance'][0] = 0.001*self.temphalo['distance'][1]
self.temphalo['Coord'] = coord
#Compute initial density profile
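        # Enclosed mean density at each particle's radius: cumulative mass inside r
        # divided by the sphere volume (the 1e10 factor converts masses to Msun).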
self.temphalo['densityprofile'] = np.cumsum(massa[self.temphalo['indices']])/(4./3.*np.pi*self.temphalo['distance']**3)*1.e10
#Compute virial radius and mass
if not satellite:
virialradiusindex = np.where(self.temphalo['densityprofile'] <= comoving_rhocrit200)[0]
if len(virialradiusindex) == 0:
print("Something is wrong with this halo", self.temphalo['densityprofile'][-1]/comoving_rhocrit200,
self.temphalo['distance'][0], len(self.temphalo['indices']))
self.temphalo['indices'] = []
self.temphalo['Npart'] = 0
return 0
virialradiusindex = virialradiusindex[0]
if virialradiusindex < partlim:
self.temphalo['indices'] = []
self.temphalo['Npart'] = 0
return 0
self.temphalo['virialradiusindex'] = virialradiusindex
self.temphalo['R200'] = self.temphalo['distance'][virialradiusindex]
indicestemp = self.temphalo['indices'][:virialradiusindex]
self.temphalo['Npart'] = len(indicestemp)
#[np.where(self.temphalo['distance'] < self.temphalo['R200'])[0]]
self.temphalo['M200'] = np.sum(massa[indicestemp])
else:
self.temphalo['R200'] = -1
self.temphalo['M200'] = -1
self.temphalo['exists'] = True
if self.partType < 6:
self.temphalo['indicesdictCu'] = {}
self.temphalo['indicesdict'] = {}
for i in range(len(self.temphalo['Radius'])):
self.temphalo['indicesdictCu'][i] = np.zeros(0).astype(int)
self.temphalo['indicesdict'][i] = np.zeros(0).astype(int)
for i in range(0, self.temphalo['MaxRadIndex']+1):
temp2 = np.where(self.temphalo['distance'] <= self.temphalo['BinMiddleRadius'][i])[0]
self.temphalo['indicesdictCu'][i] = temp2
if i == 0:
temp = np.where(self.temphalo['distance'] <= self.temphalo['Radius'][0])[0]
else:
temp = np.where((self.temphalo['distance'] > self.temphalo['Radius'][i-1]) & (self.temphalo['distance'] <= self.temphalo['Radius'][i]))[0]
self.temphalo['indicesdict'][i] = temp
elif self.partType == 7:
massanu = self.get_masses()[self.temphalo['indices']]
self.temphalo['DMparticles'] = np.zeros(len(self.temphalo['indices']))
self.temphalo['Hparticles'] = np.zeros(len(self.temphalo['indices']))
self.temphalo['DMindices'] = np.where(massanu == self.mass[1])[0]
self.temphalo['Hindices'] = np.where(massanu == self.mass[0])[0]
self.temphalo['DMparticles'][self.temphalo['DMindices']] = 1
self.temphalo['Hparticles'][self.temphalo['Hindices']] = 1
if not satellite:
self.temphalo['virialradiusindex'] = virialradiusindex
self.temphalo['DMindicesM200'] = np.where(self.get_masses()[self.temphalo['indices'][:virialradiusindex]] == self.mass[1])[0]
self.temphalo['HindicesM200'] = np.where(self.get_masses()[self.temphalo['indices'][:virialradiusindex]] == self.mass[0])[0]
self.temphalo['DMFraction'] = self.mass[1]*len(self.temphalo['DMindicesM200'])/(self.mass[1]*len(self.temphalo['DMindicesM200']) +
self.mass[0]*len(self.temphalo['HindicesM200']))
else:
self.temphalo['DMFraction'] = -1
#Saving particles per shell that can be used by other functions:
# - get_angular_momentum_radius()
# - get_temphalo_profiles()
self.temphalo['Hindicesdict'] = {}
self.temphalo['DMindicesdict'] = {}
self.temphalo['HindicesdictCu'] = {}
self.temphalo['DMindicesdictCu'] = {}
self.temphalo['indicesdictCu'] = {}
self.temphalo['indicesdict'] = {}
for i in range(len(self.temphalo['Radius'])):
self.temphalo['Hindicesdict'][i] = np.zeros(0).astype(int)
self.temphalo['DMindicesdict'][i] = np.zeros(0).astype(int)
self.temphalo['HindicesdictCu'][i] = np.zeros(0).astype(int)
self.temphalo['DMindicesdictCu'][i] = np.zeros(0).astype(int)
self.temphalo['indicesdictCu'][i] = np.zeros(0).astype(int)
self.temphalo['indicesdict'][i] = np.zeros(0).astype(int)
for i in range(0, self.temphalo['MaxRadIndex']+1):
temp2 = np.where(self.temphalo['distance'] <= self.temphalo['BinMiddleRadius'][i])[0]
self.temphalo['indicesdictCu'][i] = temp2
self.temphalo['HindicesdictCu'][i] = np.where(self.temphalo['Hparticles'][temp2] != 0)[0]
self.temphalo['DMindicesdictCu'][i] = np.where(self.temphalo['DMparticles'][temp2] != 0)[0]
if i == 0:
temp = np.where(self.temphalo['distance'] <= self.temphalo['Radius'][0])[0]
else:
temp = np.where((self.temphalo['distance'] > self.temphalo['Radius'][i-1]) & (self.temphalo['distance'] <= self.temphalo['Radius'][i]))[0]
self.temphalo['Hindicesdict'][i] = np.where(self.temphalo['Hparticles'][temp] != 0)[0]
self.temphalo['DMindicesdict'][i] = np.where(self.temphalo['DMparticles'][temp] != 0)[0]
self.temphalo['indicesdict'][i] = temp
def get_number_of_particles(self):
return len(self.get_IDs())
def get_time(self):
return self.time
def get_boxsize(self):
return self.boxsize
def get_radius(self, point=np.array([0, 0, 0]), coords = np.zeros((0, 3))):
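        # Periodic (minimum-image) distance from `point` to each coordinate
        # (defaults to all particle coordinates).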
if len(coords) == 0 :
coords = self.get_coordinates()
coords = (coords - point)
coords = np.where(np.abs(coords) > 0.5*self.boxsize, coords - np.sign(coords)*self.boxsize, coords)
return np.sqrt((coords[:, 0])**2 + (coords[:, 1])**2 + (coords[:, 2])**2)
def get_average_velocity(self):
vel = self.get_velocities()
return np.sqrt(vel[:, 0]**2 + vel[:, 1]**2 + vel[:, 2]**2)
def get_radialvelocity(self, coord, IDs=[], indices=[]):
if len(indices) > 0:
start_time = time.time()
coords = (self.get_coordinates()[indices]-coord)*Mpc_to_km
velocity = self.get_velocities()[indices]
r = np.sqrt((coords[:, 0])**2 + (coords[:, 1])**2 + (coords[:, 2])**2)
elif len(IDs) > 0:
self.useIDs = True
indices = self.get_indices(IDs = IDs)
coords = (self.get_coordinates()[indices]-coord)*Mpc_to_km
velocity = self.get_velocities()[indices]
r = self.get_radius(point = coord)[indices]*Mpc_to_km
else:
coords = (self.get_coordinates() - coord)*Mpc_to_km
velocity = self.get_velocities()
r = self.get_radius(point = coord)*Mpc_to_km
vel = (velocity[:, 0]*(coords[:, 0]) + velocity[:, 1]*(coords[:, 1]) +
velocity[:, 2]*(coords[:, 2]))
vel_rad = np.zeros(len(velocity[:,0]))
vel_rad[np.where(r > 0)] = vel[np.where(r > 0)]/r[np.where(r > 0)]
return vel_rad
def get_angular_momentum_from_coords(self, coords, radius):
coord = self.get_coordinates() - coords
rad = self.get_radius(point=coords)
indices = np.where(rad <= radius)[0]
xr, xteta, xphi, vr, vteta, vphi = self.get_spherical_coord_velocity(coords, indices)
Jx = np.sum((xteta*vphi - xphi*vteta)*self.get_masses()[indices])
Jy = np.sum((xphi*vr - xr*vphi)*self.get_masses()[indices])
Jz = np.sum((xr*vteta - xteta*vr)*self.get_masses()[indices])
return np.sqrt(Jx*Jx+Jy*Jy+Jz*Jz)*self.mass[self.partType]
def get_spherical_coords(self, coords, indices):
coord = self.get_coordinates()[indices, :] - coords
xr = self.get_radius(point=coords)[indices]
xteta = np.arctan(coord[:, 1]/coord[:, 0])
xphi = np.arccos(coord[:, 2]/xr)
xr[np.where(np.isnan(xr))] = 0
xteta[np.where(np.isnan(xteta))] = 0
xphi[np.where(np.isnan(xphi))] = 0
return xr, xteta, xphi
def get_spherical_coord_velocity(self, coords, indices):
xr, xteta, xphi = self.get_spherical_coords(coords, indices)
coord = self.get_coordinates()[indices, :] - coords
vel = self.get_velocities()[indices, :]
vr = self.get_radius(point=coords)[indices]
coord *= Mpc_to_km
vteta = (vel[:, 0]*coord[:, 1] - vel[:, 1]*coord[:, 0])/(coord[:, 0]**2+ coord[:, 1]**2)
vphi = (coord[:, 2]*(coord[:, 0]*vel[:, 0] + coord[:, 1]*vel[:, 1]) -
(coord[:, 0]**2 + coord[:, 1]**2)*vel[:, 2])/(xr*Mpc_to_km*np.sqrt(coord[:, 0]**2 + coord[:, 1]**2))
vr[np.where(np.isnan(vr))] = 0
vteta[np.where(np.isnan(vteta))] = 0
vphi[np.where(np.isnan(vphi))] = 0
return xr, xteta, xphi, vr, vteta, vphi
def get_number_density_profile(self, coords, IDs, radius):
if not self.temphalo['exists']:
indices = self.get_indices(coords, IDs = [], radius=radius[-1])
radcoord = self.get_radius(point=coords)[indices]
else:
indices = self.temphalo['indices']
radcoord = self.temphalo['distance']
radius = self.temphalo['Radius']
n = np.zeros(len(radius)-1)
H_per_particle = self.get_masses()[indices]*1e10*Msun_g/hydrogen_g
for i in range(len(radius)-1):
V = 4./3*np.pi*((radius[i+1]*Mpc_to_cm)**3 - (radius[i]*Mpc_to_cm)**3)
temp = np.where((radcoord > radius[i]) & (radcoord <= radius[i+1]))[0]
n[i] = np.sum(H_per_particle[temp])/V
return n
def get_virial_ratio(self, amount):
        #Does not work if there are different masses within one partType!
        #get_temphalo_profiles must be run first.
if self.temphalo['virialradiusindex'] < amount:
indices = self.temphalo['indices'][:self.temphalo['virialradiusindex']]
else:
indices = np.random.choice(self.temphalo['indices'], size=amount, replace=False)#[np.random.randint(self.temphalo['virialradiusindex'], size=amount)]
if self.partType == 1:
DMmass = self.mass[1]
#Hmass = self.mass[0]
#fM = DMmass/Hmass
DMindices = indices
#Hindices = np.where(self.get_masses()[indices]==Hmass)[0]
DMparts = len(DMindices)
#Hparts = len(Hindices)
if amount < self.temphalo['virialradiusindex']:
DMparttot = self.temphalo['virialradiusindex']
#Hparttot = np.sum(self.temphalo['Hparticles'][:self.temphalo['virialradiusindex']])
DMmass = DMparttot/DMparts * DMmass
#Hmass = Hparttot/Hparts * Hmass
indices = DMindices
coords = self.get_coordinates()[DMindices]
#mi = np.ones(len(indices))
#mi[DMindices] = DMmass
#if Hparts > 0:
# mi[Hindices] = Hmass
U = 0
lengte = len(indices) - 1
#print("--- %s seconds ---" % (time.time() - start_time), 'making arrays', len(indices))
#start_time = time.time()
for i in range(len(indices)):
coordstemp = np.delete(coords, i, axis=0)
#mij = np.delete(mi, i)*mi[i]
mij = DMmass*DMmass
rij = self.get_radius(point = coords[i], coords = coordstemp)
rij[rij==0] = self.softeningLength
U += np.sum(mij/rij)
U *= G_Mpc_km2_Msi_si2 * 0.5 * 1.e10
indices = self.temphalo['indices'][:self.temphalo['virialradiusindex']]
velocities = self.get_velocities()[indices]
velocities -= self.temphalo['Vel']
K = np.sum(0.5*self.mass[1]*np.sum(velocities*velocities, axis=1))# + np.sum(self.get_internalEnergy()[indices])*self.mass[0]
self.temphalo['Virial_ratio'] = 2*K/U
return self.temphalo['Virial_ratio']
elif self.partType == 7:
DMmass = self.mass[1]
#Hmass = self.mass[0]
#fM = DMmass/Hmass
DMindices = indices[np.where(self.get_masses()[indices]==DMmass)[0]]
#Hindices = np.where(self.get_masses()[indices]==Hmass)[0]
DMparts = len(DMindices)
#Hparts = len(Hindices)
if amount < self.temphalo['virialradiusindex']:
DMparttot = np.sum(self.temphalo['DMparticles'][:self.temphalo['virialradiusindex']])
#Hparttot = np.sum(self.temphalo['Hparticles'][:self.temphalo['virialradiusindex']])
DMmass = DMparttot/DMparts * DMmass
#Hmass = Hparttot/Hparts * Hmass
indices = DMindices
coords = self.get_coordinates()[DMindices]
#mi = np.ones(len(indices))
#mi[DMindices] = DMmass
#if Hparts > 0:
# mi[Hindices] = Hmass
U = 0
lengte = len(indices) - 1
#print("--- %s seconds ---" % (time.time() - start_time), 'making arrays', len(indices))
#start_time = time.time()
for i in range(len(indices)):
coordstemp = np.delete(coords, i, axis=0)
#mij = np.delete(mi, i)*mi[i]
mij = DMmass*DMmass
rij = self.get_radius(point = coords[i], coords = coordstemp)
rij[rij==0] = self.softeningLength
U += np.sum(mij/rij)
U *= G_Mpc_km2_Msi_si2 * 0.5 * 1.e10
indices = self.temphalo['indices'][:self.temphalo['virialradiusindex']]
booldm = self.temphalo['DMparticles'][:self.temphalo['virialradiusindex']].astype(bool)
indices = indices[booldm]
velocities = self.get_velocities()[indices]
velocities -= self.temphalo['Vel']
K = np.sum(0.5*self.mass[1]*np.sum(velocities*velocities, axis=1))# + np.sum(self.get_internalEnergy()[indices])*self.mass[0]
self.temphalo['Virial_ratio'] = 2*K/U
return self.temphalo['Virial_ratio']
def get_temphalo_profiles(self):
if self.partType == 7:
self.get_temphalo_profiles_pt7()
return
indices = self.temphalo['indices']
radcoord = self.temphalo['distance']
radius = self.temphalo['Radius']
self.temphalo['profile_density'] = np.zeros(len(radius))
self.temphalo['profile_volume'] = np.zeros(len(radius))
self.temphalo['profile_npart'] = np.zeros(len(radius))
self.temphalo['profile_vrad'] = np.zeros(len(radius))
#self.temphalo['profile_v'] = np.zeros(len(radius))
self.temphalo['profile_mass'] = np.zeros(len(radius))
coords = (self.get_coordinates()[indices]-self.temphalo['Coord'])
coords = np.where(np.abs(coords) > 0.5*self.boxsize, coords - np.sign(coords)*self.boxsize, coords)*Mpc_to_km
velocity = self.get_velocities()[indices]
if self.temphalo['virialradiusindex'] > 1000:
self.temphalo['Vel'] = np.average(velocity[:np.max([int(self.temphalo['virialradiusindex']*0.10), 1000])], axis=0)
else:
self.temphalo['Vel'] = np.average(velocity[:self.temphalo['virialradiusindex']], axis=0)
velocity -= self.temphalo['Vel']
vel = (velocity[:, 0]*(coords[:, 0]) + velocity[:, 1]*(coords[:, 1]) +
velocity[:, 2]*(coords[:, 2]))
r = radcoord*Mpc_to_km
vr = np.zeros(len(velocity[:,0]))
vr[np.where(r > 0)] = vel[np.where(r > 0)]/r[np.where(r > 0)]
M_per_particle = self.get_masses()[indices]*1e10
if self.partType==0:
self.temphalo['profile_temperature'] = np.zeros(len(radius)-1)
ie = self.get_temperature()[indices]
for i in range(self.temphalo['MaxRadIndex'] + 1):
temp = self.temphalo['indicesdict'][i]
tempCu = self.temphalo['indicesdictCu'][i]
if i == 0:
self.temphalo['profile_volume'][i] = 4./3*np.pi*(radius[i])**3
else:
self.temphalo['profile_volume'][i] = 4./3*np.pi*((radius[i])**3 - (radius[i-1])**3)
if len(temp) > 0:
self.temphalo['profile_npart'][i] = len(temp)
self.temphalo['profile_vrad'][i] = np.sum(vr[temp])/len(temp)
masstemp = np.sum(M_per_particle[temp])
if self.partType==0 or self.partType==1:
self.temphalo['profile_density'][i] =masstemp/self.temphalo['profile_volume'][i]
if self.partType==0:
self.temphalo['profile_temperature'][i] = np.sum(ie[temp])/len(temp)
if len(tempCu) > 0:
#self.temphalo['profile_v'][i] = np.sum(velocity[tempCu])/len(tempCu)
self.temphalo['profile_mass'][i] = np.sum(M_per_particle[tempCu])
def get_temphalo_profiles_pt7(self):
indices = self.temphalo['indices']
radcoord = self.temphalo['distance']
radius = self.temphalo['Radius']
self.temphalo['profile_Hdensity'] = np.zeros(len(radius))
self.temphalo['profile_DMdensity'] = np.zeros(len(radius))
self.temphalo['profile_density'] = np.zeros(len(radius))
self.temphalo['profile_volume'] = np.zeros(len(radius))
self.temphalo['profile_npart'] = np.zeros(len(radius))
self.temphalo['profile_Hnpart'] = np.zeros(len(radius))
self.temphalo['profile_DMnpart'] = np.zeros(len(radius))
self.temphalo['profile_vrad'] = np.zeros(len(radius))
self.temphalo['profile_Hvrad'] = np.zeros(len(radius))
self.temphalo['profile_DMvrad'] = np.zeros(len(radius))
#self.temphalo['profile_v'] = np.zeros(len(radius))
#self.temphalo['profile_Hv'] = np.zeros(len(radius))
#self.temphalo['profile_DMv'] = np.zeros(len(radius))
self.temphalo['profile_mass'] = np.zeros(len(radius))
self.temphalo['profile_Hmass'] = np.zeros(len(radius))
self.temphalo['profile_DMmass'] = np.zeros(len(radius))
self.temphalo['profile_temperature'] = np.zeros(len(radius))
ie = self.get_temperature()[indices]
coords = (self.get_coordinates()[indices] - self.temphalo['Coord'])
coords = np.where(np.abs(coords) > 0.5*self.boxsize, coords - np.sign(coords)*self.boxsize, coords)*Mpc_to_km
velocity = self.get_velocities()[indices]
if self.temphalo['virialradiusindex'] > 1000:
self.temphalo['Vel'] = np.average(velocity[:int(self.temphalo['virialradiusindex']*0.10)], axis=0)
else:
self.temphalo['Vel'] = np.average(velocity[:self.temphalo['virialradiusindex']], axis=0)
# self.temphalo['VelDM'] = np.average(velocity[:100][self.temphalo['DMparticles'][:100].astype(bool)], axis=0)
# self.temphalo['VelH'] = np.average(velocity[:100][self.temphalo['Hparticles'][:100].astype(bool)], axis=0)
velocity -= self.temphalo['Vel']
M_per_particle = self.get_masses()[indices]*1e10
vel = (velocity[:, 0]*(coords[:, 0]) + velocity[:, 1]*(coords[:, 1]) +
velocity[:, 2]*(coords[:, 2]))
r = radcoord*Mpc_to_km
vr = np.zeros(len(velocity[:,0]))
vr[np.where(r > 0)] = vel[r > 0]/r[r > 0]
for i in range(self.temphalo['MaxRadIndex']+1):
temp = self.temphalo['indicesdict'][i]
tempCu = self.temphalo['indicesdictCu'][i]
if i == 0:
self.temphalo['profile_volume'][i] = 4./3*np.pi*(radius[i])**3
else:
self.temphalo['profile_volume'][i] = 4./3*np.pi*((radius[i])**3 - (radius[i-1])**3)
# Htemp = temp[np.where(self.temphalo['Hparticles'][temp] != 0)[0]]
# DMtemp =temp[np.where(self.temphalo['DMparticles'][temp] != 0)[0]]
# Htemp = temp[np.where(np.in1d(temp.ravel(), self.temphalo['Hindices']))[0]]
# DMtemp = temp[np.where(np.in1d(temp.ravel(), self.temphalo['DMindices']))[0]]
Htemp = temp[self.temphalo['Hindicesdict'][i]]
DMtemp = temp[self.temphalo['DMindicesdict'][i]]
HtempCu = tempCu[self.temphalo['HindicesdictCu'][i]]
DMtempCu = tempCu[self.temphalo['DMindicesdictCu'][i]]
if len(temp)>0:
self.temphalo['profile_npart'][i] = len(temp)
self.temphalo['profile_Hnpart'][i] = len(Htemp)
self.temphalo['profile_DMnpart'][i] = len(DMtemp)
self.temphalo['profile_vrad'][i] = np.sum(vr[temp])/len(temp)
masstemp = np.sum(M_per_particle[temp])
self.temphalo['profile_density'][i] = masstemp/self.temphalo['profile_volume'][i]
if len(tempCu) > 0:
#self.temphalo['profile_v'][i] = np.sum(velocity[tempCu])/len(tempCu)
self.temphalo['profile_mass'][i] = np.sum(M_per_particle[tempCu])
if len(DMtemp) > 0:
self.temphalo['profile_DMvrad'][i] = np.sum(vr[DMtemp])/len(DMtemp)
DMmasstemp = np.sum(M_per_particle[DMtemp])
self.temphalo['profile_DMdensity'][i] = DMmasstemp/self.temphalo['profile_volume'][i]
if len(DMtempCu) > 0:
#self.temphalo['profile_DMv'][i] = np.sum(velocity[DMtempCu])/len(DMtempCu)
self.temphalo['profile_DMmass'][i] = np.sum(M_per_particle[DMtempCu])
if len(Htemp) > 0:
self.temphalo['profile_Hvrad'][i] = np.sum(vr[Htemp])/len(Htemp)
Hmasstemp = np.sum(M_per_particle[Htemp])
self.temphalo['profile_temperature'][i] = np.sum(ie[Htemp])/len(Htemp)
self.temphalo['profile_Hdensity'][i] = Hmasstemp/self.temphalo['profile_volume'][i]
if len(HtempCu) > 0:
#self.temphalo['profile_Hv'][i] = np.sum(velocity[HtempCu])/len(HtempCu)
self.temphalo['profile_Hmass'][i] = np.sum(M_per_particle[HtempCu])
def get_density_profile(self, coords=[], radius=[]): #Msun /Mpc^3
self.useIDs = False
if not self.temphalo['exists']:
indices = self.get_indices(coords, IDs = [], radius=radius[-1])
radcoord = self.get_radius(point=coords)[indices]
else:
indices = self.temphalo['indices']
radcoord = self.temphalo['distance']
radius = self.temphalo['Radius']
n = np.zeros(len(radius)-1)
M_per_particle = self.get_masses()[indices]*1e10
for i in range(len(radius)-1):
V = 4./3*np.pi*((radius[i+1])**3 - (radius[i])**3)
temp = np.where((radcoord > radius[i]) & (radcoord <= radius[i+1]))[0]
n[i] = np.sum(M_per_particle[temp])/V
return n
def get_radialvelocity_profile(self, coords=[], radius=[]): # mean radial velocity per radial bin
self.useIDs = False
indices = self.get_indices(coords, IDs = [], radius=radius[-1])
radcoord = self.get_radius(point=coords)[indices]
velocity = self.get_velocities()[indices]
coord = (self.get_coordinates()[indices] - coords)
coord = np.where(np.abs(coord) > 0.5*self.boxsize, coord - np.sign(coord)*self.boxsize, coord)
vel = (velocity[:, 0]*(coord[:, 0]) + velocity[:, 1]*(coord[:, 1]) +
velocity[:, 2]*(coord[:, 2]))
vr = np.zeros(len(radcoord))
vr[np.where(radcoord > 0)] = vel[np.where(radcoord > 0)]/radcoord[np.where(radcoord > 0)]/Mpc_to_km
vel_rad= np.zeros(len(radius)-1)
for i in range(len(radius)-1):
temp = np.where((radcoord > radius[i]) & (radcoord <= radius[i+1]))[0]
vel_rad[i] = np.sum(vr[temp])/len(temp)
return vel_rad
def makeCoordTree(self):
print('Constructing cKDTree...')
self.tree = cKDTree(self.get_coordinates(), boxsize=self.boxsize)
print('Finished constructing cKDTree.')
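# Descriptive sketch of get_virial_radius below: starting from r = 0.1, the radius is
# grown while the mean enclosed density exceeds deltaVir*rho_crit; a left bracket with
# density above the threshold is then found by shrinking 0.8*r, and the bracket is
# bisected until the enclosed density matches deltaVir*rho_crit to within the density
# change over one softening length.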
def get_virial_radius(self, coords):
if self.temphalo['exists']:
return self.temphalo['R200']
c = constant(redshift = self.redshift)
coords = coords%self.boxsize
if not isinstance(self.tree, cKDTree):
self.makeCoordTree()
massa = self.get_masses()
def give_density(rad):
return np.sum(massa[self.tree.query_ball_point(coords, r=rad)])/(4./3.*np.pi*rad**3)*1e10
def give_density2(rad):
return self.get_mass_within_radius(coords, rad)/(4./3.*np.pi*rad**3)*1e10
boundsnotfound = True
radius = 0.1
density = give_density(radius)
if density == 0:
return 0
while boundsnotfound:
times = density/(deltaVir*c.rhocrit_Ms_Mpci3)
if np.abs(times - 1) < self.softeningLength:
return radius
if times > 1:
radius *= ((times)**(1./2.))
else:
boundsnotfound = False
density = give_density(radius)
boundsnotfound = True
radleft = 0.8*radius
denleft = give_density(radleft)
while boundsnotfound:
times = denleft/(deltaVir*c.rhocrit_Ms_Mpci3)
if np.abs(times - 1) < self.softeningLength:
return radleft
if denleft == 0:
radleft *= 1.1
elif times < 1:
radleft *= ((times)**(1./2.))
else:
boundsnotfound = False
denleft = give_density(radleft)
boundsnotfound = True
radright = radius
denright = density
radleft = self.softeningLength*10.
denleft = give_density(radleft)
softeningDen = np.abs(density - give_density(radius + self.softeningLength))
while boundsnotfound:
#print(radright, radleft, denright, denleft, softeningDen)
if np.abs(denright - deltaVir*c.rhocrit_Ms_Mpci3) < softeningDen:
return radright
if np.abs(denleft - deltaVir*c.rhocrit_Ms_Mpci3) < softeningDen:
return radleft
radius = radleft + (radright-radleft)/2.
density = give_density(radius)
softeningDen = np.abs(density - give_density(radius + self.softeningLength))
if np.abs(density - deltaVir*c.rhocrit_Ms_Mpci3) < softeningDen:
return radius
if density < deltaVir*c.rhocrit_Ms_Mpci3:
radright = radius
denright = density
else:
radleft = radius
denleft = density
def get_angular_momentum_spherical(self, coords, IDs, radius = False, overmass = False):
# ix = np.in1d(self.IDs.ravel(), IDs)
# indices = np.where(ix)[0]
# indices = indices.astype(int)
indices = self.get_indices(coords, IDs, radius=radius)
xr, xteta, xphi, vr, vteta, vphi = self.get_spherical_coord_velocity(coords, indices)
if overmass:
Jr = np.sum(xteta*vphi - xphi*vteta)
Jteta = np.sum(xphi*vr - xr*vphi)
Jphi = np.sum(xr*vteta - xteta*vr)
return np.sqrt(Jr*Jr+Jteta*Jteta+Jphi*Jphi)/len(indices)
else:
massa = self.get_masses()
Jr = np.sum((xteta*vphi - xphi*vteta)*massa[indices])
Jteta = np.sum((xphi*vr - xr*vphi)*massa[indices])
Jphi = np.sum((xr*vteta - xteta*vr)*massa[indices])
return np.pi*np.sqrt(Jr*Jr+Jteta*Jteta+Jphi*Jphi)
def get_angular_momentum(self, coords, IDs, radius = False, overmass = False):
# ix = np.in1d(self.IDs.ravel(), IDs)
# indices = np.where(ix)[0]
# indices = indices.astype(int)
#indices = mss.match(IDs, self.IDs)
if not self.temphalo['exists']:
indices = self.get_indices(coords, IDs, radius=radius)
vel0 = np.array([0, 0, 0])
else:
indices = self.temphalo['indices'] #Only within R200
vel0 = self.temphalo['Vel']
coord = self.get_coordinates()[indices, :] - coords
coord = np.where(np.abs(coord) > 0.5*self.boxsize, coord - np.sign(coord)*self.boxsize, coord)
vel = self.get_velocities()[indices, :] - vel0
if overmass:
Jx = np.sum(coord[:, 1]*vel[:, 2] - coord[:, 2]*vel[:, 1])
Jy = np.sum(coord[:, 2]*vel[:, 0] - coord[:, 0]*vel[:, 2])
Jz = np.sum(coord[:, 0]*vel[:, 1] - coord[:, 1]*vel[:, 0])
return np.sqrt(Jx*Jx+Jy*Jy+Jz*Jz)/len(indices)
else:
massa = self.get_masses()
Jx = np.sum((coord[:, 1]*vel[:, 2] - coord[:, 2]*vel[:, 1])*massa[indices])
Jy = np.sum((coord[:, 2]*vel[:, 0] - coord[:, 0]*vel[:, 2])*massa[indices])
Jz = np.sum((coord[:, 0]*vel[:, 1] - coord[:, 1]*vel[:, 0])*massa[indices])
return np.sqrt(Jx*Jx+Jy*Jy+Jz*Jz)
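# The component sums above implement J = sum_i m_i (r_i x v_i); a minimal equivalent
# sketch using numpy's cross product (illustrative only, not used by this class):
# J = np.sum(np.cross(coord, vel) * massa[indices][:, None], axis=0)
# J_mag = np.linalg.norm(J)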
def get_angular_momentum_radius(self, coords, IDs, radius, overmass=False):
if not self.temphalo['exists']:
indices = self.get_indices(coords, IDs = IDs, radius=radius[-1])
rad = self.get_radius(point=coords)[indices]
else:
indices = self.temphalo['indices']
rad = self.temphalo['distance']
radius = self.temphalo['BinMiddleRadius']
coord1 = self.get_coordinates()[indices] - coords
coord1 = np.where(np.abs(coord1) > 0.5*self.boxsize, coord1 - np.sign(coord1)*self.boxsize, coord1)
vel1 = self.get_velocities()[indices] - self.temphalo['Vel']
self.temphalo['AngularMomentum'] = np.zeros_like(radius)
if self.partType == 7:
self.temphalo['AngularMomentumH'] = np.zeros_like(radius)
self.temphalo['AngularMomentumDM'] = np.zeros_like(radius)
JperRH = np.zeros(len(radius))
jperRDM = np.zeros(len(radius))
Jx = []
Jy = []
Jz = []
JperR = np.zeros(len(radius))
indicesold = []
if not overmass:
massa = self.get_masses()
for i in range(self.temphalo['MaxRadIndex']+1):
if isinstance(self.tree, cKDTree) and self.temphalo['exists']==False:
indicesnew = self.get_indices(coords, IDs, radius=radius[i])
coord = self.get_coordinates()[indicesnew, :] - coords
vel = self.get_velocities()[indicesnew, :]
else:
temp = self.temphalo['indicesdictCu'][i]
if len(temp) == 0:
JperR[i] = 0.
continue
indicesnew = indices[temp]
coord = coord1[temp]
vel = vel1[temp]
if len(indicesnew) == 0:
JperR[i] = 0.
continue
#indicesnew = np.delete(indicesnew, np.where(np.in1d(indicesnew, indicesold))[0])
if overmass:
# Jx = np.append(Jx, coord[:, 1]*vel[:, 2] - coord[:, 2]*vel[:, 1])
# Jy = np.append(Jy, coord[:, 2]*vel[:, 0] - coord[:, 0]*vel[:, 2])
# Jz = np.append(Jz, coord[:, 0]*vel[:, 1] - coord[:, 1]*vel[:, 0])
# JperR[i] = np.sqrt(np.sum(Jx)*np.sum(Jx)+np.sum(Jy)*np.sum(Jy)+np.sum(Jz)*np.sum(Jz))/len(Jx)
Jx = np.sum(coord[:, 1]*vel[:, 2] - coord[:, 2]*vel[:, 1])
Jy = np.sum(coord[:, 2]*vel[:, 0] - coord[:, 0]*vel[:, 2])
Jz = np.sum(coord[:, 0]*vel[:, 1] - coord[:, 1]*vel[:, 0])
JperR[i] = np.sqrt(Jx*Jx+Jy*Jy+Jz*Jz)/len(indicesnew)
if self.temphalo['exists']:
self.temphalo['AngularMomentum'][i] = JperR[i]
if self.partType==7:
Htemp = self.temphalo['HindicesdictCu'][i]
DMtemp = self.temphalo['DMindicesdictCu'][i]
if len(Htemp) > 0:
coordH = coord[Htemp]
velH = vel[Htemp]
Jx = np.sum(coordH[:, 1]*velH[:, 2] - coordH[:, 2]*velH[:, 1])
Jy = np.sum(coordH[:, 2]*velH[:, 0] - coordH[:, 0]*velH[:, 2])
Jz = np.sum(coordH[:, 0]*velH[:, 1] - coordH[:, 1]*velH[:, 0])
self.temphalo['AngularMomentumH'][i] = np.sqrt(Jx*Jx+Jy*Jy+Jz*Jz)/len(Htemp)
if len(DMtemp) > 0:
coordDM = coord[DMtemp]
velDM = vel[DMtemp]
Jx = np.sum(coordDM[:, 1]*velDM[:, 2] - coordDM[:, 2]*velDM[:, 1])
Jy = np.sum(coordDM[:, 2]*velDM[:, 0] - coordDM[:, 0]*velDM[:, 2])
Jz = np.sum(coordDM[:, 0]*velDM[:, 1] - coordDM[:, 1]*velDM[:, 0])
self.temphalo['AngularMomentumDM'][i] = np.sqrt(Jx*Jx+Jy*Jy+Jz*Jz)/len(DMtemp)
else:
Jx = np.append(Jx, (coord[:, 1]*vel[:, 2] - coord[:, 2]*vel[:, 1])*massa[indicesnew])
Jy = np.append(Jy, (coord[:, 2]*vel[:, 0] - coord[:, 0]*vel[:, 2])*massa[indicesnew])
Jz = np.append(Jz, (coord[:, 0]*vel[:, 1] - coord[:, 1]*vel[:, 0])*massa[indicesnew])
JperR[i] = np.sqrt(np.sum(Jx)*np.sum(Jx) + np.sum(Jy)*np.sum(Jy) + np.sum(Jz)*np.sum(Jz))
indicesold = indicesnew
if self.temphalo['exists']:
return
return JperR
def get_spin_parameter(self, coords, IDs, radius = False, M=False):
if self.temphalo['exists']:
indices =self.temphalo['indices'][:self.temphalo['virialradiusindex']]
R = self.temphalo['R200']
M = self.temphalo['M200']
self.temphalo['lambda'] = np.zeros_like(R)
vel0 = self.temphalo['Vel']
if self.partType==7:
self.temphalo['lambdaH'] = np.zeros_like(R)
self.temphalo['lambdaDM'] = np.zeros_like(R)
Htemp = self.temphalo['HindicesM200']
DMtemp =self.temphalo['DMindicesM200']
elif radius:
R = radius
indices = self.get_indices(coords, IDs, radius=radius)
vel0 = np.array([0, 0, 0])
if len(indices) == 0:
#print("Error: no particles found")
return 0.0
else:
R = np.max(self.get_radius(point=coords)[indices])
indices = self.get_indices(coords, IDs, radius=radius)
vel0 = np.array([0, 0, 0])
if len(indices) == 0:
#print("Error: no particles found")
return 0.0
coord = self.get_coordinates()[indices, :] - coords
coord = np.where(np.abs(coord) > 0.5*self.boxsize, coord - np.sign(coord)*self.boxsize, coord)
|
from uuid import uuid4
from operator import itemgetter
from itertools import chain
from copy import deepcopy
import json
from functools import reduce
def gen_uuid():
return str(uuid4())
import numpy as np
import pickle
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.linear_model import LogisticRegression, SGDClassifier
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.svm import LinearSVC, SVC
from sklearn.multiclass import OneVsRestClassifier, OneVsOneClassifier
from sklearn.feature_selection import SelectFromModel, VarianceThreshold
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.preprocessing import OneHotEncoder, LabelEncoder, LabelBinarizer
from sklearn.metrics import precision_recall_fscore_support
from skmultilearn.problem_transform import LabelPowerset, ClassifierChain
from skmultilearn.problem_transform import BinaryRelevance
from scipy.sparse import vstack, csr_matrix, hstack, issparse, coo_matrix
from scipy.stats import norm
from keras.layers import Input, Dense, Lambda, LSTM, RepeatVector, Dropout
from keras.layers.core import Dense, Activation
from keras.models import Model, Sequential
from keras.optimizers import RMSprop
from keras import backend as K
from keras import metrics
from keras import regularizers
from keras.metrics import categorical_accuracy
from keras.datasets import mnist
from time_series_to_ir import TimeSeriesToIR
from base_scrabble import BaseScrabble
from common import *
from hcc import StructuredClassifierChain
from brick_parser import pointTagsetList as point_tagsets,\
locationTagsetList as location_tagsets,\
equipTagsetList as equip_tagsets,\
pointSubclassDict as point_subclass_dict,\
equipSubclassDict as equip_subclass_dict,\
locationSubclassDict as location_subclass_dict,\
tagsetTree as tagset_tree
tagset_list = point_tagsets + location_tagsets + equip_tagsets
tagset_list.append('networkadapter')
def tree_flatter(tree, init_flag=True):
branches_list = list(tree.values())
d_list = list(tree.keys())
for branches in branches_list:
for branch in branches:
added_d_list = tree_flatter(branch)
d_list = [d for d in d_list if d not in added_d_list]\
+ added_d_list
return d_list
def extend_tree(tree, k, d):
for curr_head, branches in tree.items():
if k==curr_head:
branches.append(d)
for branch in branches:
extend_tree(branch, k, d)
def calc_leaves_depth(tree, d=dict(), depth=0):
curr_depth = depth + 1
for tagset, branches in tree.items():
if d.get(tagset):
d[tagset] = max(d[tagset], curr_depth)
else:
d[tagset] = curr_depth
for branch in branches:
new_d = calc_leaves_depth(branch, d, curr_depth)
for k, v in new_d.items():
if d.get(k):
d[k] = max(d[k], v)
else:
d[k] = v
return d
def augment_tagset_tree(tagsets, subclass_dict):
for tagset in set(tagsets):
if '-' in tagset:
classname = tagset.split('-')[0]
#tagset_tree[classname].append({tagset:[]})
extend_tree(tagset_tree, classname, {tagset:[]})
subclass_dict[classname].append(tagset)
subclass_dict[tagset] = []
else:
if tagset not in subclass_dict.keys():
classname = tagset.split('_')[-1]
subclass_dict[classname].append(tagset)
subclass_dict[tagset] = []
extend_tree(tagset_tree, classname, {tagset:[]})
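# Illustrative (hypothetical) example of the structure these helpers operate on: the
# tagset tree is a dict mapping a tagset to a list of sub-dicts of the same shape, e.g.
# {'temperature_sensor': [{'zone_temperature_sensor': []}]}. extend_tree() appends a new
# {tagset: []} branch under a matching head anywhere in the tree, tree_flatter() returns
# all tagset names in the tree as a flat list, and augment_tagset_tree() registers
# tagsets that are not yet in the Brick subclass hierarchy.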
class SequenceAutoencoder(object):
def __init__(self, latent_dim=20):
self.latent_dim = latent_dim
def fit_new(self, x, y=None):
timesteps = x.shape[1]
input_dim = x.shape[2]
self.ae = Sequential()
self.ae.add(Dense(self.latent_dim,
input_shape=(timesteps,input_dim,),
activation='relu',
name='enc'))
self.ae.add(Dropout(0.2))
self.ae.add(Dense(input_dim,
activation='softmax',
name='dec'))
self.encoder = Model(inputs=self.ae.input,
outputs=self.ae.get_layer('enc').output)
#rmsprop = RMSprop(lr=0.05)
self.ae.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['categorical_accuracy'],)
self.ae.fit(x, x, epochs=1)
def fit(self, x, y=None):
timesteps = x.shape[1]
input_dim = x.shape[2]
self.ae = Sequential()
#m.add(LSTM(latent_dim, input_dim=in_dim, return_sequences=True, name='enc'), )
self.ae.add(LSTM(self.latent_dim,
activation='softsign',
input_shape=(timesteps,input_dim,),
return_sequences=True,
unroll=True,
name='enc'), )
self.ae.add(LSTM(input_dim,
activation='softsign',
return_sequences=True,
unroll=True,
name='dec',
))
self.ae.add(Activation('softmax'))
self.encoder = Model(inputs=self.ae.input,
outputs=self.ae.get_layer('enc').output)
rmsprop = RMSprop(lr=0.005)
self.ae.compile(loss='categorical_hinge',
optimizer=rmsprop,
metrics=['categorical_accuracy', 'binary_accuracy'],)
self.ae.fit(x, x, epochs=1)
def fit_dep(self, x, y=None):
timesteps = x.shape[1]
input_dim = x.shape[2]
inputs = Input(shape=(timesteps, input_dim))
encoded = LSTM(self.latent_dim)(inputs)
decoded = RepeatVector(timesteps)(encoded)
decoded = LSTM(input_dim, return_sequences=True)(decoded)
encoded_input = Input(shape=(self.latent_dim,))
self.sequence_autoencoder = Model(inputs, decoded)
self.encoder = Model(inputs, encoded)
self.sequence_autoencoder.compile(
#loss='binary_crossentropy',
loss='categorical_crossentropy',
optimizer='RMSprop',
metrics=['binary_accuracy']
)
self.sequence_autoencoder.fit(x, x)
def transform(self, x):
return self.encoder.predict(x)
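# Minimal usage sketch for SequenceAutoencoder (shapes assumed, not from the source):
# x is a one-hot encoded batch of token sequences with shape (n_samples, timesteps,
# vocab_size); fit() trains the LSTM autoencoder to reconstruct x, and transform()
# returns the per-timestep latent encoding of shape (n_samples, timesteps, latent_dim).
# ae = SequenceAutoencoder(latent_dim=20)
# ae.fit(x)
# z = ae.transform(x)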
class SequenceVectorizer(object):
def __init__(self, tokenizer, vocabulary, max_len):
self.tokenizer = tokenizer
self.vocabulary = vocabulary
self.max_len = max_len
def fit(self, x):
#le = LabelBinarizer().fit(reduce(adder , x))
self.le = LabelBinarizer().fit(list(self.vocabulary.keys()))
vocabs = list(self.vocabulary.keys())
binarized = self.le.transform(vocabs)
(locs, indices) = np.where(binarized==1)
for loc, index in zip(locs, indices):
self.vocabulary[vocabs[loc]] = index
def transform(self, x):
x = map(self.tokenizer, x)
stack = []
for sentence in x:
encoded = self.le.transform(sentence)
padder = np.zeros((self.max_len - encoded.shape[0],
encoded.shape[1]))
encoded = np.vstack([encoded, padder])
stack.append(encoded)
encoded_labels = np.stack(stack)
return encoded_labels
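# Minimal usage sketch for SequenceVectorizer (vocabulary and sentence are hypothetical):
# vec = SequenceVectorizer(tokenizer=str.split,
#                          vocabulary={'zone': 0, 'temperature': 1, 'sensor': 2},
#                          max_len=8)
# vec.fit(None)                            # only the vocabulary keys are used by fit()
# X = vec.transform(['zone temperature sensor'])
# X.shape == (1, 8, 3)                     # one-hot rows, zero-padded to max_len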
class Ir2Tagsets(BaseScrabble):
def __init__(self,
target_building,
target_srcids,
building_label_dict,
building_sentence_dict,
building_tagsets_dict,
source_buildings=[],
source_sample_num_list=[],
learning_srcids=[],
conf={}):
super(Ir2Tagsets, self).__init__(
target_building,
target_srcids,
building_label_dict,
building_sentence_dict,
building_tagsets_dict,
source_buildings,
source_sample_num_list,
learning_srcids,
conf)
self.ts2ir = None
self.ts_feature_filename = 'temp/features.pkl'
if 'use_cluster_flag' in conf:
self.use_cluster_flag = conf['use_cluster_flag']
else:
self.use_cluster_flag = True
if 'eda_flag' in conf:
self.eda_flag = conf['eda_flag']
else:
self.eda_flag = False
if 'use_brick_flag' in conf:
self.use_brick_flag = conf['use_brick_flag']
else:
self.use_brick_flag = True
if 'n_jobs' in conf:
self.n_jobs = conf['n_jobs']
else:
#self.n_jobs = 1
self.n_jobs = 24
if 'ts_flag' in conf:
self.ts_flag = conf['ts_flag']
else:
self.ts_flag = False
if 'negative_flag' in conf:
self.negative_flag = conf['negative_flag']
else:
self.negative_flag = False
if 'tagset_classifier_type' in conf:
self.tagset_classifier_type = conf['tagset_classifier_type']
else:
#self.tagset_classifier_type = 'StructuredCC_autoencoder'
self.tagset_classifier_type = 'StructuredCC'
if 'n_estimators' in conf:
self.n_estimators = conf['n_estimators']
else:
self.n_estimators = 10 # TODO: Find the proper value
if 'vectorizer_type' in conf:
self.vectorizer_type = conf['vectorizer_type']
else:
self.vectorizer_type = 'tfidf'
#self.vectorizer_type = 'sequence'
if 'query_strategy' in conf:
self.query_strategy = conf['query_strategy']
else:
self.query_strategy = 'phrase_util'
if 'autoencode' in conf:
self.autoencode = conf['autoencode']
else:
#self.autoencode = True
self.autoencode = False
self._init_data()
self._init_brick()
def _init_brick(self):
self.brick_srcids = []
self.tagset_list = point_tagsets + location_tagsets + equip_tagsets
self.tagset_list.append('networkadapter')
self.subclass_dict = dict()
self.subclass_dict.update(point_subclass_dict)
self.subclass_dict.update(equip_subclass_dict)
self.subclass_dict.update(location_subclass_dict)
self.subclass_dict['networkadapter'] = list()
self.subclass_dict['unknown'] = list()
self.subclass_dict['none'] = list()
self.tagset_tree = deepcopy(tagset_tree)
def _init_data(self):
self.sentence_dict = {}
self.label_dict = {}
self.tagsets_dict = {}
self.phrase_dict = {}
self.point_dict = {}
self.max_len = None
for building, source_sample_num in zip(self.source_buildings,
self.source_sample_num_list):
self.sentence_dict.update(self.building_sentence_dict[building])
one_label_dict = self.building_label_dict[building]
self.label_dict.update(one_label_dict)
if not self.learning_srcids:
sample_srcid_list = select_random_samples(
building,
one_label_dict.keys(),
source_sample_num,
self.use_cluster_flag)
self.learning_srcids += sample_srcid_list
one_tagsets_dict = self.building_tagsets_dict[building]
self.tagsets_dict.update(one_tagsets_dict)
for srcid, tagsets in one_tagsets_dict.items():
point_tagset = 'none'
for tagset in tagsets:
if tagset in point_tagsets:
point_tagset = tagset
break
self.point_dict[srcid] = point_tagset
self.phrase_dict = make_phrase_dict(self.sentence_dict,
self.label_dict)
self.max_len = max([len(phrases) for phrases
in self.phrase_dict.values()])
# validation
for srcid in self.target_srcids:
assert srcid in self.tagsets_dict
def _augment_brick_samples(self, doc, srcids):
brick_truths_dict = dict()
self.brick_srcids = []
brick_doc = []
logging.info('Start adding Brick samples')
brick_copy_num = 6
self.brick_tagsets_dict = dict()
self.brick_doc = list()
for tagset in tagset_list:
for j in range(0, brick_copy_num):
#multiplier = random.randint(2, 6)
srcid = 'brick;' + gen_uuid()
self.brick_srcids.append(srcid)
self.brick_tagsets_dict[srcid] = [tagset]
tagset_doc = list()
for tag in tagset.split('_'):
tagset_doc += [tag] * random.randint(1,2)
brick_doc.append(' '.join(tagset_doc))
doc += brick_doc
self.tagsets_dict.update(self.brick_tagsets_dict)
srcids += self.brick_srcids
return doc, srcids
def _extend_tagset_list(self, new_tagsets):
self.tagset_list += new_tagsets
self.tagset_list = list(set(self.tagset_list))
def update_model(self, srcids):
self.learning_srcids += srcids
self.target_srcids = [srcid for srcid in self.target_srcids
if srcid not in self.learning_srcids]
invalid_num = sum([srcid not in self.tagsets_dict for srcid in
self.learning_srcids + self.target_srcids]) #debug
self._extend_tagset_list(reduce(adder, [self.tagsets_dict[srcid]
for srcid in self.learning_srcids + self.target_srcids]))
augment_tagset_tree(self.tagset_list, self.subclass_dict)
self._build_tagset_classifier(self.learning_srcids,
self.target_srcids,
validation_srcids=[])
def _make_doc_vectorizer(self, doc):
doc = [sentence.split() for sentence in doc]
le = LabelBinarizer().fit(reduce(adder , doc))
stack = []
for sentence in doc:
encoded = le.transform(sentence)
padder = np.zeros((self.max_len - encoded.shape[0],
encoded.shape[1]))
encoded = np.vstack([encoded, padder])
stack.append(encoded)
encoded_labels = np.stack(stack)
return encoded_labels
def _augment_negative_examples(self, doc, srcids):
negative_doc = []
negative_truths_dict = {}
negative_srcids = []
for srcid in self.learning_srcids:
true_tagsets = list(set(self.tagsets_dict[srcid]))
sentence = self.phrase_dict[srcid]
for tagset in true_tagsets:
negative_srcid = srcid + ';' + gen_uuid()
removing_tagsets = set()
new_removing_tagsets = set([tagset])
removing_tags = []
negative_tagsets = list(filter(tagset.__ne__, true_tagsets))
i = 0
while len(new_removing_tagsets) != len(removing_tagsets):
i += 1
if i>5:
pdb.set_trace()
removing_tagsets = deepcopy(new_removing_tagsets)
for removing_tagset in removing_tagsets:
removing_tags += removing_tagset.split('_')
for negative_tagset in negative_tagsets:
for tag in removing_tags:
if tag in negative_tagset.split('_'):
new_removing_tagsets.add(negative_tagset)
negative_sentence = [tag for tag in sentence if\
tag not in removing_tags]
for tagset in removing_tagsets:
negative_tagsets = list(filter(tagset.__ne__,
negative_tagsets))
# negative_sentence = [word for word in sentence \
# if word not in tagset.split('_')]
negative_doc.append(' '.join(negative_sentence))
negative_truths_dict[negative_srcid] = negative_tagsets
negative_srcids.append(negative_srcid)
for i in range(0,50):
# Add empty examples
negative_srcid = gen_uuid()
negative_doc.append('')
negative_srcids.append(negative_srcid)
negative_truths_dict[negative_srcid] = []
doc += negative_doc
srcids += negative_srcids
self.tagsets_dict.update(negative_truths_dict)
return doc, srcids
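# Note on the negative-example construction above: for every true tagset of a learning
# sample, the tags belonging to that tagset (and to any other tagset sharing those tags,
# found by the fixed-point loop) are stripped from the phrase, and the stripped phrase is
# added as a new sample whose label set excludes the removed tagsets. A handful of empty
# sentences labelled with no tagsets are appended as well.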
def _build_tagset_classifier(self,
learning_srcids,
target_srcids,
validation_srcids):
learning_srcids = deepcopy(learning_srcids)
# Update TagSet pool to include TagSets not in Brick.
orig_sample_num = len(learning_srcids)
new_tagset_list = tree_flatter(self.tagset_tree, [])
new_tagset_list = new_tagset_list + [ts for ts in self.tagset_list \
if ts not in new_tagset_list]
self.tagset_list = new_tagset_list
self.tagset_binarizer = MultiLabelBinarizer(self.tagset_list)
self.tagset_binarizer.fit([self.tagset_list])
assert self.tagset_list == self.tagset_binarizer.classes_.tolist()
#self.tagsets_dict = {srcid: self.tagsets_dict[srcid]
# for srcid in learning_srcids}
## Init brick tag_list
# TODO: Maybe this should be done in initialization stage.
self.tag_list = list(set(reduce(adder, map(splitter,
self.tagset_list))))
# All possible vocabularies.
vocab_dict = dict([(tag, i) for i, tag in enumerate(self.tag_list)])
# Define Vectorizer
tokenizer = lambda x: x.split()
# TODO: We could use word embedding like word2vec here instead.
if self.vectorizer_type == 'tfidf':
self.tagset_vectorizer = TfidfVectorizer(tokenizer=tokenizer,
vocabulary=vocab_dict)
elif self.vectorizer_type == 'meanbembedding':
self.tagset_vectorizer = MeanEmbeddingVectorizer(tokenizer=tokenizer,
vocabulary=vocab_dict)
elif self.vectorizer_type == 'count':
self.tagset_vectorizer = CountVectorizer(tokenizer=tokenizer,
vocabulary=vocab_dict)
elif self.vectorizer_type == 'sequence':
self.tagset_vectorizer = SequenceVectorizer(tokenizer=tokenizer,
vocabulary=vocab_dict,
max_len=self.max_len)
else:
raise Exception('Wrong vectorizer type: {0}'
.format(self.vectorizer_type))
if self.ts_flag:
pass
#TODO: Run self._augment_with_ts()
## Transform learning samples
learning_doc = [' '.join(self.phrase_dict[srcid])
for srcid in learning_srcids]
test_doc = [' '.join(self.phrase_dict[srcid])
for srcid in target_srcids]
## Augment with negative examples.
if self.negative_flag:
learning_doc, learning_srcids = \
self._augment_negative_examples(learning_doc, learning_srcids)
## Init Brick samples.
if self.use_brick_flag:
learning_doc, learning_srcids = \
self._augment_brick_samples(learning_doc,
learning_srcids)
self.tagset_vectorizer.fit(learning_doc + test_doc)# + brick_doc)
# Apply Easy-Domain-Adaptation mechanism. Not useful.
if self.eda_flag:
pass
# TODO: self._augment_eda()
else:
# Make TagSet vectors.
learning_vect_doc = self.tagset_vectorizer.transform(learning_doc)
if not isinstance(learning_vect_doc, np.ndarray):
learning_vect_doc = learning_vect_doc.todense()
truth_mat = csr_matrix([self.tagset_binarizer.transform(
[self.tagsets_dict[srcid]])[0]
for srcid in learning_srcids])
if self.eda_flag:
assert False, 'This should not be called for now'
zero_vectors = self.tagset_binarizer.transform(\
[[] for i in range(0, unlabeled_vect_doc.shape[0])])
truth_mat = vstack([truth_mat, zero_vectors])
learning_vect_doc = np.vstack([learning_vect_doc, unlabeled_vect_doc])
if self.autoencode:
self.encoder = SequenceAutoencoder()
self.encoder.fit(learning_vect_doc)
learning_vect_doc = self.encoder.transform(learning_vect_doc)
if self.tagset_classifier_type == 'StructuredCC_autoencoder':
def meta_scc(**kwargs):
#feature_selector = SelectFromModel(LinearSVC(C=1))
#feature_selector = SequenceAutoencoder()
base_base_classifier = GradientBoostingClassifier(**kwargs)
base_classifier = Pipeline([#('feature_selection',
# feature_selector),
('classification',
base_base_classifier)
])
tagset_classifier = StructuredClassifierChain(
base_classifier,
self.tagset_binarizer,
self.subclass_dict,
self.tagset_vectorizer.vocabulary,
self.n_jobs,
self.use_brick_flag,
self.tagset_vectorizer)
return tagset_classifier
meta_classifier = meta_scc
"""
rf_params_list_dict = {
'n_estimators': [10, 50, 100],
'criterion': ['gini', 'entropy'],
'max_features': [None, 'auto'],
'max_depth': [1, 5, 10, 50],
'min_samples_leaf': [2,4,8],
'min_samples_split': [2,4,8]
}
gb_params_list_dict = {
'loss': ['deviance', 'exponential'],
'learning_rate': [0.1, 0.01, 1, 2],
'criterion': ['friedman_mse', 'mse'],
'max_features': [None, 'sqrt'],
'max_depth': [1, 3, 5, 10],
'min_samples_leaf': [1,2,4,8],
'min_samples_split': [2,4,8]
}
params_list_dict = gb_params_list_dict
"""
elif self.tagset_classifier_type == 'StructuredCC':
def meta_scc(**kwargs):
feature_selector = SelectFromModel(LinearSVC(C=1))
#feature_selector = SelectFromModel(LinearSVC(C=0.01, penalty='l1', dual=False))
base_base_classifier = GradientBoostingClassifier(**kwargs)
#base_base_classifier = SGDClassifier(loss='modified_huber', penalty='elasticnet')
#base_base_classifier = PassiveAggressiveClassifier(loss='squared_hinge', C=0.1)
#base_base_classifier = LogisticRegression()
#base_base_classifier = RandomForestClassifier(**kwargs)
base_classifier = Pipeline([('feature_selection',
feature_selector),
('classification',
base_base_classifier)
])
tagset_classifier = StructuredClassifierChain(
base_classifier,
self.tagset_binarizer,
self.subclass_dict,
self.tagset_vectorizer.vocabulary,
self.n_jobs,
self.use_brick_flag,
self.tagset_vectorizer)
return tagset_classifier
meta_classifier = meta_scc
"""
rf_params_list_dict = {
'n_estimators': [10, 50, 100],
'criterion': ['gini', 'entropy'],
'max_features': [None, 'auto'],
'max_depth': [1, 5, 10, 50],
'min_samples_leaf': [2,4,8],
'min_samples_split': [2,4,8]
}
gb_params_list_dict = {
'loss': ['deviance', 'exponential'],
'learning_rate': [0.1, 0.01, 1, 2],
'criterion': ['friedman_mse', 'mse'],
'max_features': [None, 'sqrt'],
'max_depth': [1, 3, 5, 10],
'min_samples_leaf': [1,2,4,8],
'min_samples_split': [2,4,8]
}
params_list_dict = gb_params_list_dict
"""
else:
assert False, 'Unknown classifier type: {0}'\
.format(self.tagset_classifier_type)
best_params = {'learning_rate':0.1, 'subsample':0.25}
self.tagset_classifier = meta_classifier(**best_params)
# Actual fitting.
if isinstance(self.tagset_classifier, StructuredClassifierChain):
self.tagset_classifier.fit(learning_vect_doc, truth_mat.toarray(), \
orig_sample_num=len(learning_vect_doc)
- len(self.brick_srcids))
else:
assert False, 'This should not be reachable for now'
self.tagset_classifier.fit(learning_vect_doc, truth_mat.toarray())
def ir2tagset_al_query_samples_phrase_util(self,
test_srcids,
building,
pred_tagsets_dict,
inc_num):
phrase_usage_dict = {}
for srcid in test_srcids:
pred_tagsets = pred_tagsets_dict[srcid]
phrase_usage_dict[srcid] = self._determine_used_phrases(
self.phrase_dict[srcid],
pred_tagsets)
phrase_usages = list(phrase_usage_dict.values())
mean_usage_rate = np.mean(phrase_usages)
std_usage_rate = np.std(phrase_usages)
# Select underexploited sentences.
threshold = mean_usage_rate - std_usage_rate
todo_sentence_dict = dict((srcid, alpha_tokenizer(''.join(
self.sentence_dict[srcid])))
for srcid, usage_rate
in phrase_usage_dict.items()
if usage_rate < threshold and srcid in test_srcids)
cluster_dict = get_cluster_dict(building)
todo_srcids = select_random_samples(building, \
list(todo_sentence_dict.keys()),
min(inc_num, len(todo_sentence_dict)), \
True,\
reverse=True,
cluster_dict=cluster_dict,
shuffle_flag=False
)
#if the numbers are not enough randomly select more:
if len(todo_srcids) < inc_num:
more_num = inc_num - len(todo_srcids)
todo_sentence_dict = dict((srcid, alpha_tokenizer(''.join(
self.sentence_dict[srcid])))
for srcid, usage_rate
in phrase_usage_dict.items()
if srcid in test_srcids)
cluster_dict = get_cluster_dict(building)
todo_srcids = select_random_samples(building, \
list(todo_sentence_dict.keys()),
min(more_num, len(todo_sentence_dict)), \
True,\
cluster_dict=cluster_dict,
shuffle_flag=True
)
return todo_srcids
def select_informative_samples(self, sample_num):
pred = self.predict(self.target_srcids)
if self.query_strategy == 'phrase_util':
new_srcids = self.ir2tagset_al_query_samples_phrase_util(
self.target_srcids,
self.target_building,
pred,
sample_num)
else:
raise ValueError('Unknown query strategy: {0}'.format(self.query_strategy))
return new_srcids
def _determine_used_phrases(self, phrases, tagsets):
phrases_usages = list()
pred_tags = reduce(adder, [tagset.split('_') for tagset in tagsets], [])
used_cnt = 0.0
unused_cnt = 0.0
for phrase in phrases:
phrase_tags = phrase.split('_')
for tag in phrase_tags:
if tag in ['leftidentifier', 'rightidentifier']:
continue
if tag in pred_tags:
used_cnt += 1 / len(phrase_tags)
else:
unused_cnt += 1 / len(phrase_tags)
if used_cnt == 0:
score = 0
else:
score = used_cnt / (used_cnt + unused_cnt)
return score
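# Worked example for _determine_used_phrases (hypothetical values): with
# phrases = ['supply_air', 'temperature'] and tagsets = ['supply_air_flow_sensor'],
# pred_tags = {'supply', 'air', 'flow', 'sensor'}; 'supply_air' contributes 0.5 + 0.5 to
# used_cnt, 'temperature' contributes 1.0 to unused_cnt, so the score is 1.0/2.0 = 0.5.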
def _predict_and_proba(self, target_srcids):
#return self.tagset_classifier, self.tagset_vectorizer, self.tagset_binarizer, self.ts2ir
phrase_dict = {srcid: self.phrase_dict[srcid]
for srcid in target_srcids}
if self.ts_flag:
phrase_dict = self._augment_phrases_with_ts(phrase_dict, target_srcids, self.ts2ir)
doc = [' '.join(phrase_dict[srcid]) for srcid in target_srcids]
vect_doc = self.tagset_vectorizer.transform(doc) # should this be fit_transform?
if self.autoencode:
vect_doc = self.encoder.transform(vect_doc)
certainty_dict = dict()
tagsets_dict = dict()
pred_mat = self.tagset_classifier.predict(vect_doc)
prob_mat = self.tagset_classifier.predict_proba(vect_doc)
if not isinstance(pred_mat, np.ndarray):
try:
pred_mat = pred_mat.toarray()
except:
pred_mat = np.asarray(pred_mat)
pred_tagsets_dict = dict()
pred_certainty_dict = dict()
pred_point_dict = dict()
for i, (srcid, pred) in enumerate(zip(target_srcids, pred_mat)):
#for i, (srcid, pred, point_pred) \
#in enumerate(zip(srcids, pred_mat, point_mat)):
pred_tagsets_dict[srcid] = self.tagset_binarizer.inverse_transform(np.asarray([pred]))
|
import faiss
import pandas as pd
import time
import numpy as np
import torch
import os
from scipy import stats as s
class knn:
def __init__(self, datafile, savefile=None, knn_size=10, save_to_file=True, resume=True):
self.knn_size = knn_size
self.x_data = None
self.y_data = None
self.save_file = datafile if not savefile else savefile
self.classes = None
self.save_to_file = save_to_file
self.faiss_index = None
# self.faiss_index = faiss.IndexFlatL2()
if datafile and resume:
print(f'loading data from file: {datafile}')
if (os.path.exists(datafile)):
print('File found')
data = torch.load(datafile)
self.x_data = data['x'].numpy()
self.y_data = data['y']
print(
f'Found {self.x_data.shape[0]} points with {len(set(self.y_data))} classes')
print(pd.Series(self.y_data).value_counts())
self.classes = list(set(self.y_data))
self.faiss_index = faiss.IndexFlatL2(self.x_data.shape[-1])
self.faiss_index.add(self.x_data)
else:
print('File not found')
def print_info(self):
print(pd.Series(self.y_data).value_counts())
def add_points(self, x, y):
if self.x_data is None:
self.x_data = np.array(x)
self.y_data = y
self.faiss_index = faiss.IndexFlatL2(self.x_data.shape[-1])
else:
self.x_data = np.concatenate([self.x_data, x])
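# Hedged sketch of how the truncated else-branch presumably continues (assumed, not
# taken from the source): extend the stored labels and index the new vectors as well.
# self.y_data = self.y_data + y
# self.faiss_index.add(np.array(x, dtype='float32'))   # faiss expects float32 vectors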
|
import numpy as np
from numpy.linalg import norm
import time
from sketching_matrices import gaussian_random_sketch, subsampled_randomized_hadamard_transforms, sparse_johnson_lindenstrauss_transform
def calculate_hessian_using_sketch(A, m, sketch, sketch_fn, sketch_time, factor_time):
"""
Args:
A: Data matrix
m: sketch size
sketch: Takes string input = {Gaussian, SRHT, Uniform, Sparse JL}. Defaults to Gaussian
sketch_fn: user-defined sketch function
sketch_time: Total sketch time taken already
factor_time: Total factor time taken already
Returns: pseudo-inverse(A^TS^TSA), total sketch time, total factor time.
"""
# 1. Generate sketching matrix S_t
sketch_start_time = time.time()
if sketch_fn is not None:
S_t = sketch_fn(A, m, 0)
else:
if sketch == 'SRHT':
S_t = subsampled_randomized_hadamard_transforms(m, A.shape[0])
elif sketch == 'Uniform':
S_t = (1 / (m ** 0.5)) * np.random.uniform(-1, 1, size=(m, A.shape[0]))
elif sketch == 'Sparse JL':
s = int(round(((m / A.shape[1]) ** 0.5) * (np.log10(A.shape[1] ** 2))))
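# Hedged sketch of the remaining branches (assumed, not taken from the source): the
# Sparse JL branch presumably builds S_t from sparse_johnson_lindenstrauss_transform
# using s, and the default/'Gaussian' case is a dense Gaussian sketch, e.g.
#     S_t = (1 / np.sqrt(m)) * np.random.randn(m, A.shape[0])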
|
import pandas as pd
import numpy as np
np.random.seed(42)
from sklearn import model_selection
from sklearn.preprocessing import OneHotEncoder
import tensorflow as tf
tf.set_random_seed(42)
from sklearn.metrics import roc_auc_score
from sklearn.preprocessing import normalize
from sklearn.preprocessing import StandardScaler
from keras.datasets import boston_housing
(x_train, y_train), (x_test, y_test) = boston_housing.load_data()
noise_features = 1000
x_train = np.concatenate([x_train, np.random.normal(size=(x_train.shape[0], noise_features))], axis=1)
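# Presumably the test set is augmented the same way so that train/test shapes match
# (assumed continuation, not taken from the source):
# x_test = np.concatenate([x_test, np.random.normal(size=(x_test.shape[0], noise_features))], axis=1)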
|
import numpy as np
import scipy as sp
import scipy.sparse as sps
import scipy.sparse.linalg.eigen.arpack as arp
import sys
from parafermions.MPS import MPS
import time
class MPO(object):
"""
Base MPO class for a chain MPO.
"""
def __init__(self, N, L, dtype=np.dtype('complex128'), existing=None,):
"""
Constructor which builds a trivial MPO with virtual bond dimension 1.
Parameters
-----------
N: int
Physical bond dimension.
L: int
Length of chain.
dtype: datatype
The datatype to use for MPO elements (default=complex128).
existing: MPO object
If an existing MPO object is passed in then this is copied (default=None).
"""
if existing is None:
self.L = L
self.N = N
self.chi = 1
self.I = np.eye(N)
self.dtype = dtype
self.dim = N**L
Ws = dict() # create a dictionary to store tensors
for i in range(L):
W = np.zeros((self.chi,self.chi,N,N), dtype=self.dtype)
W[0,0,:,:] = self.I
Ws[i] = W
self.shape = (N**L, N**L) # for mat vec routine
self.Lp = np.zeros(self.chi, dtype=dtype); self.Lp[0] = 1.0
self.Rp = np.zeros(self.chi, dtype=dtype); self.Rp[-1] = 1.0
self.Ws = Ws
else:
E = existing
self.L = E.L
self.N = E.N
self.chi = E.chi # this is more the maximum chi
self.I = np.eye(E.N)
self.dtype = E.dtype
self.dim = E.N**E.L
Ws = dict() # create a dictionary to store tensors
for i in range(E.L):
Ws[i] = np.copy(E.Ws[i])
self.shape = (E.N**E.L, E.N**E.L) # for mat vec routine
self.Lp = np.copy(E.Lp)
self.Rp = np.copy(E.Rp)
self.Ws = Ws
def getdim(self):
"""
Return full physical dimension of operator.
"""
return self.dim
def fullmat(self):
"""
Get the full matrix. Only for testing with small systems.
"""
r = np.tensordot(self.Ws[self.L-1], self.Rp, axes=([1], [0]))
for j in range(self.L-2, -1, -1):
r = np.tensordot(self.Ws[j], r, axes=([1], [0]))
y = np.tensordot(self.Lp, r, axes=([0],[0]))
y = np.transpose(y, list(range(0, 2*self.L, 2)) + list(range(1, 2*self.L, 2)))
return np.reshape(y, (self.N**self.L, self.N**self.L))
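# Minimal usage sketch (small systems only): the trivial MPO built by the constructor
# has the identity at every site with bond dimension 1, so its dense form should be the
# N**L identity matrix.
# mpo = MPO(N=2, L=3)
# assert np.allclose(mpo.fullmat(), np.eye(2**3))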
def fullmats(self):
"""
Get the full matrix in sparse format.
"""
nnz = (self.L * self.chi) * self.dim
nzis = np.zeros(nnz, dtype=np.int64)
nzjs = np.zeros(nnz, dtype=np.int64)
nzs = np.zeros(nnz, dtype=self.dtype)
fill = 0
i = 0
maxnzs = 0
while i < self.dim:
phys_indices = self.decompose_index(i)
phys_indices = phys_indices[::-1]
# now finally calculate the element by multiplying matrices
# simple way
# r = np.tensordot(self.Rp, self.Ws[self.L-1][:, :,phys_indices[self.L-1],:], axes=([0], [1]))
# for j in range(self.L-2, -1, -1):
# r = np.tensordot(self.Ws[j][:,:,phys_indices[j],:], r, axes=([1], [0]))
# rowvec = np.tensordot(r, self.Lp, axes=([0], [0]))
# more optimal
l = np.tensordot(self.Lp, self.Ws[0][:, :,phys_indices[0],:], axes=([0], [0]))
for site in range(1, int(self.L/2)):
l = np.tensordot(l, self.Ws[site][:,:,phys_indices[site],:], axes=([site-1], [0]))
r = np.tensordot(self.Rp, self.Ws[self.L-1][:, :,phys_indices[self.L-1],:], axes=([0], [1]))
for j in range(self.L-2, int(self.L/2)-1, -1):
r = np.tensordot(self.Ws[j][:,:,phys_indices[j],:], r, axes=([1], [0]))
rowvec = np.tensordot(l, r, axes=([int(self.L/2) - 1], [0]))
# we can save more time by reusing previous partial contractions
rowvec = np.reshape(rowvec, (self.dim))
row_nzjs = np.nonzero(rowvec)[0]
nnzjs = len(row_nzjs)
if nnzjs > maxnzs:
maxnzs = nnzjs
if nnzjs > 0:
if (fill + nnzjs) >= nnz:
print('Oh no, more non zeros than anticipated.')
return None
s = slice(fill, fill + nnzjs, 1)
nzjs[s] = row_nzjs
nzis[s] = i
nzs[s] = rowvec[nzjs[s]]
fill += nnzjs
i += 1
return sps.coo_matrix((nzs[:fill], (nzis[:fill], nzjs[:fill])), shape=(self.dim, self.dim),dtype=self.dtype).tocsr()
def mats_subspace(self, subspace):
"""
Get the matrix of a particular subspace in sparse format.
subspace: 2d int array
Array of basis states of subspace.
"""
subspace_dim = subspace.shape[0]
subspace_indices = np.zeros(subspace_dim, dtype=np.int64)
for i in range(subspace_dim):
subspace_indices[i] = self.recompose_index(subspace[i,:])
# there is a potential issue when using multiple bands that the subspace indices are not sorted.
# we sort here and keep track of mapping.
subspace_indices_order = np.argsort(subspace_indices)
subspace_indices = subspace_indices[subspace_indices_order]
#print 'subspace indices : ' + str(subspace_indices) + ' at ' + str(subspace_indices_order)
#nnz = (self.L * self.chi) * subspace_dim
#nzis = np.zeros(nnz, dtype=np.int)
#nzjs = np.zeros(nnz, dtype=np.int)
#nzs = np.zeros(nnz, dtype=self.dtype)
nzis = []
nzjs = []
nzs = []
fill = 0
i = 0
maxnzs = 0
while i < subspace_dim:
phys_indices = subspace[i,::-1]
# more optimal
l = np.tensordot(self.Lp, self.Ws[0][:, :,phys_indices[0],:], axes=([0], [0]))
for site in range(1, self.L//2):
l = np.tensordot(l, self.Ws[site][:,:,phys_indices[site],:], axes=([site-1], [0]))
|
# This file is part of Frhodo. Copyright © 2020, UChicago Argonne, LLC
# and licensed under BSD-3-Clause. See License.txt in the top-level
# directory for license and copyright information.
import re
from tabulate import tabulate
import matplotlib as mpl
import numpy as np
from scipy import stats
from plot.base_plot import Base_Plot
from plot.draggable import Draggable
class Plot(Base_Plot):
def __init__(self, parent, widget, mpl_layout):
super().__init__(parent, widget, mpl_layout)
self.start_ind = 300
self.end_ind = 800
def info_table_text(self, preshock, postshock, prec=2):
def fix_g_format(value, prec):
text = '{:.{dec}g}'.format(value, dec=prec)
text = text.replace("e+", "e")
return re.sub(r"e(-?)0*(\d+)", r"e\1\2", text)
shock_zone = 2
# table = [['pre-shock', fix_g_format(preshock, prec)],
# ['post-shock', fix_g_format(postshock, prec)],
# ['difference', fix_g_format(postshock-preshock, prec)]]
table = [['pre-shock', '{:.{dec}f}'.format(preshock, dec=prec)],
['post-shock', '{:.{dec}f}'.format(postshock, dec=prec)],
['difference', '{:.{dec}f}'.format(postshock-preshock, dec=prec)]]
table = tabulate(table).split('\n')[1:-1] # removes header and footer
table.insert(0, 'Baseline Averages')
table_left_justified = []
max_len = len(max(table, key=len))
for line in table:
table_left_justified.append('{:<{max_len}}'.format(line, max_len=max_len))
return '\n'.join(table_left_justified)
def create_canvas(self):
self.ax = []
self.ax.append(self.fig.add_subplot(1,1,1))
self.ax[0].item = {}
self.ax[0].item['exp_data'] = self.ax[0].scatter([],[], color='#0C94FC', facecolors='#0C94FC',
s=16, linewidth=0.5, alpha = 0.85)
self.ax[0].item['start_divider'] = self.ax[0].add_line(mpl.lines.Line2D([],[],
marker='$'+'\u2336'+'$', markersize=18, markerfacecolor='0', markeredgecolor='0', markeredgewidth=0.5,
zorder=2))
self.ax[0].item['start_avg_l'] = self.ax[0].add_line(mpl.lines.Line2D([],[], ls='--', c= '0'))
self.ax[0].item['end_divider'] = self.ax[0].add_line(mpl.lines.Line2D([],[],
marker='$'+'\u2336'+'$', markersize=18, markerfacecolor='0', markeredgecolor='0', markeredgewidth=0.5,
zorder=2))
self.ax[0].item['end_avg_l'] = self.ax[0].add_line(mpl.lines.Line2D([],[], ls='--', c= '0'))
self.ax[0].item['textbox'] = self.ax[0].text(.98,.98, self.info_table_text(0, 0), fontsize=10, fontname='DejaVu Sans Mono',
horizontalalignment='right', verticalalignment='top', transform=self.ax[0].transAxes)
self.fig.subplots_adjust(left=0.06, bottom=0.065, right=0.98,
top=0.97, hspace=0, wspace=0.12)
# Create canvas from Base
super().create_canvas()
# Add draggable lines
draggable_items = [[0, 'start_divider'], [0, 'end_divider']]
for pair in draggable_items:
n, name = pair # n is the axis number, name is the item key
update_fcn = lambda x, y, name=name: self.draggable_update_fcn(name, x, y)
self.ax[n].item[name].draggable = Draggable(self, self.ax[n].item[name], update_fcn)
def draggable_update_fcn(self, name, x, y):
if self.parent.display_shock['raw_data'].size == 0: return
x0, xpress, xnew, xpressnew = x['0'], x['press'], x['new'], x['press_new']
y0, ypress, ynew, ypressnew = y['0'], y['press'], y['new'], y['press_new']
if name == 'start_divider':
self.start_ind = np.argmin(np.abs(self.t - xnew))
if self.start_ind < 3:
self.start_ind = 3
elif name == 'end_divider':
self.end_ind = np.argmin(np.abs(self.t - xnew))
self.update(estimate_ind=False)
def update(self, estimate_ind=True, update_lim=False):
def shape_data(x,y): return np.transpose(np.vstack((x,y)))
def set_xy(plot, x, y):
plot.set_xdata(x)
plot.set_ydata(y)
def estimateInd(data, frac, alpha):
def pred_int(i_old, i):
SD = np.std(data[:i_old]) # Standard deviation of sample
sigma = SD**2 # Variance of Sample
return t*np.sqrt((sigma/i_old + sigma/(i-i_old))) # Prediction interval for 2 means
def calc_mu_t(i):
mu = np.mean(data[:i]) # Mean of sample
df = i - 1
t = stats.t.ppf(1-alpha/2, df=df)
return mu, t
i_old = int(np.round(np.shape(data)[0]*frac))
i = i_old + 1
mu, t = calc_mu_t(i_old)
i_max = np.shape(data)[0] - 1
j = 0
while i != i_old: # sorta bisection, boolean hybrid monstrosity
if np.abs(mu - np.mean(data[i_old:i])) > pred_int(i_old, i):
j = 0
i = int(np.floor((i+i_old)/2))
else:
i_old = i
mu, t = calc_mu_t(i_old) # calculate new mu, t
j += 1
i += j**3 # this is to speed up the search
if i > i_max:
i = i_max
break
return i
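# estimateInd (above) grows a baseline window: starting from a fraction frac of the
# trace, it keeps extending the window while the mean of the newly added points stays
# within a two-mean prediction interval of the baseline mean, bisecting back whenever
# the new points deviate; the returned index marks where the baseline ends.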
parent = self.parent
if np.isnan(parent.display_shock['Sample_Rate']):
self.clear_plot()
return
data = parent.display_shock['raw_data'].reshape(-1,)
self.t = np.arange(np.shape(data)[0])/parent.display_shock['Sample_Rate']
t = self.t
if estimate_ind:
self.start_ind = estimateInd(data, frac=0.1, alpha=0.002) # 2-tail 99.9%
self.end_ind = -estimateInd(data[::-1], frac=0.1, alpha=0.001) # 2-tail 99.95%
start_ind = self.start_ind
end_ind = self.end_ind
self.ax[0].item['exp_data'].set_offsets(shape_data(t, data))
start_avg = np.mean(data[:start_ind])
|
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 4 18:03:24 2017
@author: Kester
"""
import numpy as np
from scipy import stats
class Trepan(object):
"""
A class that represents the TREPAN tree model. Must be initialised with an
oracle, and then fitted to a specific dataset to build the tree.
Variables:
maxsize: maximum number of internal nodes to grow the tree to (before pruning)
minsamples: the minimum number of samples to use for determining each m-of-n test
significance: the significance threshold to use when determining if a node
has a different distribution of values for a particular feature
from the previous node
improvement: the percentage improvement on the previous test needed to
accept an expanded m-of-n test
oracle: a fitted model used as an oracle
Public methods:
fit(samples): Fits a TREPAN tree using the oracle and the samples provided.
predict(samples): Uses an already fitted tree to predict a class for
each of the samples provided.
draw_tree(filename): Outputs an already fitted tree as a .dot graph to
the provided filenname.
oracle_predict(samples): Predict a class using the oracle for the samples
provided.
"""
def __init__(self,
oracle,
maxsize=10,
minsamples=1000,
significance=0.05,
improvement=1.05,
verbose=False,
logging=False):
self.maxsize = maxsize
self.minsamples = minsamples
self.significance = significance
self.improvement = improvement
self.verbose = verbose
self.logging = logging
self.tree = {}
self.fitted = False
self.building = False
self.oracle = oracle
def fit(self, traindata, trainlabels=[], testdata=[], testlabels=[], featnames=[]):
"""
Takes a set of training data (as a numpy array), saves it as part of
the class, and calls the build_tree method to make a TREPAN tree.
"""
if self.fitted:
raise Exception('This TREPAN model has already been fitted.')
self.traindata = traindata
self.trainlabels = trainlabels
self.testdata = testdata
self.testlabels = testlabels
self.numsamples = traindata.shape[0]
self.numfeats = traindata.shape[1]
# Check inputs match up
if self.logging and len(self.trainlabels) != self.numsamples:
raise Exception('Number of training examples and labels do not match')
if self.logging and self.testdata.shape[1] != self.numfeats:
raise Exception('Test data has incorrect number of features')
if self.logging and len(self.testlabels) != self.testdata.shape[0]:
raise Exception('Number of test examples and labels do not match')
if len(featnames):
self.featnames = featnames
else:
self.featnames = [str(i) for i in range(self.numfeats)]
if len(self.featnames) != self.numfeats:
raise Exception('Number of feature names does not match number of features')
# Make sure tree is empty before we build
self.tree = {}
self.__build_tree()
self.fitted = True
def __build_tree(self):
"""
A high-level function to build a TREPAN tree, consisting of creating a
tree and repeatedly expanding it until certain stopping conditions are
reached, then pruning branches.
The tree is stored as a dictionary of nodes, with the node name as the
key.
"""
# Record that we're currently building the tree
self.building = True
# Initialise list of best node values
self.fvalues = {}
# Create initial node with name '1'
self.tree['1'] = self.__create_initial_node('1')
if self.verbose:
print("Initialising tree...")
# Initialise accuracy and fidelity logs if appropriate
if self.logging:
self.trainaccs = []
self.trainfids = []
self.testaccs = []
self.testfids = []
finished = False
# Repeat node expansion until stopping conditions reached
while not finished:
# Find best node with the max f-value in the dictionary
bestnode = max(self.fvalues, key=lambda x: self.fvalues[x])
# Expand it
wasexpanded = self.__expand_node(bestnode)
# Log if necessary
if self.logging and wasexpanded:
self.trainaccs.append(self.accuracy(self.traindata, self.trainlabels))
self.trainfids.append(self.fidelity(self.traindata))
if len(self.testdata) > 0:
self.testaccs.append(self.accuracy(self.testdata, self.testlabels))
self.testfids.append(self.fidelity(self.testdata))
# Check there are still nodes to expand
finished = self.__check_stopping()
self.__prune_tree()
self.building = False
def __check_stopping(self):
"""
Checks whether tree building should be finished. This is either if there
are no nodes in the queue to expand, or the number of internal nodes
in the tree is equal to the maximum size specified.
"""
internalnodes = sum([not self.tree[node]['isleaf'] for node in self.tree])
if len(self.fvalues) == 0 or internalnodes >= self.maxsize:
finished = True
else:
finished = False
return finished
def __create_initial_node(self, nodename):
"""
Creates the initial node for the tree; a simple wrapper for dict creation.
Reach and fidelity are both set to 1. Many other variables are set
to None and will be defined when this node is expanded as the first
part of the tree-building process.
See description of __create_node method for explanation of all the
variables saved in a node.
"""
constraints = []
reach = 1
fidelity = 1
self.fvalues[nodename] = reach * (1 - fidelity)
# All training data reaches this node
reached = [True for i in range(self.numsamples)]
isleaf = True
# Predict (ONLY for this initial node) using raw predictions from oracle -
# no drawn samples
predictedclass = stats.mode(self.oracle_predict(self.traindata[reached])).mode[0]
mntest = None
daughter0 = None
daughter1 = None
parent = None
sister = None
node = {
'constraints': constraints,
'reach': reach,
'fidelity': fidelity,
'reached': reached,
'isleaf': isleaf,
'predictedclass': predictedclass,
'mntest': mntest,
'0daughter': daughter0,
'1daughter': daughter1,
'parent': parent,
'sister': sister
}
return node
def __create_node(self, nodename, parent, prediction, sister, passed):
"""
Creates a new node using the passed variables. Includes calculation of
samples that reach this node, plus predicting local labels for a predicted
class and the fidelity.
Values in node dictionary:
constraints: A list of the m-of-n tests that must be passed/failed
to reach this node.
reach: The proportion of total training data that reaches this node.
reached: A boolean array showing the exact training data examples
that reach this node.
predictedclass: The predicted class at this node (i.e. the modal
value predicted by the oracle for samples at the node)
fidelity: The proportion of samples at this node for which the
node's predicted class matches their class as predicted by
the oracle.
mntest: The m-of-n test for this node. m-of-n tests are stored as a
tuple with the format (m, [f1, f2...fn]).
fn values are also tuples, with the format
(feature, threshold, greater), where feature is the integer
index of the feature being split on, threshold is the floating
point value being checked against, and greater is a boolean
which indicates whether a value must be equal or greater than
(True) or less than (False) the threshold to count towards
passing the test.
Finally, when a test is added to a constraints list, it
includes a third value, passed, a boolean that indicates
whether the test must be passed or failed to reach the
relevant node.
isleaf: A boolean showing whether a node is a leaf node.
parent: The name of the node's parent node
0daughter: The name of the node's daughter node when its m-of-n test
is failed.
1daughter: The name of the node's daughter node when its m-of-n test
is failed.
sister: The name of the node's sister node (i.e. they have the same parent)
"""
# Create constraints from parent's constraints plus its m-of-n test
newtest = (self.tree[parent]['mntest'][0], self.tree[parent]['mntest'][1], passed)
constraints = self.tree[parent]['constraints'] + list([newtest])
# Find how many samples reach the node (as boolean array)
reached = [
self.__passes_mn_tests(self.traindata[i, :], constraints)
for i in range(self.numsamples)
]
reach = sum(reached) / self.numsamples
# Get labels for samples that reach the node
localsamples = self.traindata[reached]
labels = self.oracle_predict(localsamples)
# Assign predicted classes based on the m-of-n test that created this node
predictedclass = prediction
# Use this to calculate fidelity
if reach > 0:
fidelity = sum(labels == predictedclass) / sum(reached)
else:
fidelity = 0
# Get all features already used in this branch
constrainedfeats = []
for test in constraints:
for subtest in test[1]:
constrainedfeats.append(subtest[0])
# If labels are not all the same, and we haven't used all features,
# add this to the list of nodes to expand
if len(np.unique(labels)) > 1 and len(constrainedfeats) < self.numfeats:
self.fvalues[nodename] = reach * (1 - fidelity)
isleaf = True
mntest = None
daughter0 = None
daughter1 = None
node = {
'constraints': constraints,
'reach': reach,
'fidelity': fidelity,
'reached': reached,
'isleaf': isleaf,
'predictedclass': predictedclass,
'mntest': mntest,
'0daughter': daughter0,
'1daughter': daughter1,
'parent': parent,
'sister': sister
}
return node
def __passes_mn_test(self, example, test):
"""
Takes a particular example of test data, and checks whether it passes a
single m-of-n test that is also provided.
"""
testpassed = False
counter = 0
featurespassed = 0
# Pull out m and n for clarity
m = test[0]
n = len(test[1])
# Loop until either the test is passed or we get to n features tested
while (not testpassed) and counter < n:
# Pull out details for particular subtest
feature = test[1][counter][0]
threshold = test[1][counter][1]
greater = test[1][counter][2]
# Check if subtest passed
if (greater and example[feature] >= threshold) or \
((not greater) and example[feature] < threshold):
featurespassed += 1
# Check if overall test passed
if featurespassed >= m:
testpassed = True
counter += 1
return testpassed
def __passes_mn_tests(self, example, constraints):
"""
Takes an example and a list of m-of-n tests and checks if all tests
are passed by that particular sample.
"""
allpassed = True
counter = 0
# Loop over tests until one is failed or we have tested them all
while allpassed and counter < len(constraints):
passed = self.__passes_mn_test(example,
(constraints[counter][0], constraints[counter][1]))
if passed != constraints[counter][2]:
allpassed = False
counter += 1
return allpassed
def __expand_node(self, nodename):
"""
Expands a provided node by constructing an m-of-n test and using it to
create two daughter nodes depending on whether it was passed or failed.
"""
# Construct m-of-n test
constructed = self.__construct_test(nodename)
mntest = constructed[0]
passclass = constructed[1]
failclass = constructed[2]
wasexpanded = False
# Check we made a test before editing the node - otherwise, skip to the
# end and just keep this node as an unexpanded leaf
if mntest is not None:
# Add test to the node
self.tree[nodename]['mntest'] = mntest
# Generate daughter nodes
daughter0 = nodename + '0'
daughter1 = nodename + '1'
if self.verbose:
print("Creating new nodes...")
self.tree[daughter0] = self.__create_node(daughter0, nodename, failclass, daughter1,
False)
self.tree[daughter1] = self.__create_node(daughter1, nodename, passclass, daughter0,
True)
# Adjust the current node's values to register expansion
self.tree[nodename]['0daughter'] = daughter0
self.tree[nodename]['1daughter'] = daughter1
self.tree[nodename]['isleaf'] = False
wasexpanded = True
del self.fvalues[nodename]
# Return so we know if an expansion actually happened (i.e. an m-of-n
# test was found)
return wasexpanded
def __draw_sample(self, nodename):
"""
A function that takes the name of a node in the tree, and draws extra
samples if fewer than the minimum allowed number of samples reach
that node (e.g. if we want 10,000 examples and have 9,100, we will
draw 900).
The distributions are calculated with Gaussian Kernel Density Estimators
and must be drawn in accordance with the relevant constraints in order
for examples to reach the node.
The list distrnodes is used and created by this function. This is a list
of length equal to the number of features, and each entry is a node name,
showing which node's feature distribution is currently being used to
calculate that feature for this branch. (e.g. if we are on node '100',
and the entry for feature 3 is '1', we would check the current
distribution of feature 3 against that of node '1', and if it is not
significantly different, keep using node '1' as the source distribution for feature 3).
"""
# Create a dictionary to hold the features for which only a single value
# makes it through to this node. Such a feature will always take that value,
# as it's impossible to create a KDE for a single-valued distribution.
singlevalsdict = {}
# Find local samples and calculate how many are needed
localsamples = self.traindata[self.tree[nodename]['reached']]
samplesneeded = self.minsamples - localsamples.shape[0]
# Find nodes used for kernel construction by parent node by feature
# (used for checking if feature distributions have changed)
parent = self.tree[nodename]['parent']
if nodename == '1':
distrs = ['1' for i in range(self.numfeats)]
else:
distrs = self.tree[parent]['distrnodes']
# Pull out node constraints for use later
constraints = self.tree[nodename]['constraints']
# Create a distribution list for this node
distrnodes = [nodename for i in range(self.numfeats)]
# Set the bandwidth for the KDEs
bandwidth = 1 / np.sqrt(localsamples.shape[0])
# Only do any of this if we need samples
if samplesneeded > 0:
# Create list to store KDEs for each feature
kernels = [None for i in range(self.numfeats)]
for feat in range(self.numfeats):
# Get the appropriate set of samples to check against
distrindices = self.tree[distrs[feat]]['reached']
parentsamples = self.traindata[distrindices]
# Check if the distribution of this feature differs from the parent
# node's, using a Kolmogorov-Smirnov test
# with a Bonferroni correction
if stats.ks_2samp(localsamples[:, feat],
parentsamples[:, feat])[1] <= self.significance / self.numfeats:
# Check for single values
uniques = np.unique(localsamples[:, feat])
if len(uniques) == 1:
singlevalsdict[feat] = uniques[0]
# If not single-valued, create KDE
else:
kernels[feat] = stats.gaussian_kde(localsamples[:, feat],
bw_method=bandwidth)
else:
# If distribution doesn't differ, do same as above,
# but for parent node instead
uniques =
|
np.unique(parentsamples[:, feat])
|
numpy.unique
|
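# --- Illustrative sketch (not part of the record above) ---
# A minimal, self-contained example of the constraint format assumed by
# __create_node / __passes_mn_test: each constraint is a tuple
# (m, [(feature_index, threshold, greater_than), ...], must_pass).
# The feature vector, thresholds and helper below are hypothetical.
import numpy as np

def passes_mn_test(example, m, subtests):
    # Count how many subtests the example satisfies; the test passes if at least m do.
    hits = 0
    for feature, threshold, greater in subtests:
        if (greater and example[feature] >= threshold) or \
           ((not greater) and example[feature] < threshold):
            hits += 1
    return hits >= m

example = np.array([0.2, 3.5, 1.0])
constraint = (2, [(0, 0.5, False), (1, 3.0, True), (2, 2.0, True)], True)
m, subtests, must_pass = constraint
print(passes_mn_test(example, m, subtests) == must_pass)  # True: 2 of the 3 subtests hold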
#! /usr/bin/env python
"""
MIMAS - The Multi-resolution Image Mask for Depexo Software
TODO: Write an in/out reader for MOC formats described by
http://arxiv.org/abs/1505.02937
"""
from __future__ import print_function
import logging
import numpy as np
import os
import re
from astropy.coordinates import Angle, SkyCoord
import astropy.units as u
from astropy.io import fits as pyfits
from astropy.wcs import wcs as pywcs
import healpy as hp
from .regions import Region
from .catalogs import load_table, write_table
__author__ = "<NAME>"
__version__ = 'v1.3.1'
__date__ = '2018-08-29'
# globals
filewcs = None
class Dummy():
"""
A state storage class for MIMAS to work with.
Attributes
----------
add_region : list
List of :class:`depexoTools.MIMAS.Region` to be added.
rem_region : list
List of :class:`depexoTools.MIMAS.Region` to be subtracted.
include_circles : [[ra, dec, radius],...]
List of circles to be added to the region, units are degrees.
exclude_circles : [[ra, dec, radius], ...]
List of circles to be subtracted from the region, units are degrees.
include_polygons : [[ra,dec, ...], ...]
List of polygons to be added to the region, units are degrees.
exclude_polygons : [[ra,dec, ...], ...]
List of polygons to be subtracted from the region, units are degrees.
maxdepth : int
Depth or resolution of the region for HEALPix.
There are 12*4**maxdepth pixels at the deepest layer.
Default = 8.
galactic: bool
If true then all ra/dec coordinates will be interpreted as if they were in galactic
lat/lon (degrees)
"""
def __init__(self, maxdepth=8):
self.add_region = []
self.rem_region = []
self.include_circles = []
self.exclude_circles = []
self.include_polygons = []
self.exclude_polygons = []
self.maxdepth = maxdepth
self.galactic = False
return
def galactic2fk5(l, b):
"""
Convert galactic l/b to fk5 ra/dec
Parameters
----------
l, b : float
Galactic coordinates in radians.
Returns
-------
ra, dec : float
FK5 (J2000) equatorial coordinates in radians.
"""
a = SkyCoord(l, b, unit=(u.radian, u.radian), frame='galactic')
return a.fk5.ra.radian, a.fk5.dec.radian
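# A short, hedged usage sketch for galactic2fk5 (the hypothetical helper below is
# never called; input values are illustrative): the Galactic centre (l = 0, b = 0)
# should come out near ra ~ 266.4 deg, dec ~ -28.9 deg.
def _example_galactic2fk5():
    ra, dec = galactic2fk5(0.0, 0.0)       # inputs and outputs are in radians
    print(np.degrees(ra), np.degrees(dec))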
def mask_plane(data, wcs, region, negate=False):
"""
Mask a 2d image (data) such that pixels within 'region' are set to nan.
Parameters
----------
data : 2d-array
Image array.
wcs : astropy.wcs.WCS
WCS for the image in question.
region : :class:`depexoTools.regions.Region`
A region within which the image pixels will be masked.
negate : bool
If True then pixels *outside* the region are masked.
Default = False.
Returns
-------
masked : 2d-array
The original array, but masked as required.
"""
# create an array but don't set the values (they are random)
indexes = np.empty((data.shape[0]*data.shape[1], 2), dtype=int)
# since I know exactly what the index array needs to look like, I can construct
# it faster than a list comprehension would allow
# we do this only once and then recycle it
idx = np.array([(j, 0) for j in range(data.shape[1])])
j = data.shape[1]
for i in range(data.shape[0]):
idx[:, 1] = i
indexes[i*j:(i+1)*j] = idx
# put ALL the pixels into our vectorized functions and minimise our overheads
ra, dec = wcs.wcs_pix2world(indexes, 1).transpose()
bigmask = region.sky_within(ra, dec, degin=True)
if not negate:
bigmask = np.bitwise_not(bigmask)
# rework our 1d list into a 2d array
bigmask = bigmask.reshape(data.shape)
# and apply the mask
data[bigmask] = np.nan
return data
def mask_file(regionfile, infile, outfile, negate=False):
"""
Create a masked version of a file, using a region.
Parameters
----------
regionfile : str
A file which can be loaded as a :class:`depexoTools.regions.Region`.
The image will be masked according to this region.
infile : str
Input FITS image.
outfile : str
Output FITS image.
negate : bool
If True then pixels *outside* the region are masked.
Default = False.
See Also
--------
:func:`depexoTools.MIMAS.mask_plane`
"""
# Check that the input file is accessible and then open it
if not os.path.exists(infile): raise AssertionError("Cannot locate fits file {0}".format(infile))
im = pyfits.open(infile)
if not os.path.exists(regionfile): raise AssertionError("Cannot locate region file {0}".format(regionfile))
region = Region.load(regionfile)
try:
wcs = pywcs.WCS(im[0].header, naxis=2)
except: # TODO: figure out what error is being thrown
wcs = pywcs.WCS(str(im[0].header), naxis=2)
if len(im[0].data.shape) > 2:
data = np.squeeze(im[0].data)
else:
data = im[0].data
print(data.shape)
if len(data.shape) == 3:
for plane in range(data.shape[0]):
mask_plane(data[plane], wcs, region, negate)
else:
mask_plane(data, wcs, region, negate)
im[0].data = data
im.writeto(outfile, overwrite=True)
logging.info("Wrote {0}".format(outfile))
return
def mask_table(region, table, negate=False, racol='ra', deccol='dec'):
"""
Apply a given mask (region) to the table, removing all the rows with ra/dec inside the region
If negate=True then remove the rows with ra/dec outside the region.
Parameters
----------
region : :class:`depexoTools.regions.Region`
Region to mask.
table : Astropy.table.Table
Table to be masked.
negate : bool
If True then rows with ra/dec *outside* the region are removed instead.
Default = False.
racol, deccol : str
The name of the columns in `table` that should be interpreted as ra and dec.
Default = 'ra', 'dec'
Returns
-------
masked : Astropy.table.Table
A view of the given table which has been masked.
"""
inside = region.sky_within(table[racol], table[deccol], degin=True)
if not negate:
mask = np.bitwise_not(inside)
else:
mask = inside
return table[mask]
def mask_catalog(regionfile, infile, outfile, negate=False, racol='ra', deccol='dec'):
"""
Apply a region file as a mask to a catalog, removing all the rows with ra/dec inside the region
If negate=True then remove the rows with ra/dec outside the region.
Parameters
----------
regionfile : str
A file which can be loaded as a :class:`depexoTools.regions.Region`.
The catalogue will be masked according to this region.
infile : str
Input catalogue.
outfile : str
Output catalogue.
negate : bool
If True then rows with ra/dec *outside* the region are removed instead.
Default = False.
racol, deccol : str
The name of the columns in `table` that should be interpreted as ra and dec.
Default = 'ra', 'dec'
See Also
--------
:func:`depexoTools.MIMAS.mask_table`
:func:`depexoTools.catalogs.load_table`
"""
logging.info("Loading region from {0}".format(regionfile))
region = Region.load(regionfile)
logging.info("Loading catalog from {0}".format(infile))
table = load_table(infile)
masked_table = mask_table(region, table, negate=negate, racol=racol, deccol=deccol)
write_table(masked_table, outfile)
return
def mim2reg(mimfile, regfile):
"""
Convert a MIMAS region (.mim) file into a DS9 region (.reg) file.
Parameters
----------
mimfile : str
Input file in MIMAS format.
regfile : str
Output file.
"""
region = Region.load(mimfile)
region.write_reg(regfile)
logging.info("Converted {0} -> {1}".format(mimfile, regfile))
return
def mim2fits(mimfile, fitsfile):
"""
Convert a MIMAS region (.mim) file into a MOC region (.fits) file.
Parameters
----------
mimfile : str
Input file in MIMAS format.
fitsfile : str
Output file.
"""
region = Region.load(mimfile)
region.write_fits(fitsfile, moctool='MIMAS {0}-{1}'.format(__version__, __date__))
logging.info("Converted {0} -> {1}".format(mimfile, fitsfile))
return
def mask2mim(maskfile, mimfile, threshold=1.0, maxdepth=8):
"""
Use a fits file as a mask to create a region file.
Pixels in the mask file that are at or above the threshold will be included in the region,
while those below the threshold will not.
Parameters
----------
maskfile : str
Input file in fits format.
mimfile : str
Output filename
threshold : float
threshold value for separating include/exclude values
maxdepth : int
Maximum depth (resolution) of the healpix pixels
"""
hdu = pyfits.open(maskfile)
wcs = pywcs.WCS(hdu[0].header)
x, y = np.where(hdu[0].data >= threshold)
ra, dec = wcs.all_pix2world(y, x, 0)
sky = np.radians(Region.radec2sky(ra, dec))
vec = Region.sky2vec(sky)
x, y, z = np.transpose(vec)
pix = hp.vec2pix(2**maxdepth, x, y, z, nest=True)
region = Region(maxdepth=maxdepth)
region.add_pixels(pix, depth=maxdepth)
region._renorm()
save_region(region, mimfile)
logging.info("Converted {0} -> {1}".format(maskfile, mimfile))
return
def box2poly(line):
"""
Convert a string that describes a box in DS9 format into a polygon given by the corners of the box.
Parameters
----------
line : str
A string containing a DS9 region command for a box.
Returns
-------
poly : [ra, dec, ...]
The corners of the box in clockwise order from top left.
"""
words = re.split('[(\s,)]', line)
ra = words[1]
dec = words[2]
width = words[3]
height = words[4]
if ":" in ra:
ra = Angle(ra, unit=u.hour)
else:
ra = Angle(ra, unit=u.degree)
dec = Angle(dec, unit=u.degree)
width = Angle(float(width[:-1])/2, unit=u.arcsecond) # strip the "
height = Angle(float(height[:-1])/2, unit=u.arcsecond) # strip the "
center = SkyCoord(ra, dec)
tl = center.ra.degree+width.degree, center.dec.degree+height.degree
tr = center.ra.degree-width.degree, center.dec.degree+height.degree
bl = center.ra.degree+width.degree, center.dec.degree-height.degree
br = center.ra.degree-width.degree, center.dec.degree-height.degree
return np.ravel([tl, tr, br, bl]).tolist()
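# Hedged usage sketch for box2poly; the hypothetical helper below is never called
# and the DS9 line is made up for illustration. A 60 arcsec x 60 arcsec box centred
# at (180 deg, -45 deg) yields four corners offset by +/- 30 arcsec (in degrees)
# from the centre, returned in the order top-left, top-right, bottom-right, bottom-left.
def _example_box2poly():
    line = 'box(180.0,-45.0,60",60",0)'
    print(box2poly(line))   # eight floats: ra/dec pairs of the four corners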
def circle2circle(line):
"""
Parse a string that describes a circle in ds9 format.
Parameters
----------
line : str
A string containing a DS9 region command for a circle.
Returns
-------
circle : [ra, dec, radius]
The center and radius of the circle.
"""
words = re.split('[(,\s)]', line)
ra = words[1]
dec = words[2]
radius = words[3][:-1] # strip the "
if ":" in ra:
ra = Angle(ra, unit=u.hour)
else:
ra = Angle(ra, unit=u.degree)
dec = Angle(dec, unit=u.degree)
radius = Angle(radius, unit=u.arcsecond)
return [ra.degree, dec.degree, radius.degree]
def poly2poly(line):
"""
Parse a string of text containing a DS9 description of a polygon.
This function works but is not very robust due to the constraints of healpy.
Parameters
----------
line : str
A string containing a DS9 region command for a polygon.
Returns
-------
poly : [ra, dec, ...]
The coordinates of the polygon.
"""
words = re.split('[(\s,)]', line)
ras = np.array(words[1::2])
decs = np.array(words[2::2])
coords = []
for ra, dec in zip(ras, decs):
if ra.strip() == '' or dec.strip() == '':
continue
if ":" in ra:
pos = SkyCoord(Angle(ra, unit=u.hour), Angle(dec, unit=u.degree))
else:
pos = SkyCoord(Angle(ra, unit=u.degree), Angle(dec, unit=u.degree))
# append this point (note: no minimum-separation check is actually applied here)
coords.extend([pos.ra.degree, pos.dec.degree])
return coords
def reg2mim(regfile, mimfile, maxdepth):
"""
Parse a DS9 region file and write a MIMAS region (.mim) file.
Parameters
----------
regfile : str
DS9 region (.reg) file.
mimfile : str
MIMAS region (.mim) file.
maxdepth : int
Depth/resolution of the region file.
"""
logging.info("Reading regions from {0}".format(regfile))
lines = (l for l in open(regfile, 'r') if not l.startswith('#'))
poly = []
circles = []
for line in lines:
if line.startswith('box'):
poly.append(box2poly(line))
elif line.startswith('circle'):
circles.append(circle2circle(line))
elif line.startswith('polygon'):
logging.warning("Polygons break a lot, but I'll try this one anyway.")
poly.append(poly2poly(line))
else:
logging.warning("Not sure what to do with {0}".format(line[:-1]))
container = Dummy(maxdepth=maxdepth)
container.include_circles = circles
container.include_polygons = poly
region = combine_regions(container)
save_region(region, mimfile)
return
def combine_regions(container):
"""
Return a region that is the combination of those specified in the container.
The container is typically a results instance that comes from argparse.
Order of construction is: add regions, subtract regions, add circles, subtract circles,
add polygons, subtract polygons.
Parameters
----------
container : :class:`depexoTools.MIMAS.Dummy`
The regions to be combined.
Returns
-------
region : :class:`depexoTools.regions.Region`
The constructed region.
"""
# create empty region
region = Region(container.maxdepth)
# add/rem all the regions from files
for r in container.add_region:
logging.info("adding region from {0}".format(r))
r2 = Region.load(r[0])
region.union(r2)
for r in container.rem_region:
logging.info("removing region from {0}".format(r))
r2 = Region.load(r[0])
region.without(r2)
# add circles
if len(container.include_circles) > 0:
for c in container.include_circles:
circles = np.radians(
|
np.array(c)
|
numpy.array
|
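# --- Illustrative sketch (not part of the record above) ---
# A minimal usage sketch for the container/region workflow in the MIMAS module:
# build a Dummy container, attach circles/polygons in degrees, then combine.
# The coordinates are fabricated, and save_region is assumed to be the helper
# defined elsewhere in the same module (it is referenced but not shown above).
def _example_combine_regions():
    container = Dummy(maxdepth=8)
    container.include_circles = [[180.0, -45.0, 1.0]]     # ra, dec, radius (degrees)
    container.include_polygons = [[10.0, 10.0, 11.0, 10.0, 11.0, 11.0, 10.0, 11.0]]
    region = combine_regions(container)
    save_region(region, 'example.mim')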
from __future__ import print_function
import unittest
import numpy as np
import os
import shutil
import tempfile
import coremltools.models.datatypes as datatypes
from coremltools.models import neural_network as neural_network
from coremltools.models.utils import macos_version
import coremltools
import itertools
np.random.seed(10)
class CorrectnessTest(unittest.TestCase):
def _compare_shapes(self, np_preds, coreml_preds):
if np.squeeze(np_preds).shape != np.squeeze(coreml_preds).shape:
return False
else:
return True
def _compare_predictions(self, np_preds, coreml_preds, delta = .01):
np_preds = np_preds.flatten()
coreml_preds = coreml_preds.flatten()
for i in range(len(np_preds)):
max_den = max(1.0, np_preds[i], coreml_preds[i])
if np.abs(np_preds[i] / max_den - coreml_preds[i] / max_den) > delta:
return False
return True
def get_size_after_stride(X, params):
start = params["start"]
end = params["end"]
stride = params["stride"]
if params["axis"] == 'width': axis = 2
if params["axis"] == 'height': axis = 1
if params["axis"] == 'channel': axis = 0
N = X.shape[axis]
if end < 0: end = end + N
end = min(end, N)
if start > N-1:
L = 0
else:
L = np.floor((end - 1 - start)/stride) + 1
if L<0 : L = 0
return L
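# Worked example for get_size_after_stride (the hypothetical helper below is never
# called; values are illustrative): slicing an axis of length 50 with start=1,
# end=100, stride=2 first clamps end to 50, then returns
# floor((50 - 1 - 1) / 2) + 1 = 25, which matches the numpy slice length.
def _example_size_after_stride():
    X = np.zeros((3, 50, 4))
    params = {"start": 1, "end": 100, "stride": 2, "axis": "height"}
    assert get_size_after_stride(X, params) == len(range(1, 50, 2))  # 25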
def get_numpy_predictions_slice(X, params):
start = params["start"]
end = params["end"]
stride = params["stride"]
if params["axis"] == 'width': return X[:,:,start:end:stride]
if params["axis"] == 'height': return X[:,start:end:stride,:]
if params["axis"] == 'channel': return X[start:end:stride,:,:]
def get_coreml_predictions_slice(X, params):
coreml_preds = []
eval = True
try:
input_dim = X.shape
output_dim = (1, 1, 1) #some random dimensions here: we are going to remove this information later
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', datatypes.Array(*output_dim))]
builder = neural_network.NeuralNetworkBuilder(input_features, output_features)
builder.add_slice('slice', 'data', 'output', start_index = params["start"],
end_index = params["end"], stride = params["stride"], axis = params["axis"])
#Remove output shape by deleting and adding an output
del builder.spec.description.output[-1]
output = builder.spec.description.output.add()
output.name = 'output'
output.type.multiArrayType.dataType = coremltools.proto.FeatureTypes_pb2.ArrayFeatureType.ArrayDataType.Value('DOUBLE')
#save the model
model_dir = tempfile.mkdtemp()
model_path = os.path.join(model_dir, 'test_layer.mlmodel')
coremltools.utils.save_spec(builder.spec, model_path)
#prepare input and get predictions
coreml_model = coremltools.models.MLModel(model_path)
coreml_input = {'data': X}
if macos_version() >= (10, 13):
coreml_preds = coreml_model.predict(coreml_input)['output']
else:
coreml_preds = None
if os.path.exists(model_dir):
shutil.rmtree(model_dir)
except RuntimeError as e:
print(e)
eval = False
return coreml_preds, eval
def get_numpy_predictions_reduce(X, params):
if params["axis"] == 'CHW': axis = (0,1,2)
if params["axis"] == 'HW' : axis = (1,2)
if params["axis"] == 'C' : axis = 0
if params["axis"] == 'H' : axis = 1
if params["axis"] == 'W' : axis = 2
if params["mode"] == 'sum': return np.sum(X, axis)
if params["mode"] == 'avg': return np.mean(X, axis)
if params["mode"] == 'prod': return np.prod(X, axis)
if params["mode"] == 'logsum': return np.sum(np.log(X+1e-6), axis)
if params["mode"] == 'sumsquare': return np.sum(X ** 2, axis)
if params["mode"] == 'L2': return np.sqrt(np.sum(X ** 2, axis))
if params["mode"] == 'L1': return np.sum(np.abs(X), axis)
if params["mode"] == 'max': return np.amax(X, axis)
if params["mode"] == 'min': return np.amin(X, axis)
if params["mode"] == 'argmax': return np.argmax(X, axis)
def get_coreml_predictions_reduce(X, params):
coreml_preds = []
eval = True
try:
input_dim = X.shape
output_dim = (1, 1, 1) #some random dimensions here: we are going to remove this information later
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', datatypes.Array(*output_dim))]
builder = neural_network.NeuralNetworkBuilder(input_features, output_features)
builder.add_reduce('reduce', 'data', 'output', axis = params["axis"], mode = params["mode"])
#Remove output shape by deleting and adding an output
del builder.spec.description.output[-1]
output = builder.spec.description.output.add()
output.name = 'output'
output.type.multiArrayType.dataType = coremltools.proto.FeatureTypes_pb2.ArrayFeatureType.ArrayDataType.Value('DOUBLE')
#save the model
model_dir = tempfile.mkdtemp()
model_path = os.path.join(model_dir, 'test_layer.mlmodel')
coremltools.utils.save_spec(builder.spec, model_path)
#prepare input and get predictions
coreml_model = coremltools.models.MLModel(model_path)
coreml_input = {'data': X}
if macos_version() >= (10, 13):
coreml_preds = coreml_model.predict(coreml_input)['output']
else:
coreml_preds = None
if os.path.exists(model_dir):
shutil.rmtree(model_dir)
except RuntimeError as e:
print(e)
eval = False
return coreml_preds, eval
def get_coreml_predictions_unary(x, mode, alpha = 1.0):
#create a tiny mlmodel
input_dim = x.shape
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', datatypes.Array(*input_dim))]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_unary(name= 'unary', input_name = 'data', output_name = 'output', mode = mode, alpha = alpha)
#save the model
model_dir = tempfile.mkdtemp()
model_path = os.path.join(model_dir, 'test_layer.mlmodel')
coremltools.utils.save_spec(builder.spec, model_path)
#prepare input and get predictions
coreml_model = coremltools.models.MLModel(model_path)
if macos_version() >= (10, 13):
coreml_input = {'data': x}
coreml_preds = coreml_model.predict(coreml_input)['output']
else:
coreml_preds = None
if os.path.exists(model_dir):
shutil.rmtree(model_dir)
return coreml_preds
class SimpleTest(CorrectnessTest):
def test_tiny_upsample_linear_mode(self):
#create a tiny mlmodel
input_dim = (1,1,3) #(C,H,W)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_upsample(name= 'upsample',
scaling_factor_h = 2, scaling_factor_w = 3,
input_name= 'data', output_name= 'output',
mode = 'BILINEAR')
#save the model
model_dir = tempfile.mkdtemp()
model_path = os.path.join(model_dir, 'test_layer.mlmodel')
coremltools.utils.save_spec(builder.spec, model_path)
#prepare input and get predictions
coreml_model = coremltools.models.MLModel(model_path)
if macos_version() >= (10, 13):
coreml_input = {'data': np.reshape(np.array([1.0,2.0,3.0]), (1,1,3))}
coreml_preds = coreml_model.predict(coreml_input)['output']
#hardcoded for this simple test case
numpy_preds = np.array([[1, 1.333, 1.666, 2, 2.333, 2.666, 3, 3, 3],\
[1, 1.333, 1.6666, 2, 2.33333, 2.6666, 3, 3, 3]])
#numpy_preds = np.array([[1, 1, 1, 2, 2, 2, 3, 3, 3],[1, 1, 1, 2, 2, 2, 3, 3, 3]])
#Test
self.assertTrue(self._compare_shapes(numpy_preds, coreml_preds))
self.assertTrue(self._compare_predictions(numpy_preds, coreml_preds))
if os.path.exists(model_dir):
shutil.rmtree(model_dir)
def test_LRN(self):
#create a tiny mlmodel
input_dim = (1,3,3)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', datatypes.Array(*input_dim))]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_lrn(name= 'lrn', input_name = 'data', output_name = 'output',
alpha = 2, beta = 3, local_size = 1, k = 8)
#save the model
model_dir = tempfile.mkdtemp()
model_path = os.path.join(model_dir, 'test_layer.mlmodel')
coremltools.utils.save_spec(builder.spec, model_path)
#prepare input and get predictions
coreml_model = coremltools.models.MLModel(model_path)
if macos_version() >= (10, 13):
coreml_input = {'data': np.ones((1,3,3))}
coreml_preds = coreml_model.predict(coreml_input)['output']
#hardcoded for this simple test case
numpy_preds = 1e-3 * np.ones((1,3,3))
self.assertTrue(self._compare_shapes(numpy_preds, coreml_preds))
self.assertTrue(self._compare_predictions(numpy_preds, coreml_preds))
if os.path.exists(model_dir):
shutil.rmtree(model_dir)
def test_MVN(self):
#create a tiny mlmodel
input_dim = (2,2,2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', datatypes.Array(*input_dim))]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_mvn(name= 'mvn', input_name = 'data', output_name = 'output',
across_channels = False, normalize_variance = False)
#save the model
model_dir = tempfile.mkdtemp()
model_path = os.path.join(model_dir, 'test_layer.mlmodel')
coremltools.utils.save_spec(builder.spec, model_path)
#prepare input and get predictions
coreml_model = coremltools.models.MLModel(model_path)
if macos_version() >= (10, 13):
coreml_input = {'data': np.reshape(np.arange(8, dtype=np.float32), (2,2,2))}
coreml_preds = coreml_model.predict(coreml_input)['output']
#hardcoded for this simple test case
numpy_preds = np.reshape(np.arange(8) - np.array([1.5,1.5,1.5,1.5,5.5,5.5,5.5,5.5]),(2,2,2))
self.assertTrue(self._compare_shapes(numpy_preds, coreml_preds))
self.assertTrue(self._compare_predictions(numpy_preds, coreml_preds))
if os.path.exists(model_dir):
shutil.rmtree(model_dir)
def test_L2_normalize(self):
#create a tiny mlmodel
input_dim = (1,2,2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', datatypes.Array(*input_dim))]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_l2_normalize(name= 'mvn', input_name = 'data', output_name = 'output')
#save the model
model_dir = tempfile.mkdtemp()
model_path = os.path.join(model_dir, 'test_layer.mlmodel')
coremltools.utils.save_spec(builder.spec, model_path)
#prepare input and get predictions
coreml_model = coremltools.models.MLModel(model_path)
if macos_version() >= (10, 13):
coreml_input = {'data': np.reshape(np.arange(4, dtype=np.float32), (1,2,2))}
coreml_preds = coreml_model.predict(coreml_input)['output']
#hardcoded for this simple test case
numpy_preds = np.reshape(np.arange(4, dtype=np.float32), (1,2,2))/np.sqrt(14)
self.assertTrue(self._compare_shapes(numpy_preds, coreml_preds))
self.assertTrue(self._compare_predictions(numpy_preds, coreml_preds))
if os.path.exists(model_dir):
shutil.rmtree(model_dir)
def test_unary(self):
x = np.reshape(np.arange(1,5, dtype=np.float32), (1,2,2))
coreml_preds = get_coreml_predictions_unary(x, 'sqrt')
if coreml_preds is not None:
numpy_preds = np.sqrt(x)
self.assertTrue(self._compare_shapes(numpy_preds, coreml_preds))
self.assertTrue(self._compare_predictions(numpy_preds, coreml_preds))
coreml_preds = get_coreml_predictions_unary(x, 'rsqrt')
if coreml_preds is not None:
numpy_preds = 1/np.sqrt(x)
self.assertTrue(self._compare_shapes(numpy_preds, coreml_preds))
self.assertTrue(self._compare_predictions(numpy_preds, coreml_preds))
coreml_preds = get_coreml_predictions_unary(x, 'inverse')
if coreml_preds is not None:
numpy_preds = 1/x
self.assertTrue(self._compare_shapes(numpy_preds, coreml_preds))
self.assertTrue(self._compare_predictions(numpy_preds, coreml_preds))
coreml_preds = get_coreml_predictions_unary(x, 'power', 3)
if coreml_preds is not None:
numpy_preds = x ** 3
self.assertTrue(self._compare_shapes(numpy_preds, coreml_preds))
self.assertTrue(self._compare_predictions(numpy_preds, coreml_preds))
coreml_preds = get_coreml_predictions_unary(x, 'exp')
if coreml_preds is not None:
numpy_preds = np.exp(x)
self.assertTrue(self._compare_shapes(numpy_preds, coreml_preds))
self.assertTrue(self._compare_predictions(numpy_preds, coreml_preds))
coreml_preds = get_coreml_predictions_unary(x, 'log')
if coreml_preds is not None:
numpy_preds = np.log(x)
self.assertTrue(self._compare_shapes(numpy_preds, coreml_preds))
self.assertTrue(self._compare_predictions(numpy_preds, coreml_preds))
coreml_preds = get_coreml_predictions_unary(x, 'abs')
if coreml_preds is not None:
numpy_preds = np.abs(x)
self.assertTrue(self._compare_shapes(numpy_preds, coreml_preds))
self.assertTrue(self._compare_predictions(numpy_preds, coreml_preds))
coreml_preds = get_coreml_predictions_unary(x, 'threshold', alpha = 2)
if coreml_preds is not None:
numpy_preds = np.maximum(x, 2)
self.assertTrue(self._compare_shapes(numpy_preds, coreml_preds))
self.assertTrue(self._compare_predictions(numpy_preds, coreml_preds))
def test_split(self):
#create a tiny mlmodel
input_dim = (9,2,2)
x = np.random.rand(*input_dim)
input_features = [('data', datatypes.Array(*input_dim))]
output_names = []
output_features = []
for i in range(3):
out = 'out_' + str(i)
output_names.append(out)
output_features.append((out, None))
builder = neural_network.NeuralNetworkBuilder(input_features, output_features)
builder.add_split(name= 'split', input_name = 'data', output_names = output_names)
#save the model
model_dir = tempfile.mkdtemp()
model_path = os.path.join(model_dir, 'test_layer.mlmodel')
coremltools.utils.save_spec(builder.spec, model_path)
#prepare input and get predictions
coreml_model = coremltools.models.MLModel(model_path)
if macos_version() >= (10, 13):
coreml_input = {'data': x}
coreml_preds_dict = coreml_model.predict(coreml_input)
for i in range(3):
coreml_preds = coreml_preds_dict[output_names[i]]
numpy_preds = x[i*3:i*3+3,:,:]
self.assertTrue(self._compare_shapes(numpy_preds, coreml_preds))
self.assertTrue(self._compare_predictions(numpy_preds, coreml_preds))
if os.path.exists(model_dir):
shutil.rmtree(model_dir)
def test_scale_constant(self):
#create a tiny mlmodel
input_dim = (1,2,2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features, output_features)
builder.add_scale(name = 'scale', W = 5, b = 45, has_bias = True, input_name = 'data', output_name = 'output')
#save the model
model_dir = tempfile.mkdtemp()
model_path = os.path.join(model_dir, 'test_layer.mlmodel')
coremltools.utils.save_spec(builder.spec, model_path)
#prepare input and get predictions
coreml_model = coremltools.models.MLModel(model_path)
if macos_version() >= (10, 13):
x = np.reshape(np.arange(4, dtype=np.float32), (1,2,2))
coreml_input = {'data': x}
coreml_preds = coreml_model.predict(coreml_input)['output']
#hardcoded for this simple test case
numpy_preds = 5 * x + 45
self.assertTrue(self._compare_shapes(numpy_preds, coreml_preds))
self.assertTrue(self._compare_predictions(numpy_preds, coreml_preds))
if os.path.exists(model_dir):
shutil.rmtree(model_dir)
def test_scale_matrix(self):
#create a tiny mlmodel
input_dim = (1,2,2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features, output_features)
W = np.reshape(np.arange(5,9), (1,2,2))
builder.add_scale(name = 'scale', W = W, b = None, has_bias = False, input_name = 'data', output_name = 'output',
shape_scale = [1,2,2])
#save the model
model_dir = tempfile.mkdtemp()
model_path = os.path.join(model_dir, 'test_layer.mlmodel')
coremltools.utils.save_spec(builder.spec, model_path)
#prepare input and get predictions
coreml_model = coremltools.models.MLModel(model_path)
if macos_version() >= (10, 13):
x = np.reshape(np.arange(4, dtype=np.float32), (1,2,2))
coreml_input = {'data': x}
coreml_preds = coreml_model.predict(coreml_input)['output']
#hardcoded for this simple test case
numpy_preds = W * x
self.assertTrue(self._compare_shapes(numpy_preds, coreml_preds))
self.assertTrue(self._compare_predictions(numpy_preds, coreml_preds))
if os.path.exists(model_dir):
shutil.rmtree(model_dir)
def test_bias_constant(self):
#create a tiny mlmodel
input_dim = (1,2,2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features, output_features)
builder.add_bias(name = 'bias', b = 45, input_name = 'data', output_name = 'output')
#save the model
model_dir = tempfile.mkdtemp()
model_path = os.path.join(model_dir, 'test_layer.mlmodel')
coremltools.utils.save_spec(builder.spec, model_path)
#prepare input and get predictions
coreml_model = coremltools.models.MLModel(model_path)
if macos_version() >= (10, 13):
x = np.reshape(np.arange(4, dtype=np.float32), (1,2,2))
coreml_input = {'data': x}
coreml_preds = coreml_model.predict(coreml_input)['output']
#hardcoded for this simple test case
numpy_preds = x + 45
self.assertTrue(self._compare_shapes(numpy_preds, coreml_preds))
self.assertTrue(self._compare_predictions(numpy_preds, coreml_preds))
if os.path.exists(model_dir):
shutil.rmtree(model_dir)
def test_bias_matrix(self):
#create a tiny mlmodel
input_dim = (1,2,2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features, output_features)
b = np.reshape(np.arange(5,9), (1,2,2))
builder.add_bias(name = 'bias', b = b, input_name = 'data', output_name = 'output',
shape_bias = [1,2,2])
#save the model
model_dir = tempfile.mkdtemp()
model_path = os.path.join(model_dir, 'test_layer.mlmodel')
coremltools.utils.save_spec(builder.spec, model_path)
#prepare input and get predictions
coreml_model = coremltools.models.MLModel(model_path)
if macos_version() >= (10, 13):
x = np.reshape(np.arange(4, dtype=np.float32), (1,2,2))
coreml_input = {'data': x}
coreml_preds = coreml_model.predict(coreml_input)['output']
#hardcoded for this simple test case
numpy_preds = x + b
self.assertTrue(self._compare_shapes(numpy_preds, coreml_preds))
self.assertTrue(self._compare_predictions(numpy_preds, coreml_preds))
if os.path.exists(model_dir):
shutil.rmtree(model_dir)
def test_load_constant(self):
#create a tiny mlmodel
input_dim = (1,2,2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features, output_features)
b = np.reshape(np.arange(5,9), (1,2,2))
builder.add_load_constant(name= 'load_constant', output_name = 'bias', constant_value = b, shape = [1,2,2])
builder.add_elementwise(name= 'add', input_names = ['data', 'bias'], output_name = 'output', mode = 'ADD')
#save the model
model_dir = tempfile.mkdtemp()
model_path = os.path.join(model_dir, 'test_layer.mlmodel')
coremltools.utils.save_spec(builder.spec, model_path)
#prepare input and get predictions
coreml_model = coremltools.models.MLModel(model_path)
if macos_version() >= (10, 13):
x = np.reshape(np.arange(4, dtype=np.float32), (1,2,2))
coreml_input = {'data': x}
coreml_preds = coreml_model.predict(coreml_input)['output']
#hardcoded for this simple test case
numpy_preds = x + b
self.assertTrue(self._compare_shapes(numpy_preds, coreml_preds))
self.assertTrue(self._compare_predictions(numpy_preds, coreml_preds))
# Test half precision case
coreml_fp16_model = coremltools.utils.convert_neural_network_weights_to_fp16(coreml_model)
coreml_preds = coreml_fp16_model.predict(coreml_input)['output']
self.assertTrue(self._compare_shapes(numpy_preds, coreml_preds))
self.assertTrue(self._compare_predictions(numpy_preds, coreml_preds))
if os.path.exists(model_dir):
shutil.rmtree(model_dir)
def test_min(self):
#create a tiny mlmodel
input_dim = (1,2,2)
input_features = [('data_0', datatypes.Array(*input_dim)), ('data_1', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features, output_features)
builder.add_elementwise(name = 'min', input_names= ['data_0', 'data_1'], output_name = 'output', mode = 'MIN')
#save the model
model_dir = tempfile.mkdtemp()
model_path = os.path.join(model_dir, 'test_layer.mlmodel')
coremltools.utils.save_spec(builder.spec, model_path)
#prepare input and get predictions
coreml_model = coremltools.models.MLModel(model_path)
if macos_version() >= (10, 13):
x1 = np.reshape(np.arange(4, dtype=np.float32), (1,2,2))
x2 = np.reshape(np.arange(2,6, dtype=np.float32), (1,2,2))
coreml_input = {'data_0': x1, 'data_1': x2}
coreml_preds = coreml_model.predict(coreml_input)['output']
#hardcoded for this simple test case
numpy_preds = np.minimum(x1,x2)
self.assertTrue(self._compare_shapes(numpy_preds, coreml_preds))
self.assertTrue(self._compare_predictions(numpy_preds, coreml_preds))
if os.path.exists(model_dir):
shutil.rmtree(model_dir)
def test_conv_same_padding(self):
#create a tiny mlmodel
input_dim = (10,15,15)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features, output_features)
W = np.random.rand(3,3,10,20)
builder.add_convolution(name = 'conv', kernel_channels = 10, output_channels = 20,
height = 3, width = 3, stride_height = 2, stride_width = 2,
border_mode = 'same', groups = 1,
W = W, b = None, has_bias = False,
input_name = 'data', output_name = 'output',
same_padding_asymmetry_mode = 'TOP_LEFT_HEAVY')
#save the model
model_dir = tempfile.mkdtemp()
model_path = os.path.join(model_dir, 'test_layer.mlmodel')
coremltools.utils.save_spec(builder.spec, model_path)
#prepare input and get predictions
coreml_model = coremltools.models.MLModel(model_path)
if macos_version() >= (10, 13):
x = np.random.rand(*input_dim)
coreml_input = {'data': x}
coreml_preds = coreml_model.predict(coreml_input)['output']
#hardcoded for this simple test case
numpy_preds = np.random.rand(20,8,8)
self.assertTrue(self._compare_shapes(numpy_preds, coreml_preds))
if os.path.exists(model_dir):
shutil.rmtree(model_dir)
def test_deconv_valid_padding(self):
#create a tiny mlmodel
input_dim = (10,15,15)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features, output_features)
W = np.random.rand(3,3,20,10)
builder.add_convolution(name = 'deconv', kernel_channels = 10, output_channels = 20,
height = 3, width = 3, stride_height = 2, stride_width = 2,
border_mode = 'valid', groups = 1,
W = W, b = None, has_bias = False,
is_deconv = True,
input_name = 'data', output_name = 'output',
padding_top = 2, padding_bottom = 3, padding_left = 2, padding_right = 3)
#save the model
model_dir = tempfile.mkdtemp()
model_path = os.path.join(model_dir, 'test_layer.mlmodel')
coremltools.utils.save_spec(builder.spec, model_path)
#prepare input and get predictions
coreml_model = coremltools.models.MLModel(model_path)
if macos_version() >= (10, 13):
x = np.random.rand(*input_dim)
coreml_input = {'data': x}
coreml_preds = coreml_model.predict(coreml_input)['output']
#hardcoded for this simple test case
numpy_preds = np.random.rand(20,26,26)
self.assertTrue(self._compare_shapes(numpy_preds, coreml_preds))
if os.path.exists(model_dir):
shutil.rmtree(model_dir)
def test_linear_activation(self):
#create a tiny mlmodel
input_dim = (10,15,15)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features, output_features)
builder.add_activation(name = 'activation',
non_linearity = 'LINEAR',
input_name = 'data',
output_name = 'output', params= [34.0, 67.0])
#save the model
model_dir = tempfile.mkdtemp()
model_path = os.path.join(model_dir, 'test_layer.mlmodel')
coremltools.utils.save_spec(builder.spec, model_path)
#prepare input and get predictions
coreml_model = coremltools.models.MLModel(model_path)
if macos_version() >= (10, 13):
x = np.random.rand(*input_dim)
coreml_input = {'data': x}
coreml_preds = coreml_model.predict(coreml_input)['output']
#hardcoded for this simple test case
numpy_preds = 34.0 * x + 67.0
self.assertTrue(self._compare_shapes(numpy_preds, coreml_preds))
self.assertTrue(self._compare_predictions(numpy_preds, coreml_preds))
if os.path.exists(model_dir):
shutil.rmtree(model_dir)
def test_padding_constant(self):
#create a tiny mlmodel
input_dim = (1,2,3)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features, output_features)
builder.add_padding(name = 'pad',
left = 1, right = 0, top = 2, bottom = 0,
value = -1,
input_name = 'data',
output_name = 'output')
#save the model
model_dir = tempfile.mkdtemp()
model_path = os.path.join(model_dir, 'test_layer.mlmodel')
coremltools.utils.save_spec(builder.spec, model_path)
#prepare input and get predictions
coreml_model = coremltools.models.MLModel(model_path)
if macos_version() >= (10, 13):
x = np.reshape(np.array([[1,2,3], [4,5,6]]), (1,2,3)).astype(np.float32)
coreml_input = {'data': x}
coreml_preds = coreml_model.predict(coreml_input)['output']
#hardcoded for this simple test case
numpy_preds = np.reshape(np.array([[-1,-1,-1,-1], [-1,-1,-1,-1], [-1,1,2,3], [-1,4,5,6]]), (1,4,4)).astype(np.float32)
self.assertTrue(self._compare_shapes(numpy_preds, coreml_preds))
self.assertTrue(self._compare_predictions(numpy_preds, coreml_preds))
if os.path.exists(model_dir):
shutil.rmtree(model_dir)
def test_padding_replication(self):
#create a tiny mlmodel
input_dim = (1,2,3)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features, output_features)
builder.add_padding(name = 'pad',
left = 1, top = 2,
input_name = 'data',
output_name = 'output', padding_type = 'replication')
#save the model
model_dir = tempfile.mkdtemp()
model_path = os.path.join(model_dir, 'test_layer.mlmodel')
coremltools.utils.save_spec(builder.spec, model_path)
#prepare input and get predictions
coreml_model = coremltools.models.MLModel(model_path)
if macos_version() >= (10, 13):
x = np.reshape(np.array([[1,2,3], [4,5,6]]), (1,2,3)).astype(np.float32)
coreml_input = {'data': x}
coreml_preds = coreml_model.predict(coreml_input)['output']
#hardcoded for this simple test case
numpy_preds = np.reshape(np.array([[1,1,2,3], [1,1,2,3], [1,1,2,3], [4,4,5,6]]), (1,4,4)).astype(np.float32)
self.assertTrue(self._compare_shapes(numpy_preds, coreml_preds))
self.assertTrue(self._compare_predictions(numpy_preds, coreml_preds))
if os.path.exists(model_dir):
shutil.rmtree(model_dir)
def test_reshape_target_shape_3(self):
#create a tiny mlmodel
input_dim = (1,2,5) #(C,H,W)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features, output_features)
builder.add_reshape(name = 'reshape', input_name = 'data', output_name = 'output', target_shape = (10,1,1), mode = 0)
#save the model
model_dir = tempfile.mkdtemp()
model_path = os.path.join(model_dir, 'test_layer.mlmodel')
coremltools.utils.save_spec(builder.spec, model_path)
#prepare input and get predictions
coreml_model = coremltools.models.MLModel(model_path)
if macos_version() >= (10, 13):
x = np.random.rand(*input_dim)
coreml_input = {'data': x}
coreml_preds = coreml_model.predict(coreml_input)['output']
#hardcoded for this simple test case
numpy_preds = np.reshape(x, (10,1,1))
self.assertTrue(self._compare_shapes(numpy_preds, coreml_preds))
self.assertTrue(self._compare_predictions(numpy_preds, coreml_preds))
if os.path.exists(model_dir):
shutil.rmtree(model_dir)
def test_reshape_target_shape_4(self):
#create a tiny mlmodel
input_dim = (1,2,5) #(C,H,W)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features, output_features)
builder.add_reshape(name = 'reshape', input_name = 'data', output_name = 'output', target_shape = (1,10,1,1), mode = 0)
#save the model
model_dir = tempfile.mkdtemp()
model_path = os.path.join(model_dir, 'test_layer.mlmodel')
coremltools.utils.save_spec(builder.spec, model_path)
#prepare input and get predictions
coreml_model = coremltools.models.MLModel(model_path)
if macos_version() >= (10, 13):
x = np.random.rand(*input_dim)
coreml_input = {'data': x}
coreml_preds = coreml_model.predict(coreml_input)['output']
#hardcoded for this simple test case
numpy_preds = np.reshape(x, (1,10,1,1))
self.assertTrue(self._compare_shapes(numpy_preds, coreml_preds))
self.assertTrue(self._compare_predictions(numpy_preds, coreml_preds))
if os.path.exists(model_dir):
shutil.rmtree(model_dir)
class SimpleTestCPUOnly(CorrectnessTest):
def test_bias_matrix_CPU(self):
#create a tiny mlmodel
input_dim = (1,2,2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features, output_features)
b = np.reshape(np.arange(5,9), (1,2,2))
builder.add_bias(name = 'bias', b = b, input_name = 'data', output_name = 'output',
shape_bias = [1,2,2])
#save the model
model_dir = tempfile.mkdtemp()
model_path = os.path.join(model_dir, 'test_layer.mlmodel')
coremltools.utils.save_spec(builder.spec, model_path)
#prepare input and get predictions
coreml_model = coremltools.models.MLModel(model_path)
if macos_version() >= (10, 13):
x = np.reshape(np.arange(4, dtype=np.float32), (1,2,2))
coreml_input = {'data': x}
coreml_preds = coreml_model.predict(coreml_input, useCPUOnly = True)['output']
#hardcoded for this simple test case
numpy_preds = x + b
self.assertTrue(self._compare_shapes(numpy_preds, coreml_preds))
self.assertTrue(self._compare_predictions(numpy_preds, coreml_preds))
if os.path.exists(model_dir):
shutil.rmtree(model_dir)
def test_linear_activation_CPU(self):
#create a tiny mlmodel
input_dim = (10,15,15)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features, output_features)
builder.add_activation(name = 'activation',
non_linearity = 'LINEAR',
input_name = 'data',
output_name = 'output', params= [34.0, 67.0])
#save the model
model_dir = tempfile.mkdtemp()
model_path = os.path.join(model_dir, 'test_layer.mlmodel')
coremltools.utils.save_spec(builder.spec, model_path)
#prepare input and get predictions
coreml_model = coremltools.models.MLModel(model_path)
if macos_version() >= (10, 13):
x = np.random.rand(*input_dim)
coreml_input = {'data': x}
coreml_preds = coreml_model.predict(coreml_input, useCPUOnly = True)['output']
#hardcoded for this simple test case
numpy_preds = 34.0 * x + 67.0
self.assertTrue(self._compare_shapes(numpy_preds, coreml_preds))
self.assertTrue(self._compare_predictions(numpy_preds, coreml_preds))
if os.path.exists(model_dir):
shutil.rmtree(model_dir)
class StressTest(CorrectnessTest):
def test_slice_layer(self):
'''
Define Params
'''
params_dict = dict(
input_shape = [[30,100,8], [80,50,5], [4,12,5], [56,8,14]],
axis = ['channel', 'height', 'width'],
start = [0,1,2,5],
end = [5,100,56,-1,-2,-4],
stride = [1,2,3]
)
params = list(itertools.product(*params_dict.values()))
all_candidates = [dict(zip(params_dict.keys(), x)) for x in params]
valid_params = []
for pr in all_candidates:
X = np.random.rand(*pr["input_shape"])
if get_size_after_stride(X, pr):
valid_params.append(pr)
print("Total params to be tested: ", len(valid_params), "out of canditates: ", len(all_candidates))
'''
Test
'''
failed_tests_compile = []
failed_tests_shape = []
failed_tests_numerical = []
for i in range(len(valid_params)):
params = valid_params[i]
#print("=========: ", params)
#if i % 10 == 0: print("======== Testing {}/{}".format(str(i), str(len(valid_params))))
X =
|
np.random.rand(*params["input_shape"])
|
numpy.random.rand
|
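# --- Illustrative sketch (not part of the record above) ---
# A standalone example of the parameter-grid pattern used in test_slice_layer:
# itertools.product over the dict's value lists, re-zipped with the keys.
# The parameter names and values here are made up.
import itertools

params_dict = dict(axis=['channel', 'height'], start=[0, 1], stride=[1, 2])
all_candidates = [dict(zip(params_dict.keys(), combo))
                  for combo in itertools.product(*params_dict.values())]
print(len(all_candidates))  # 2 * 2 * 2 = 8 candidate configurations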
import streamlit as st
import cv2
import numpy as np
import config
import time
import pandas as pd
from utils.enhance import cropped_image
from config import NMS_THRESH, LABELS
crop, image = None, None
# Initialization
# load the COCO class labels our YOLO model was trained on
# derive the paths to the YOLO weights and model configuration
weightsPath = config.MODEL_PATH
configPath = config.CONFIG_PATH
def yolo_detector(frame, net, ln, MIN_CONF, Idx=0):
# grab the dimensions of the frame and initialize the list of
# results
(H, W) = frame.shape[:2]
results = []
# construct a blob from the input frame and then perform a forward
# pass of the YOLO object detector, giving us our bounding boxes
# and associated probabilities
blob = cv2.dnn.blobFromImage(frame, 1 / 255.0, (416, 416),
swapRB=True, crop=False)
net.setInput(blob)
start = time.time()
layerOutputs = net.forward(ln)
st.sidebar.success("Processing time for YOLOV3: {} {:.4f} seconds.".format('\n',time.time() - start))
print("Processing time for YOLOV3: --- {:.4f} seconds ---".format(time.time() - start))
# initialize our lists of detected bounding boxes, centroids, and
# confidences, respectively
boxes = []
centroids = []
confidences = []
# loop over each of the layer outputs
for output in layerOutputs:
# loop over each of the detections
for detection in output:
# extract the class ID and confidence (i.e., probability)
# of the current object detection
scores = detection[5:]
classID = np.argmax(scores)
confidence = scores[classID]
# filter detections by (1) ensuring that the object
# detected was a person and (2) that the minimum
# confidence is met
if classID == Idx and confidence > MIN_CONF:
# scale the bounding box coordinates back relative to
# the size of the image, keeping in mind that YOLO
# actually returns the center (x, y)-coordinates of
# the bounding box followed by the boxes' width and
# height
box = detection[0:4] * np.array([W, H, W, H])
(centerX, centerY, width, height) = box.astype("int")
# use the center (x, y)-coordinates to derive the top
# and left corner of the bounding box
x = int(centerX - (width / 2))
y = int(centerY - (height / 2))
# update our list of bounding box coordinates,
# centroids, and confidences
boxes.append([x, y, int(width), int(height)])
centroids.append((centerX, centerY))
confidences.append(float(confidence))
# apply non-maxima suppression to suppress weak, overlapping
# bounding boxes
idxs = cv2.dnn.NMSBoxes(boxes, confidences, MIN_CONF, NMS_THRESH)
# ensure at least one detection exists
if len(idxs) > 0:
# loop over the indexes we are keeping
for i in idxs.flatten():
# extract the bounding box coordinates
(x, y) = (boxes[i][0], boxes[i][1])
(w, h) = (boxes[i][2], boxes[i][3])
# update our results list to consist of the person
# prediction probability, bounding box coordinates,
# and the centroid
r = (confidences[i], (x, y, x + w, y + h), centroids[i])
results.append(r)
# return the list of results
return results
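# A vectorised sketch of the box decoding done inside the loop above; the helper
# and detection values below are fabricated for illustration. YOLO returns
# normalised (centerX, centerY, width, height); scaling by [W, H, W, H] and
# shifting by half the width/height recovers the top-left corner in pixels.
def _example_decode_boxes():
    W, H = 416, 416
    dets = np.array([[0.5, 0.5, 0.2, 0.4]])             # normalised cx, cy, w, h
    boxes = dets * np.array([W, H, W, H])
    x = (boxes[:, 0] - boxes[:, 2] / 2).astype(int)     # top-left x
    y = (boxes[:, 1] - boxes[:, 3] / 2).astype(int)     # top-left y
    return np.stack([x, y, boxes[:, 2].astype(int), boxes[:, 3].astype(int)], axis=1)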
@st.cache(allow_output_mutation=True, show_spinner=False)
def load_network(configpath, weightspath):
with st.spinner("Loading Yolo weights!"):
# load our YOLO object detector trained on our dataset (1 class)
net = cv2.dnn.readNetFromDarknet(configPath, weightsPath)
# determine only the *output* layer names that we need from YOLO
output_layer_names = net.getLayerNames()
output_layer_names = [output_layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]
return net, output_layer_names
def yolo_crop_correction(frame, bbox, w, h):
# resizing cropped image
(startX, startY, endX, endY) = bbox
crop = cropped_image(frame, (startX, startY, endX, endY))
crop_w, crop_h = endX - startX, endY - startY # height & width of number plate of 416*416 image
width_m, height_m = w/416, h/416 # width and height multiplier
w2, h2 = round(crop_w*width_m), round(crop_h*height_m)
crop = cv2.resize(np.asarray(crop), (w2, h2))
return crop
def yolo_inference(image, confidence_cutoff):
# YOLO Detection
# Preprocess
frame = cv2.resize(
|
np.asarray(image)
|
numpy.asarray
|
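# --- Illustrative sketch (not part of the record above) ---
# A standalone numeric example of the rescaling done in yolo_crop_correction:
# a plate box measured on the 416x416 network input is scaled back to the
# original frame size. All numbers below are fabricated.
orig_w, orig_h = 1280, 720
crop_w, crop_h = 100, 40                            # box size on the 416x416 input
width_m, height_m = orig_w / 416, orig_h / 416      # per-axis multipliers
print(round(crop_w * width_m), round(crop_h * height_m))  # roughly 308 x 69 pixels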
# https://takun-physics.net/12612/
#from numpy import sin, cos
import math
import numpy as np
import matplotlib.pyplot as plt
import scipy.integrate as integrate
import matplotlib.animation as animation
import pandas as pd
import os
# Differential equations
def ini_cor_func(j, x_ini_cor, y_ini_cor, x, L):
if j == 0:
x_ini_cor[j] = L[j] * np.sin(x[j])
y_ini_cor[j] = -L[j] * np.cos(x[j])
else:
x_ini_cor[j] = L[j] * np.sin(x[j]) + x_ini_cor[j - 1]
y_ini_cor[j] = -L[j] * np.cos(x[j]) + y_ini_cor[j - 1]
return x_ini_cor[j], y_ini_cor[j]
def N_func(t, n, x, v, m, L, g, E):
A = np.zeros((n, n), dtype=np.float64)
B = np.zeros((n, n), dtype=np.float64)
for i in range(n):
for j in range(n):
for k in range(max(i, j), n):
A[i][j] += m[k]
B[i][j] += m[k]
if i == j:
A[i][j] *= L[j]
B[i][j] *= g * np.sin(x[i])
else:
A[i][j] *= L[j] * np.cos(x[i] - x[j])
B[i][j] *= L[j] * v[j] ** 2 * np.sin(x[i] - x[j])
# Compute the inverse matrix
inv_A = np.linalg.inv(A)
# Compute inv_A * B
inv_A_B = np.dot(inv_A, B)
F = np.dot(inv_A_B, E)
return F
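# Note on the linear algebra above: forming inv(A) explicitly and then multiplying
# is less numerically stable than a direct solve. A sketch of the equivalent
# computation with np.linalg.solve (same inputs, same result up to rounding);
# this alternative is not used by the code above.
def N_func_solve(t, n, x, v, m, L, g, E):
    A = np.zeros((n, n), dtype=np.float64)
    B = np.zeros((n, n), dtype=np.float64)
    for i in range(n):
        for j in range(n):
            for k in range(max(i, j), n):
                A[i][j] += m[k]
                B[i][j] += m[k]
            if i == j:
                A[i][j] *= L[j]
                B[i][j] *= g * np.sin(x[i])
            else:
                A[i][j] *= L[j] * np.cos(x[i] - x[j])
                B[i][j] *= L[j] * v[j] ** 2 * np.sin(x[i] - x[j])
    return np.linalg.solve(A, B @ E)   # equals inv(A) @ B @ E without forming the inverse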
def main(num, end):
num = int(num)
df_param = pd.read_csv('N_param0.csv')
df_param = pd.DataFrame(df_param)
# df_param = df_param[:, 1:].values
df_param = np.array(df_param)
df_param = df_param[:, 1:num+1]
m = np.array(df_param[0, :]).astype(np.float64).round(2)
L = np.array(df_param[1, :]).astype(np.float64).round(2)
theta = np.array(df_param[2, :]).astype(np.float64).round(2)
x = np.array([np.radians(i) for i in theta]).astype(np.float64)
v = np.array(df_param[3, :]).astype(np.float64).round(2)
# Copy the initial conditions
x0 = x.copy()
# Check the initial positions
n = len(df_param[0, :])
x_ini_cor = np.zeros(n, dtype=np.float64)
y_ini_cor = np.zeros(n, dtype=np.float64)
for j in range(n):
x_ini_cor[j], y_ini_cor[j] = ini_cor_func(j, x_ini_cor, y_ini_cor, x, L)
size1 = 1
x_ini_cor = x_ini_cor * size1
y_ini_cor = y_ini_cor * size1
xplot_ = np.insert(x_ini_cor, 0, 0)
yplot_ = np.insert(y_ini_cor, 0, 0)
plt.grid()
plt.plot(xplot_, yplot_, 'ko-', lw=2)
# Calculation properties
init = 0
end = int(end)
dt = 0.05
h = dt
loop = int(end / h)
n = len(df_param[0, :])
g = 9.8
# initial state
t = init
tpoints = np.arange(init, end, h)
xpoints = []
vpoints = []
# A = np.zeros((n,n),dtype=np.float64)
# B = np.zeros((n,n),dtype=np.float64)
E = -np.ones_like(x)
xpoints = []
vpoints = []
# Define the array sizes
j1 = np.zeros_like(v)
k1 = np.zeros_like(x)
j2 = np.zeros_like(v)
k2 = np.zeros_like(x)
j3 =
|
np.zeros_like(v)
|
numpy.zeros_like
|
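# --- Illustrative sketch (not part of the record above) ---
# The j*/k* arrays above look like the stage buffers of a classical 4th-order
# Runge-Kutta step. A generic sketch of one such step, under the assumption that
# dx/dt = v and dv/dt = N_func(t, n, x, v, m, L, g, E) as defined earlier:
def rk4_step(t, x, v, h, n, m, L, g, E):
    k1 = h * v
    j1 = h * N_func(t, n, x, v, m, L, g, E)
    k2 = h * (v + j1 / 2)
    j2 = h * N_func(t + h / 2, n, x + k1 / 2, v + j1 / 2, m, L, g, E)
    k3 = h * (v + j2 / 2)
    j3 = h * N_func(t + h / 2, n, x + k2 / 2, v + j2 / 2, m, L, g, E)
    k4 = h * (v + j3)
    j4 = h * N_func(t + h, n, x + k3, v + j3, m, L, g, E)
    x_new = x + (k1 + 2 * k2 + 2 * k3 + k4) / 6
    v_new = v + (j1 + 2 * j2 + 2 * j3 + j4) / 6
    return x_new, v_new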
import numpy as np
import glob
import os
import os.path
import matplotlib.pyplot as plt
import matplotlib
#matplotlib.use('Agg')
import matplotlib.colors as colors
import matplotlib.cm as cm
from matplotlib.offsetbox import TextArea, VPacker, AnnotationBbox
import errno
import math
from matplotlib.ticker import NullFormatter # useful for `logit` scale
import itertools
from matplotlib import rcParams
import module
def plotting(index_label,figure_storage, plot_array, mbh, mstar, mass_range_array,mbh_mstar_array,mean_bh, mean_star,plot_format,plot_quality):
matplotlib.rcParams.update({'font.size': 20})
marker = itertools.cycle(('<', 'X', '^', 'o', '*','>','p','s'))
colors=["blue", "red", "green", "magenta","orange","grey"]
fig, ax = plt.subplots()
fig.canvas.draw()
index_label2=""
for letter in index_label:
if(letter=="/"):
index_label2 = index_label2 + "_"
else:
index_label2 = index_label2 + letter
filename = index_label2+plot_format
filename1 = os.path.join(figure_storage, filename)
XBH, YSTAR = np.meshgrid(mbh, mstar)
for count in range(2):
if(len(mbh_mstar_array[count][0])>0):
ax.contourf(XBH,YSTAR,mass_range_array[count],colors = colors[count],alpha=0.3)
ax.scatter(mean_bh, mean_star)
plt.rcParams['hatch.color'] = "white"
masked = np.ma.masked_where(plot_array!=2,plot_array)
ax.contourf(XBH,YSTAR,masked,colors = "green",alpha=1.0,hatches=['x'])
ax.set_xscale("log")
ax.set_yscale("log")
ax.set_ylim(mstar[0],mstar[-1])
ax.set_xlim(mbh[0],mbh[-1])
xtick_array= get_tick_array(mbh[0],mbh[-1],"x")
ytick_array= get_tick_array(mstar[0],mstar[-1],"y")
ax.set_xticks(xtick_array)
ax.xaxis.set_major_formatter(plt.FuncFormatter(format_func))
ax.yaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_minor_formatter(NullFormatter())
ax.set_yticks(ytick_array)
ax.yaxis.set_major_formatter(plt.FuncFormatter(format_func))
ax.yaxis.set_label_coords(-0.1,0.5)
ax.set_ylabel(r"$M_{\star}\/[\mathrm{M}_{\odot}]$")
ax.set_xlabel(r"$M_{\mathrm{BH}}\/[10^{6}\/\mathrm{M}_{\odot}]$")
ax.set_title(index_label, fontsize = 25)
ax = plt.gca()
plt.tight_layout()
plt.gcf().subplots_adjust(left=0.16)
plt.gcf().subplots_adjust(bottom=0.16)
plt.savefig(filename1,dpi=plot_quality)
plt.close()
return
def plot_double_intersection(figure_storage,index_array,plot_array,mass_range_array,plot_format,plot_quality,c1,del_omega,samplesize,mbh_sol_array,mstar_sol_array,solution_exist):
marker = itertools.cycle(('<', 'X', '^', 'o', '*','>','p','s'))
colors=["blue", "red", "green", "magenta","orange","grey"]
matplotlib.rcParams.update({'font.size': 20})
temp_array=[[[],[]],[[],[]]]
for sample in range(samplesize):
if(solution_exist[sample]==0):
mbh_lo = mbh_sol_array[sample][0] - mbh_sol_array[sample][1]
mbh_hi = mbh_sol_array[sample][2] + mbh_sol_array[sample][0]
mstar_lo = mstar_sol_array[sample][0] - mstar_sol_array[sample][1]
mstar_hi = mstar_sol_array[sample][2] + mstar_sol_array[sample][0]
temp_array[0][0].append(mbh_lo)
temp_array[0][1].append(mbh_hi)
temp_array[1][0].append(mstar_lo)
temp_array[1][1].append(mstar_hi)
mbh = 10.0** np.linspace(np.log10(np.amin(temp_array[0][0]))*0.75,np.log10(np.amax(temp_array[0][1]))*1.25,100)
mstar = 10.0** np.linspace(np.log10(np.amin(temp_array[1][0]))*0.75,np.log10(np.amax(temp_array[1][1]))*1.25,100)
XBH, YSTAR = np.meshgrid(mbh, mstar)
cmap = "jet"
fig, ax = plt.subplots()
int_count = 2
filename = "c1_"+str(c1)[0:5]+"_del_omega_"+str(del_omega/math.pi)[0:3]+"_inferred_mass"+plot_format
filename1 = os.path.join(figure_storage, filename)
good_sample_size = 0
offset=[]
for sample in range(samplesize):
maxval = np.amax(plot_array[sample].flatten())
if(maxval==int_count):
good_sample_size = good_sample_size + 1
offset.append(good_sample_size)
vmin = 0
vmax = max(good_sample_size,1)
sample_count = 0
min_bh = 1e100
min_star = 1e100
max_bh = -1e100
max_star = -1e100
for sample in range(samplesize):
if(solution_exist[sample]==0):
mean_bh = 0.0
mean_star = 0.0
area_total = 0.0
bh_sol = mbh_sol_array[sample][0]
error_bh_l = mbh_sol_array[sample][1]
error_bh_h = mbh_sol_array[sample][2]
star_sol = mstar_sol_array[sample][0]
error_star_l = mstar_sol_array[sample][1]
error_star_h = mstar_sol_array[sample][2]
mean_bh = mbh_sol_array[sample][0]
ccmap = matplotlib.cm.get_cmap(cmap)
color = ccmap((offset[sample]-vmin)/(vmax-vmin))
if (mean_bh>0.0):
markers = next(marker)
pp = ax.errorbar(bh_sol,star_sol,yerr=np.array([(error_star_l, error_star_h)]).T,xerr =
|
np.array([(error_bh_l, error_bh_h)])
|
numpy.array
|
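# The ax.errorbar call above builds asymmetric error bars: Matplotlib accepts xerr/yerr
# as a scalar, a length-N array, or a 2xN array whose first row holds the lower errors
# and whose second row holds the upper errors. A minimal standalone sketch of that 2xN
# convention (toy numbers, unrelated to the data above):
import numpy as np
import matplotlib.pyplot as plt

x = np.array([1.0, 2.0, 3.0])
y = np.array([1.0, 4.0, 9.0])
yerr = np.array([[0.1, 0.5, 1.0],   # lower errors
                 [0.2, 0.3, 2.0]])  # upper errors
fig, ax = plt.subplots()
ax.errorbar(x, y, yerr=yerr, fmt='o')
plt.close(fig)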
# Triangulate 3D joints
#
# <NAME> <<EMAIL>>
import os
import cv2 as cv
import argparse
import json
import pickle
import subprocess
import numpy as np
from glob import glob
import itertools
from multiprocessing import Pool
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from joint_ids import *
from dataset_ids import *
from pdb import set_trace as st
np.seterr(all='raise')
scenes = get_scenes()
cams = get_cams()
connectivity_ikea = get_ikea_connectivity() # == COCO format
connectivity_body25 = get_body25_connectivity()
def triangulate_joints(args):
camera_parameters = get_camera_parameters(args)
scan_folders = get_scan_dirs(args.dataset_dir)
# Select save format:
if args.save_format == 'ikea':
joint_names = get_ikea_joint_names()
connectivity = connectivity_ikea
elif args.save_format == 'body25':
joint_names = get_body25_joint_names()[:25]
connectivity = connectivity_body25
if 'keypoint_rcnn' in args.input_predictions:
input_joint_names = get_ikea_joint_names()
elif 'openpose' in args.input_predictions:
input_joint_names = get_body25_joint_names()
input_joint_names_dict = {name: i for i, name in enumerate(input_joint_names)}
num_joints = len(joint_names)
with open(os.path.join(args.dataset_dir, 'test_cross_env.txt'), 'r') as f:
test_paths = f.read().splitlines()
with open(os.path.join(args.dataset_dir, 'train_cross_env.txt'), 'r') as f:
train_paths = f.read().splitlines()
reproj_gt_meter = AverageMeter('Reprojection error')
reproj_gt_meter_train = AverageMeter('Reprojection error')
reproj_gt_meter_test = AverageMeter('Reprojection error')
pck_gt_meter = AverageMeter('PCK')
pck_gt_meter_train = AverageMeter('PCK')
pck_gt_meter_test = AverageMeter('PCK')
for i, scan_folder in enumerate(scan_folders):
print(f"\nProcessing {i} of {len(scan_folders)}: {' '.join(scan_folder.split('/')[-2:])}")
# Determine scene ID:
label = scan_folder.split('/')[-1]
scene = label.split('_')[3]
assert scene in scenes
prediction_path = os.path.join('predictions', 'pose2d', args.input_predictions)
use_all_frames = False
if use_all_frames:
# Use all frames:
# Check all cams, since some have more frames than others...
json_mask = os.path.join(scan_folder, 'dev1', prediction_path, 'scan_video_????????????_keypoints.json')
json_files1 = glob(json_mask)
json_mask = os.path.join(scan_folder, 'dev2', prediction_path, 'scan_video_????????????_keypoints.json')
json_files2 = glob(json_mask)
json_mask = os.path.join(scan_folder, 'dev3', prediction_path, 'scan_video_????????????_keypoints.json')
json_files3 = glob(json_mask)
json_index = np.argmin([len(json_files1), len(json_files2), len(json_files3)])
json_files = [json_files1, json_files2, json_files3][json_index]
keypoint_filenames = sorted([os.path.basename(json_file) for json_file in json_files])
else:
# Use frames with GT 2D annotations:
json_mask = os.path.join(scan_folder, 'dev3', 'pose2d', '??????.json') # GT 2D annotations
json_files = sorted(glob(json_mask)) # eg <root>/<scan_folder>/dev3/pose2d/000000.json
frame_strs = [os.path.splitext(os.path.basename(json_file))[0] for json_file in json_files] # eg 000000
keypoint_filenames = sorted([f'scan_video_000000{frame_str}_keypoints.json' for frame_str in frame_strs]) # eg scan_video_000000000000_keypoints.json
for file_index, keypoint_filename in enumerate(keypoint_filenames):
joints2d = np.zeros((num_joints, 3, 3))
for cam in cams:
json_file = os.path.join(scan_folder, cam, prediction_path, keypoint_filename)
if not os.path.exists(json_file):
continue # Predictions don't exist (missing video frame)
with open(json_file) as f:
pose2d = json.load(f)
if len(pose2d["people"]) == 0:
continue
keypoints = None
# max_length = 0.0
max_score = -np.Inf
# Choose highest scoring person in frame:
for person_id, person in enumerate(pose2d["people"]):
kps = np.array(person["pose_keypoints_2d"]).reshape(-1, 3) # [x1, y1, c1], [x2, ... in COCO or body25 format
average_score = np.mean(kps[:, 2])
if average_score > max_score:
max_score = average_score
keypoints = kps
# Convert to ikea joints:
for j, joint_name in enumerate(joint_names):
joint_id = input_joint_names_dict[joint_name]
joints2d[j, cams.index(cam), :] = keypoints[joint_id, :]
# Undistort points:
do_undistort = False
if do_undistort:
for cam_id, cam in enumerate(cams):
joints2d_cam = joints2d[:, cam_id, :2] # 17x2
K = camera_parameters[scene][cam]["K"]
dist_coefs = camera_parameters[scene][cam]["dist_coefs"]
joints2d_cam_undistorted = cv.undistortPoints(joints2d_cam.T, K, dist_coefs, None, None, K).squeeze() # input 2xN/Nx2, output 1xN/Nx1 2-channel
joints2d[:, cam_id, :2] = joints2d_cam_undistorted
# Loop over joints:
joints3d = np.zeros((num_joints, 4)) # 17x4
for j in range(num_joints):
joint2d = joints2d[j, :, :] # 3x3
if np.count_nonzero(joint2d[:, 2] >= args.score_threshold) < 2: # Skip if insufficient good detections for triangulation
continue
Ps = []
xs = []
C = 1.0
for cam_id, cam in enumerate(cams):
if joint2d[cam_id, 2] >= args.score_threshold:
Ps.append(camera_parameters[scene][cam]["P"].astype(float))
xs.append(np.array([joint2d[cam_id, 0], joint2d[cam_id, 1], 1.0]).astype(float)) # homogeneous
C *= joint2d[cam_id, 2]
if len(Ps) == 2:
# Triangulate points from 2 views:
X = cv.triangulatePoints(Ps[0], Ps[1], xs[0][:2], xs[1][:2]) # dev1+dev3 (preferred pair)
X /= X[3]
X = X.squeeze()
X[3] = C
else:
# Triangulate from all 2-view pairs and average (suboptimal):
X1 = cv.triangulatePoints(Ps[0], Ps[1], xs[0][:2], xs[1][:2]) # dev1+dev2
X2 = cv.triangulatePoints(Ps[0], Ps[2], xs[0][:2], xs[2][:2]) # dev1+dev3
X3 = cv.triangulatePoints(Ps[1], Ps[2], xs[1][:2], xs[2][:2]) # dev2+dev3
X1 /= X1[3]
X2 /= X2[3]
X3 /= X3[3]
X1 = X1.squeeze()
X2 = X2.squeeze()
X3 = X3.squeeze()
X = np.mean((X1, X2, X3), axis=0)
X[3] = C
joints3d[j, :] = X
# Filter any points that are far from the median of the others (dimension-wise):
non_zero_indices = joints3d[:, 3] > 0.0
if non_zero_indices.any(): # At least one joint
joints3d_median = np.median(joints3d[non_zero_indices, :], axis=0) # excluding zeros
error = np.abs(joints3d[:, :3] - joints3d_median[:3])
for j in range(num_joints):
if joints3d[j, 3] > 0.0 and any(error[j, :] > args.distance_to_median_threshold): # 200 cm
joints3d[j, :] = np.zeros(4)
# Filter any points that are far from any other point:
for j in range(num_joints):
if joints3d[j, 3] > 0.0:
distances = []
for j2 in range(num_joints):
if joints3d[j2, 3] > 0.0 and j != j2:
distances.append(np.linalg.norm(joints3d[j, :3] - joints3d[j2, :3]))
if distances and np.array(distances).min() > args.distance_to_closest_threshold: # 100 cm
joints3d[j, :] = np.zeros(4)
# Compute reprojection errors in each view:
# Discard joints with large reprojection error in any view
for cam_id, cam in enumerate(cams):
P = camera_parameters[scene][cam]["P"]
for j in range(num_joints):
if joints3d[j, 3] > 0.0: # Skip joints that were not triangulated
if joints2d[j, cam_id, 2] > args.score_threshold: # Skip 2D joints that were not well detected
x2d = joints2d[j, cam_id, :2]
x3dproj = P @ np.array([joints3d[j, 0], joints3d[j, 1], joints3d[j, 2], 1.0]) # Project to 2D
x3dproj /= x3dproj[2]
x3dproj = x3dproj[:2]
reprojection_error = np.linalg.norm(x2d - x3dproj)
# print(f"{cam} {joint_names[j]} \t\t {reprojection_error:3.1f}")
if reprojection_error > args.reprojection_threshold:
joints3d[j, :] =
|
np.zeros(4)
|
numpy.zeros
|
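# The block above triangulates from all two-view pairs and averages the results when
# three detections are available, which its own comment flags as suboptimal. A common
# alternative (not what the script above does) is a direct linear transform (DLT) over
# all views at once: stack two equations per view and take the null space via SVD.
import numpy as np

def triangulate_dlt(Ps, xs):
    """Triangulate one 3D point from N >= 2 views.

    Ps: list of 3x4 projection matrices; xs: list of homogeneous 2D points [x, y, 1].
    Returns the homogeneous 3D point normalized so that X[3] == 1.
    """
    A = []
    for P, x in zip(Ps, xs):
        A.append(x[0] * P[2] - P[0])
        A.append(x[1] * P[2] - P[1])
    _, _, Vt = np.linalg.svd(np.asarray(A))
    X = Vt[-1]
    return X / X[3]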
# %% [markdown]
# # Fitting a normal distribution with tensorflow probability
# Here we look at fitting a normal distribution to some data using Tensorflow Probability.
# %%
import numpy as np
import pandas as pd
import tensorflow as tf
import tensorflow_probability as tfp
import matplotlib.pyplot as plt
plt.style.use("seaborn-whitegrid")
# %% [markdown]
# Generate some data from a normal distribution:
# %%
n = 1000
true_mu = 2.0
true_std = 3.4
x =
|
np.random.normal(loc=true_mu, scale=true_std, size=n)
|
numpy.random.normal
|
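# A minimal sketch of the fitting step this notebook is heading towards: treat mu and
# log(sigma) as trainable variables and minimize the negative log-likelihood of a
# tfp.distributions.Normal by gradient descent. It reuses x, np, tf and tfp from the
# cell above; the variable names, optimizer and step count are illustrative assumptions,
# not the notebook's own code.
tfd = tfp.distributions

x32 = x.astype(np.float32)
mu = tf.Variable(0.0)
log_sigma = tf.Variable(0.0)
optimizer = tf.keras.optimizers.Adam(learning_rate=0.1)

for _ in range(500):
    with tf.GradientTape() as tape:
        dist = tfd.Normal(loc=mu, scale=tf.exp(log_sigma))
        nll = -tf.reduce_mean(dist.log_prob(x32))   # negative log-likelihood
    grads = tape.gradient(nll, [mu, log_sigma])
    optimizer.apply_gradients(zip(grads, [mu, log_sigma]))

print(mu.numpy(), tf.exp(log_sigma).numpy())  # should approach true_mu and true_std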
import os
import torch
from skimage import io, transform
import numpy as np
from torch.utils.data import Dataset, DataLoader
import imgaug as ia
from sklearn.model_selection import StratifiedKFold
import cv2
root_dir = '/data1/trinh/data/patches_data/SBP/Prostate_Dataset/patches_750_r7b3/'
train_dirs = ['11S-1_1(x400)', '11S-1_2(x400)', '11S-1_3(x400)','11S-1_6(x400)']
valid_dirs = ['11S-1_4(x400)', '11S-1_5(x400)']
class ToTensor(object):
"""
Transform (augmentation) class that converts ndarrays in a sample to Tensors.
"""
# swap color axis because
# input : numpy image: H x W x C
# output: torch image: C X H X W
def __call__(self, image):
image = image.transpose((2, 0, 1))
return torch.from_numpy(image)
def read_prostate_dataset():
# build the whole dataset lists
# input : the dataset root path (root_dir combined with the train/valid directories above)
# output : lists of file paths and class labels for the train and validation splits
train_x, train_y = [], []
valid_x, valid_y = [], []
for train_dir in train_dirs:
train_root = root_dir + train_dir
for(path, dir, filenames) in os.walk(train_root):
for filename in filenames:
file_path = os.path.join(path, filename)
if path[-6:] == 'benign':
y_class = 0
elif path[-6:] == 'grade3':
y_class = 1
elif path[-6:] == 'grade4':
y_class = 2
train_x.append(file_path)
train_y.append(y_class)
for valid_dir in valid_dirs:
valid_root = root_dir + valid_dir
for(path, dir, filenames) in os.walk(valid_root):
for filename in filenames:
file_path = os.path.join(path, filename)
if path[-6:] == 'benign':
y_class = 0
elif path[-6:] == 'grade3':
y_class = 1
elif path[-6:] == 'grade4':
y_class = 2
valid_x.append(file_path)
valid_y.append(y_class)
print(len(valid_x))
print('LOADED DATA')
print('---------# train_data : {}\n'
'benign class : {}\n'
'cancer1 : {}\n'
'cancer2 : {}\n'
'---------# valid_data : {}\n'
'benign class : {}\n'
'cancer1 : {}\n'
'cancer2 : {}\n'.format(
len(train_x), np.sum(np.asarray(train_y)==0),
np.sum(np.asarray(train_y) == 1),
np.sum(np.asarray(train_y) == 2),
len(valid_x), np.sum(np.asarray(valid_y) == 0),
np.sum(np.asarray(valid_y) == 1),
np.sum(np.asarray(valid_y) == 2)
)
)
train_x = np.array(train_x)
train_y = np.array(train_y)
valid_x = np.array(valid_x)
valid_y = np.array(valid_y)
for i in range(0,3):
if i == 2:
pass
else:
num_dup = int(round(np.sum(train_y == 1) / np.sum(train_y == i)))
idx = np.where(train_y == i)
data = train_x[idx]
labels = train_y[idx]
for num in range(num_dup-1):
train_x = np.concatenate([train_x, data])
train_y = np.concatenate([train_y, labels])
print('DUPLICATED DATA')
print('---------# train_data : {}\n'
'benign class : {}\n'
'cancer1 : {}\n'
'cancer2 : {}\n'
'---------# valid_data : {}\n'
'benign class : {}\n'
'cancer1 : {}\n'
'cancer2 : {}\n'.format(
len(train_x), np.sum(np.asarray(train_y)==0),
np.sum(
|
np.asarray(train_y)
|
numpy.asarray
|
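# The duplication loop above oversamples the minority classes by concatenating whole
# copies until their counts roughly match class 1. A more compact way to express the
# same idea with NumPy (a hedged sketch with toy data, equivalent only up to the same
# rounding of the duplication factor):
import numpy as np

labels = np.array([0, 0, 1, 1, 1, 1, 2, 2])
paths = np.array([f"img_{i}.png" for i in range(len(labels))])
reps = int(round(np.sum(labels == 1) / np.sum(labels == 0)))  # duplication factor for class 0
idx = np.where(labels == 0)[0]
paths = np.concatenate([paths, np.repeat(paths[idx], reps - 1)])
labels = np.concatenate([labels, np.repeat(labels[idx], reps - 1)])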
#!/usr/bin/env cdat
"""
# Cloud Property Histograms. Part I: Cloud Radiative Kernels. J. Climate, 25, 3715-3735. doi:10.1175/JCLI-D-11-00248.1.
# v2: This script is written to demonstrate how to compute the cloud feedback for a
# short (2-year) period of MPI-ESM-LR using the difference between amipFuture and amip runs.
# One should difference longer periods for more robust results -- these are just for demonstrative purposes
# Data that are used in this script:
# 1. model clisccp field
# 2. model rsuscs field
# 3. model rsdscs field
# 4. model tas field
# 5. cloud radiative kernels
# This script written by <NAME> (<EMAIL>) on 14 July 2017
"""
# IMPORT STUFF:
# =====================
from mpl_toolkits.basemap import Basemap
import cdms2 as cdms
import cdutil
import MV2 as MV
import numpy as np
import pylab as pl
import matplotlib as mpl
###########################################################################
# HELPFUL FUNCTIONS FOLLOW
###########################################################################
###########################################################################
def add_cyclic(data):
# Add Cyclic point around 360 degrees longitude:
lons = data.getLongitude()[:]
dx = np.gradient(lons)[-1]
data2 = data(longitude=(0, dx + np.max(lons)), squeeze=True)
return data2
###########################################################################
def nanarray(vector):
# this generates a masked array with the size given by vector
# example: vector = (90,144,28)
# similar to this=NaN*ones(x,y,z) in matlab
this = MV.zeros(vector)
this = MV.masked_where(this == 0, this)
return this
###########################################################################
def map_SWkern_to_lon(Ksw, albcsmap):
from scipy.interpolate import interp1d
# Map each location's clear-sky surface albedo to the correct albedo bin
# Ksw is size 12,7,7,lats,3
# albcsmap is size A,lats,lons
albcs = np.arange(0.0, 1.5, 0.5)
A = albcsmap.shape[0]
TT = Ksw.shape[1]
PP = Ksw.shape[2]
lenlat = Ksw.shape[3]
lenlon = albcsmap.shape[2]
SWkernel_map = nanarray((A, TT, PP, lenlat, lenlon))
for M in range(A):
MM = M
while MM > 11:
MM = MM - 12
for LA in range(lenlat):
alon = albcsmap[M, LA, :]
# interp1d can't handle mask but it can deal with NaN (?)
try:
alon2 = MV.where(alon.mask, np.nan, alon)
except:
alon2 = alon
if
|
np.ma.count(alon2)
|
numpy.ma.count
|
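# The function above is cut off right at the albedo check. A plausible continuation
# (a sketch of the usual approach, not necessarily the original implementation) is to
# interpolate the kernel along its clear-sky-albedo axis with interp1d and evaluate it
# at each longitude's albedo, clipped to the kernel's albedo bins:
import numpy as np
from scipy.interpolate import interp1d

albcs = np.arange(0.0, 1.5, 0.5)                 # kernel albedo bins: 0.0, 0.5, 1.0
Ksw_slice = np.random.rand(7, 7, 3)              # stand-in for Ksw[MM, :, :, LA, :]
alon2 = np.random.rand(144)                      # stand-in for one latitude row of albcsmap
f = interp1d(albcs, Ksw_slice, axis=-1)
vals = f(np.clip(alon2, albcs[0], albcs[-1]))    # shape (7, 7, lenlon)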
#
# The following code is taken from arviz library
# (https://github.com/arviz-devs/arviz), version 0.6.1,
# distributed under Apache License Version 2.0.
#
# Changes made to arviz code:
# * Replaced _logsumexp with logsumexp from scipy.special
#
# Original code comes from <NAME>, <NAME>:
# https://github.com/avehtari/PSIS
#
# Theory: https://arxiv.org/abs/1507.02646v5
#
import numpy as np
from scipy.special import logsumexp
def _psislw(log_weights, cutoff_ind, cutoffmin, k_min=1.0 / 3):
"""
Pareto smoothed importance sampling (PSIS) for a 1D vector.
Parameters
----------
log_weights : array
Array of length n_observations
cutoff_ind : int
cutoffmin : float
k_min : float
Returns
-------
lw_out : array
Smoothed log weights
kss : float
Pareto tail index
"""
x = np.asarray(log_weights)
# improve numerical accuracy
x -= np.max(x)
# sort the array
x_sort_ind = np.argsort(x)
# divide log weights into body and right tail
xcutoff = max(x[x_sort_ind[cutoff_ind]], cutoffmin)
expxcutoff = np.exp(xcutoff)
(tailinds,) =
|
np.where(x > xcutoff)
|
numpy.where
|
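# Downstream, the smoothed log-weights returned by _psislw are typically normalized in
# log space and used to weight posterior draws; a common diagnostic alongside the Pareto
# tail index is the effective sample size of the weights. A short sketch (the random lw
# below simply stands in for lw_out):
import numpy as np
from scipy.special import logsumexp

lw = np.random.randn(1000)          # hypothetical smoothed log-weights
w = np.exp(lw - logsumexp(lw))      # normalized importance weights, sum to 1
ess = 1.0 / np.sum(w ** 2)          # effective sample size
print(ess)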
import numpy as np
import cv2
import copy
from scipy.interpolate import RectBivariateSpline
import glob
def huber_loss(a, gamma=0.01):
return np.sum(np.square(gamma)*(np.sqrt(1+np.square(a/gamma))-1))
def LucasKanade(in_temp, in_temp_a, rectangle, s=np.zeros(2)):
x1, y1, x2, y2 = rectangle[0], rectangle[1], rectangle[2], rectangle[3]
temp_y, temp_x = np.gradient(in_temp_a)
ds = 1
thresh = 0.0001
#while np.square(ds).sum() > thresh:
while huber_loss(ds,1) > thresh:
s_x, s_y = s[0], s[1]
w_x1, w_y1, w_x2, w_y2 = x1 + s_x, y1 + s_y, x2 + s_x, y2 + s_y
u = np.linspace(x1, x2, 87)
v = np.linspace(y1, y2, 36)
u0, v0 = np.meshgrid(u, v)
w_u = np.linspace(w_x1, w_x2, 87)
w_v =
|
np.linspace(w_y1, w_y2, 36)
|
numpy.linspace
|
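# huber_loss above is the pseudo-Huber loss: for |a| much smaller than gamma it behaves
# like the quadratic a**2 / 2, and for |a| much larger than gamma it grows roughly
# linearly as gamma * |a|, which is what makes it robust to large residuals. A quick
# numerical check of both regimes:
import numpy as np

def pseudo_huber(a, gamma=0.01):
    return np.sum(np.square(gamma) * (np.sqrt(1 + np.square(a / gamma)) - 1))

print(pseudo_huber(np.array([1e-4])), 0.5 * 1e-4 ** 2)   # both ~5e-9 (quadratic regime)
print(pseudo_huber(np.array([10.0])), 0.01 * 10.0)       # both ~0.1   (linear regime)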
# coding: utf-8
# ### Load the required libraries
# In[1]:
import numpy as np
import os
import sys
from keras.models import Model
from keras import backend as K
from keras.optimizers import Adam
from keras.utils import plot_model
import tensorflow as tf
from numpy import inf
import matplotlib
matplotlib.use('agg')
import cv2
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import shutil
import SimpleITK as sitk
# Change to your own directory
sys.path.append('../')
import os.path
from nets.unet_HW import build_net
# from nets.custom_losses import (exp_dice_loss, exp_categorical_crossentropy, combine_loss, correlation_crossentropy_2D)
from nets.custom_losses import *
from utils.segmentation_training_mc import segmentation_training
from utils.kerasutils import get_image, correct_data_format, save_model_summary, get_channel_axis
from utils.imageutils import map_label
from utils.input_data_mc import InputSegmentationArrays
# from IPython import get_ipython
import pandas as pd
import cv2
import random
from scipy import ndarray
import skimage as sk
from skimage import transform
from keras.preprocessing.image import ImageDataGenerator
# When displaying figures have them as inline with notebook and not as standalone popups
# get_ipython().magic(u'matplotlib inline')
# alpha=float(sys.argv[2])
# exp=float(sys.argv[3])
cc_weight=0
image_weight=float(sys.argv[2])
# interval=int(sys.argv[3])
N_epoch=int(sys.argv[3])
N_per_epoch=int(sys.argv[4])
output_folder=sys.argv[5]
model_name=sys.argv[6]
N_labeled=int(sys.argv[7])
output_model_path=output_folder+'/'+model_name
print('model_name')
print(model_name)
# print('alpha:', alpha)
# print('exp: ',exp)
# print('cc_weight: ', cc_weight)
print('image_weight: ', image_weight)
print('N_epoch',N_epoch)
print('N_per_epoch',N_per_epoch)
print('N_labeled',N_labeled)
if not os.path.exists(output_folder):
os.makedirs(output_folder)
# print('segmentation file',seg_name)
#
if K.backend() == 'tensorflow':
# Use only GPU #X (selecting it with tf.device('/gpu:X') does not work)
os.environ['CUDA_VISIBLE_DEVICES'] = sys.argv[1]
# Automatically choose an existing and supported device if the specified one does not exist
config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
# To constrain the use of gpu memory, otherwise all memory is used
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
K.set_session(sess)
suffix = 'nlst'
nlabel=5
ncluster=(np.zeros(nlabel, int) + 1)  # np.int was removed in NumPy 1.24; the builtin int behaves the same here
# ncluster[0]=5
class_weights=np.ones(nlabel,np.float32)
data_table = pd.read_csv('./data/3D/train_list', delimiter=' ')
train_img_files = pd.read_csv('./data/3D/train_list', delimiter=' ')['file'].values
train_seg_files = pd.read_csv('./data/3D/train_label_list', delimiter=' ')['file'].values
test_img_files = pd.read_csv('./data/3D/test_list', delimiter=' ')['file'].values
test_seg_files = pd.read_csv('./data/3D/test_label_list', delimiter=' ')['file'].values
valid_img_files = pd.read_csv('./data/3D/valid_list', delimiter=' ')['file'].values
valid_seg_files = pd.read_csv('./data/3D/valid_label_list', delimiter=' ')['file'].values
unlabeled_img_files = pd.read_csv('./data/3D/unlabeled_list_new.txt', delimiter=' ')['file'].values
train_IDs = pd.read_csv('./data/3D/train_IDs', delimiter=' ')['ID'].values
train_flags = pd.read_csv('./data/3D/train_IDs', delimiter=' ')['Label'].values
valid_IDs = pd.read_csv('./data/3D/valid_IDs', delimiter=' ')['ID'].values
test_IDs = pd.read_csv('./data/3D/test_IDs', delimiter=' ')['ID'].values
print(train_img_files)
print(valid_IDs)
print(test_IDs)
def random_rotation(image, seg):
# pick a random rotation angle between -50 and +50 degrees
random_degree = random.uniform(-50, 50)
rotated_img=sk.transform.rotate(image, random_degree, order=1)
rotated_seg=sk.transform.rotate(seg, random_degree, order=0)
return [rotated_img, rotated_seg]
def to_categorical(y, num_classes=None):
"""Converts a class vector (integers) to binary class matrix.
E.g. for use with categorical_crossentropy.
# Arguments
y: class vector to be converted into a matrix
(integers from 0 to num_classes).
num_classes: total number of classes.
# Returns
A binary matrix representation of the input.
"""
y = np.array(y, dtype='int')
input_shape = y.shape
if input_shape and input_shape[-1] == 1 and len(input_shape) > 1:
input_shape = tuple(input_shape[:-1])
y = y.ravel()
if not num_classes:
num_classes = np.max(y) + 1
n = y.shape[0]
categorical = np.zeros((n, num_classes))
categorical[np.arange(n), y] = 1
output_shape = input_shape + (num_classes,)
categorical = np.reshape(categorical, output_shape)
return categorical
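# Quick worked example of the helper above:
#   to_categorical([0, 2, 1], num_classes=3)
# returns
#   [[1., 0., 0.],
#    [0., 0., 1.],
#    [0., 1., 0.]]
# i.e. one row per sample with a 1 in the column of its class index.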
def transfer_seg(seg):
# seg[seg == 63] = 0
# seg[seg == 95] = 1 # Membrane
# seg[seg == 80] = 2 # Nucleus
# seg[seg == 127] = 3 # Granules
# seg[seg == 47] = 4 # Mito
# seg[seg == 112] = 1 # Lipid
#
# seg[seg == 3] = 1 # Granules
# seg[seg == 4] = 1 # Mito
return seg
def transfer_image(img):
# max_v=np.max(img)
# min_v=np.min(img)
#
# new_img=(img-min_v)*255/(max_v-min_v+1E-10)
# return new_img
return img * 10000
'''
Generator for images and labels
'''
def generator_train_from_filelist(img_list, seg_list, unlabeled_list, angle=0.0, scale=0):
batch_size=slice_neighbor*2+1
# print(slice_neighbor, batch_size, interval)
data_gen_args = dict(featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=30,
width_shift_range=0.,
height_shift_range=0.,
zoom_range=0.1)
image_datagen = ImageDataGenerator(**data_gen_args)
list_L=len(img_list)-1
if list_L>N_labeled:
list_L=N_labeled
while 1:
# np.random
# print(, len(seg_list))
# kk
index = random.randint(0, list_L)
flag=1
while flag>0:
imfn = 'data/3D/Image_train_3D/' + img_list[index].rsplit('_', 1)[0] + "/z/" + img_list[index].rsplit('_', 1)[0] + "_2D_z_" + img_list[index].rsplit('_', 1)[1]
segfn = 'data/3D/Label_train_3D/' + img_list[index].rsplit('_', 1)[0] + "/z/" + seg_list[index].rsplit('_', 1)[0] + "_2D_z_" + seg_list[index].rsplit('_', 1)[1]
seg_supervised = sitk.GetArrayFromImage(sitk.ReadImage(segfn))
if np.max(seg_supervised)==0:
index = random.randint(0, list_L)
else:
flag=0
# print(imfn)
img_supervised = sitk.GetArrayFromImage(sitk.ReadImage(imfn))
img_supervised = np.squeeze(img_supervised)
# print(np.unique(seg_supervised))
seg_supervised = transfer_seg(seg_supervised)
seg_supervised = np.squeeze(seg_supervised)
# print(np.unique(seg_supervised))
# kk
#index = random.randint(0, len(unlabeled_list)-1)
#imfn = 'data/3D/Unlabeled_3D/' + unlabeled_list[index].rsplit('_', 1)[0] + "/z/" + unlabeled_list[index].rsplit('_', 1)[0] + "_2D_z_" + unlabeled_list[index].rsplit('_', 1)[1]
#img_unsupervised = sitk.GetArrayFromImage(sitk.ReadImage(imfn))
#img_unsupervised = np.squeeze(img_unsupervised)
rangle=
|
np.random.random()
|
numpy.random.random
|
import os
import matplotlib.pyplot as plt
import numpy as np
import torch
from mpl_toolkits.mplot3d import Axes3D
# coding: utf-8
__all__ = ['Ackley', 'Sphere', 'Rosenbrock', 'Beale', 'GoldsteinPrice', 'Booth',
'BukinN6', 'Matyas', 'LeviN13', 'ThreeHumpCamel', 'Easom', 'Eggholder',
'McCormick', 'SchafferN2', 'SchafferN4', 'StyblinskiTang', 'DeJongsF1',
'DeJongsF2', 'DeJongsF3', 'DeJongsF4', 'DeJongsF5', 'Ellipsoid', 'KTablet',
'FiveWellPotential', 'WeightedSphere', 'HyperEllipsodic',
'SumOfDifferentPower', 'Griewank', 'Michalewicz', 'Perm', 'Rastrigin',
'Schwefel', 'SixHumpCamel', 'Shuberts', 'XinSheYang', 'Zakharov']
__oneArgument__ = ['Beale', 'GoldsteinPrice', 'Booth', 'BukinN6', 'Matyas', 'LeviN13',
'ThreeHumpCamel', 'Easom', 'Eggholder', 'McCormick', 'SchafferN2',
'SchafferN4', 'DeJongsF3', 'DeJongsF4', 'DeJongsF5',
'FiveWellPotential', 'SixHumpCamel', 'Shuberts']
__twoArgument__ = ['Ackley', 'Sphere', 'Rosenbrock', 'StyblinskiTang', 'DeJongsF1',
'DeJongsF2', 'Ellipsoid', 'KTablet', 'WeightedSphere',
'HyperEllipsodic', 'SumOfDifferentPower', 'Griewank',
'Michalewicz', 'Rastrigin', 'Schwefel', 'XinSheYang', 'Zakharov']
__threeArgument__ = ['Perm']
##### Basic function #####
class OptimalBasic:
def __init__(self, variable_num):
self.variable_num = variable_num
self.max_search_range = np.array([0] * self.variable_num)
self.min_search_range = np.array([0] * self.variable_num)
self.optimal_solution = np.array([0] * self.variable_num)
self.global_optimum_solution = 0
self.plot_place = 0.25
self.func_name = ''
self.save_dir = os.path.dirname(os.path.abspath(__file__)) + '\\img\\'
if (os.path.isdir(self.save_dir) == False):
os.mkdir(self.save_dir)
def get_global_optimum_solution(self):
return self.global_optimum_solution
def get_optimal_solution(self):
return self.optimal_solution
def get_search_range(self):
return [self.max_search_range, self.min_search_range]
def get_func_val(self, variables):
return -1
def plot(self):
x = np.arange(self.min_search_range[0], self.max_search_range[0],
self.plot_place, dtype=np.float32)
y = np.arange(self.min_search_range[1], self.max_search_range[1],
self.plot_place, dtype=np.float32)
X, Y = np.meshgrid(x, y)
Z = []
for xy_list in zip(X, Y):
z = []
for xy_input in zip(xy_list[0], xy_list[1]):
tmp = list(xy_input)
tmp.extend(list(self.optimal_solution[0:self.variable_num - 2]))
z.append(self.get_func_val(np.array(tmp)))
Z.append(z)
Z = np.array(Z)
fig = plt.figure()
ax = Axes3D(fig)
ax.plot_wireframe(X, Y, Z)
plt.show()
def save_fig(self):
x = np.arange(self.min_search_range[0], self.max_search_range[0],
self.plot_place, dtype=np.float32)
y = np.arange(self.min_search_range[1], self.max_search_range[1],
self.plot_place, dtype=np.float32)
X, Y = np.meshgrid(x, y)
Z = []
for xy_list in zip(X, Y):
z = []
for xy_input in zip(xy_list[0], xy_list[1]):
tmp = list(xy_input)
tmp.extend(list(self.optimal_solution[0:self.variable_num - 2]))
z.append(self.get_func_val(np.array(tmp)))
Z.append(z)
Z = np.array(Z)
fig = plt.figure()
ax = Axes3D(fig)
ax.plot_wireframe(X, Y, Z)
plt.savefig(self.save_dir + self.func_name + '.png')
plt.close()
##### Optimization benchmark function group #####
##### Class Ackley function #####
class Ackley(OptimalBasic):
def __init__(self, variable_num=2):
super().__init__(variable_num)
self.max_search_range = torch.tensor([32.768] * self.variable_num)
self.min_search_range = torch.tensor([-32.768] * self.variable_num)
self.optimal_solution = torch.tensor([0] * self.variable_num)
self.global_optimum_solution = 0
self.func_name = 'Ackley'
def get_func_val(self, variables):
tmp1 = torch.tensor(20. - 20. * torch.exp(
-0.2 * torch.sqrt(1. / self.variable_num * torch.sum(
torch.FloatTensor([v * v for v in variables])))))
pi = 2.0 * torch.acos(torch.zeros(1))  # acos(0) = pi/2, doubled to get pi
tmp2 = torch.tensor(
torch.exp(torch.tensor(1.0)) - torch.exp(1. / self.variable_num * torch.sum(
torch.FloatTensor(
[torch.cos(torch.tensor(v * 2.0 * pi)) for v in variables]))))
return tmp1 + tmp2
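# Hedged sanity check for the class above (not part of the original module): evaluating
# Ackley at its optimum should give approximately zero, e.g.
#   f = Ackley(variable_num=2)
#   f.get_func_val(torch.tensor([0.0, 0.0]))   # ~0, since both exponential terms reduce to 1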
##### Class Sphere function #####
class Sphere(OptimalBasic):
def __init__(self, variable_num):
super().__init__(variable_num)
self.max_search_range = np.array([1000] * self.variable_num) # nearly inf
self.min_search_range = np.array([-1000] * self.variable_num) # nearly inf
self.optimal_solution = np.array([1] * self.variable_num)
self.global_optimum_solution = 0
self.plot_place = 10
self.func_name = 'Sphere'
def get_func_val(self, variables):
return np.sum(np.square(variables))
##### Class Rosenbrock function #####
class Rosenbrock(OptimalBasic):
def __init__(self, variable_num=2):
super().__init__(variable_num)
self.max_search_range = torch.tensor([5] * self.variable_num)
self.min_search_range = torch.tensor([-5] * self.variable_num)
self.optimal_solution = torch.tensor([1] * self.variable_num)
self.global_optimum_solution = 0
self.plot_place = 0.25
self.func_name = 'Rosenbrock'
def get_func_val(self, variables):
f = 0
for i in range(self.variable_num - 1):
f += 100 * torch.pow(variables[i + 1] - torch.pow(variables[i], 2),
2) + torch.pow(variables[i] - 1, 2)
return f
##### Class Beale function #####
class Beale(OptimalBasic):
def __init__(self):
super().__init__(2)
self.max_search_range = torch.tensor([4.5] * self.variable_num)
self.min_search_range = torch.tensor([-4.5] * self.variable_num)
self.optimal_solution = torch.tensor([3., 0.5])
self.global_optimum_solution = 0
self.plot_place = 0.25
self.func_name = 'Beale'
def get_func_val(self, variables):
tmp1 = torch.pow(1.5 - variables[0] + variables[0] * variables[1], 2)
tmp2 = torch.pow(
2.25 - variables[0] + variables[0] * torch.pow(variables[1], 2), 2)
tmp3 = torch.pow(
2.625 - variables[0] + variables[0] * torch.pow(variables[1], 3), 2)
return tmp1 + tmp2 + tmp3
##### Class Goldstein-Price function #####
class GoldsteinPrice(OptimalBasic):
def __init__(self):
super().__init__(2)
self.max_search_range = np.array([2.] * self.variable_num)
self.min_search_range = np.array([-2.] * self.variable_num)
self.optimal_solution = np.array([0., -1.])
self.global_optimum_solution = 3
self.plot_place = 0.25
self.func_name = 'GoldsteinPrice'
def get_func_val(self, variables):
tmp1 = (1 + torch.pow(variables[0] + variables[1] + 1, 2) * (
19 - 14 * variables[0] + 3 * torch.pow(variables[0], 2) - 14 *
variables[1] + 6 * variables[0] *
variables[1] + 3 * torch.pow(variables[1], 2)))
tmp2 = (30 + (torch.pow(2 * variables[0] - 3 * variables[1], 2) * (
18 - 32 * variables[0] + 12 * torch.pow(variables[0], 2) + 48 *
variables[1] - 36 * variables[0] *
variables[1] + 27 * torch.pow(variables[1], 2))))
return tmp1 * tmp2
##### Class Booth function #####
class Booth(OptimalBasic):
def __init__(self):
super().__init__(2)
self.max_search_range = np.array([10.] * self.variable_num)
self.min_search_range = np.array([-10.] * self.variable_num)
self.optimal_solution = np.array([1., 3.])  # Booth's minimum is at (1, 3), where the value is 0
self.global_optimum_solution = 0
self.func_name = 'Booth'
def get_func_val(self, variables):
tmp1 = np.power(variables[0] + 2 * variables[1] - 7, 2)
tmp2 = np.power(2 * variables[0] + variables[1] - 5, 2)
return tmp1 + tmp2
##### Class Bukin function N.6 #####
class BukinN6(OptimalBasic):
def __init__(self):
super().__init__(2)
self.max_search_range = np.array([-5., 3.])
self.min_search_range = np.array([-15., -3.])
self.optimal_solution = np.array([-10., 1.])
self.global_optimum_solution = 0
self.func_name = 'BukinN6'
def get_func_val(self, variables):
tmp1 = 100 * np.sqrt(
np.absolute(variables[1] - 0.01 * np.power(variables[0], 2)))
tmp2 = 0.01 * np.absolute(variables[0] + 10)
return tmp1 + tmp2
##### Class Matyas function #####
class Matyas(OptimalBasic):
def __init__(self):
super().__init__(2)
self.max_search_range = np.array([10.] * self.variable_num)
self.min_search_range = np.array([-10.] * self.variable_num)
self.optimal_solution = np.array([0., 0.])
self.global_optimum_solution = 0
self.func_name = 'Matyas'
def get_func_val(self, variables):
tmp1 = 0.26 * (np.power(variables[0], 2) + np.power(variables[1], 2))
tmp2 = 0.48 * variables[0] * variables[1]
return tmp1 - tmp2
##### Class Levi function N.13 #####
class LeviN13(OptimalBasic):
def __init__(self):
super().__init__(2)
self.max_search_range = np.array([10.] * self.variable_num)
self.min_search_range = np.array([-10.] * self.variable_num)
self.optimal_solution = np.array([1., 1.])
self.global_optimum_solution = 0
self.func_name = 'LeviN13'
def get_func_val(self, variables):
tmp1 = np.power(np.sin(3 * np.pi * variables[0]), 2)
tmp2 = np.power(variables[0] - 1, 2) * (
1 + np.power(np.sin(3 * np.pi * variables[1]), 2))
tmp3 = np.power(variables[1] - 1, 2) * (
1 + np.power(np.sin(2 * np.pi * variables[1]), 2))
return tmp1 + tmp2 + tmp3
##### Class Three-hump camel function #####
class ThreeHumpCamel(OptimalBasic):
def __init__(self):
super().__init__(2)
self.max_search_range = np.array([5.] * self.variable_num)
self.min_search_range = np.array([-5.] * self.variable_num)
self.optimal_solution = np.array([0., 0.])
self.global_optimum_solution = 0
self.func_name = 'ThreeHumpCamel'
def get_func_val(self, variables):
return 2 * np.power(variables[0], 2) - 1.05 * np.power(variables[0],
4) + np.power(
variables[0], 6) / 6 + \
variables[0] * variables[1] + np.power(variables[1], 2)
##### Class Easom function #####
class Easom(OptimalBasic):
def __init__(self):
super().__init__(2)
self.max_search_range = np.array([100.] * self.variable_num)
self.min_search_range = np.array([-100.] * self.variable_num)
self.optimal_solution = np.array([np.pi, np.pi])
self.global_optimum_solution = -1
self.plot_place = 10
self.func_name = 'Easom'
def get_func_val(self, variables):
return -1.0 * np.cos(variables[0]) * np.cos(variables[1]) * np.exp(
-(np.power(variables[0] - np.pi, 2) + np.power(variables[1] - np.pi, 2)))
##### Class Eggholder function #####
class Eggholder(OptimalBasic):
def __init__(self):
super().__init__(2)
self.max_search_range = np.array([512.] * self.variable_num)
self.min_search_range = np.array([-512.] * self.variable_num)
self.optimal_solution = np.array([512., 404.2319])
self.global_optimum_solution = -959.6407
self.plot_place = 5
self.func_name = 'Eggholder'
def get_func_val(self, variables):
tmp1 = -(variables[1] + 47) * np.sin(
np.sqrt(np.absolute(variables[1] + variables[0] / 2 + 47)))
tmp2 = -variables[0] * np.sin(
np.sqrt(np.absolute(variables[0] - (variables[1] + 47))))
return tmp1 + tmp2
##### Class McCormick function #####
class McCormick(OptimalBasic):
def __init__(self):
super().__init__(2)
self.max_search_range = np.array([4.] * self.variable_num)
self.min_search_range = np.array([-1.5, -3.])
self.optimal_solution = np.array([-0.54719, -1.54719])
self.global_optimum_solution = -1.9133
self.func_name = 'McCormick'
def get_func_val(self, variables):
tmp1 = np.sin(variables[0] + variables[1]) + np.power(
variables[0] - variables[1], 2)
tmp2 = -1.5 * variables[0] + 2.5 * variables[1] + 1
return tmp1 + tmp2
##### Class Schaffer function N.2 #####
class SchafferN2(OptimalBasic):
def __init__(self):
super().__init__(2)
self.max_search_range = np.array([100.] * self.variable_num)
self.min_search_range = np.array([-100] * self.variable_num)
self.optimal_solution = np.array([0., 0.])
self.global_optimum_solution = 0
self.plot_place = 10
self.func_name = 'SchafferN2'
def get_func_val(self, variables):
tmp1 = np.power(np.sin(np.power(variables[0], 2) - np.power(variables[1], 2)),
2) - 0.5
tmp2 = np.power(
1 + 0.001 * (np.power(variables[0], 2) + np.power(variables[1], 2)), 2)
return 0.5 + tmp1 / tmp2
##### Class Schaffer function N.4 #####
class SchafferN4(OptimalBasic):
def __init__(self):
super().__init__(2)
self.max_search_range = np.array([100.] * self.variable_num)
self.min_search_range = np.array([-100] * self.variable_num)
self.optimal_solution = np.array([0., 1.25313])
self.global_optimum_solution = 0
self.plot_place = 10
self.func_name = 'SchafferN4'
def get_func_val(self, variables):
tmp1 = np.power(np.cos(
np.sin(np.absolute(np.power(variables[0], 2) - np.power(variables[1], 2)))),
2) - 0.5
tmp2 = np.power(
1 + 0.001 * (np.power(variables[0], 2) + np.power(variables[1], 2)), 2)
return 0.5 + tmp1 / tmp2
##### Class Styblinski-Tang function #####
class StyblinskiTang(OptimalBasic):
def __init__(self, variable_num):
super().__init__(variable_num)
self.max_search_range = np.array([5.] * self.variable_num)
self.min_search_range = np.array([-5.] * self.variable_num)
self.optimal_solution = np.array([-2.903534] * self.variable_num)
self.global_optimum_solution = -39.166165 * self.variable_num
self.func_name = 'StyblinskiTang'
def get_func_val(self, variables):
tmp1 = 0
for i in range(self.variable_num):
tmp1 += np.power(variables[i], 4) - 16 * np.power(variables[i], 2) + 5 * \
variables[i]
return tmp1 / 2
##### Class De Jong's function F1 #####
class DeJongsF1(Sphere):
def __init__(self, variable_num):
super().__init__(variable_num)
self.func_name = 'DeJongsF1'
##### Class De Jong's function F2 #####
class DeJongsF2(Rosenbrock):
def __init__(self, variable_num):
super().__init__(variable_num)
self.func_name = 'DeJongsF2'
##### Class De Jong's function F3 #####
class DeJongsF3(OptimalBasic):
def __init__(self):
super().__init__(5)
self.max_search_range = np.array([5.12] * self.variable_num)
self.min_search_range = np.array([-5.12] * self.variable_num)
self.optimal_solution = np.array([-5.12] * self.variable_num)
self.global_optimum_solution = 0
self.func_name = 'DeJongsF3'
def get_func_val(self, variables):
tmp1 = 0
for i in range(self.variable_num):
tmp1 += np.floor(variables[i])
return tmp1
##### Class De Jong's function F4 #####
class DeJongsF4(OptimalBasic):
def __init__(self):
super().__init__(30)
self.max_search_range = np.array([1.28] * self.variable_num)
self.min_search_range = np.array([-1.28] * self.variable_num)
self.optimal_solution = np.array([0.] * self.variable_num)
self.global_optimum_solution = np.random.normal(0, 1)
self.func_name = 'DeJongsF4'
def get_func_val(self, variables):
tmp1 = 0
for i in range(self.variable_num):
tmp1 += (i + 1) * np.power(variables[i], 4)
return tmp1 + np.random.normal(0, 1)
##### Class De Jong's function F5 #####
class DeJongsF5(OptimalBasic):
def __init__(self):
super().__init__(25)
self.max_search_range = np.array([65.536] * self.variable_num)
self.min_search_range = np.array([-65.536] * self.variable_num)
self.optimal_solution = np.array([-32.32] * self.variable_num)
self.global_optimum_solution = 1.
self.plot_place = 1.5
self.func_name = 'DeJongsF5'
def get_func_val(self, variables):
A = np.zeros([2, 25])
a = [-32, 16, 0, 16, 32]
A[0, :] = np.tile(a, (1, 5))
tmp = []
for x in a:
tmp_list = [x] * 5
tmp.extend(tmp_list)
A[1, :] = tmp
sum = 0
for i in range(self.variable_num):
a1i = A[0, i]
a2i = A[1, i]
term1 = i
term2 = np.power(variables[0] - a1i, 6)
term3 = np.power(variables[1] - a2i, 6)
new = 1 / (term1 + term2 + term3)
sum += new
return 1 / (0.002 + sum)
##### Class Ellipsoid function #####
class Ellipsoid(OptimalBasic):
def __init__(self, variable_num=2):
super().__init__(variable_num)
self.max_search_range = np.array([5.12] * self.variable_num)
self.min_search_range = np.array([-5.12] * self.variable_num)
self.optimal_solution = np.array([0.] * self.variable_num)
self.global_optimum_solution = 0.
self.func_name = 'Ellipsoid'
def get_func_val(self, variables):
tmp = 0
for i in range(self.variable_num):
tmp += torch.pow(
torch.pow(torch.tensor(1000.0), i / (self.variable_num - 1)) *
variables[i], 2)
return tmp
##### Class k-tablet function #####
class KTablet(OptimalBasic):
def __init__(self, variable_num):
super().__init__(variable_num)
self.max_search_range = np.array([5.12] * self.variable_num)
self.min_search_range = np.array([-5.12] * self.variable_num)
self.optimal_solution = np.array([0.] * self.variable_num)
self.global_optimum_solution = 0.
self.func_name = 'KTablet'
def get_func_val(self, variables):
tmp = 0
k = int(self.variable_num / 4)
for i in range(k):
tmp += np.power(variables[i], 2)  # k-tablet: the first k coordinates also enter squared
for i in range(k, self.variable_num):
tmp += np.power(100 * variables[i], 2)
return tmp
##### Class Five-well potential function #####
# Not yet checked to be working properly
class FiveWellPotential(OptimalBasic):
def __init__(self):
super().__init__(2)
self.max_search_range = np.array([20.] * self.variable_num)
self.min_search_range = np.array([-20.] * self.variable_num)
self.optimal_solution = np.array([4.92, -9.89])
self.global_optimum_solution = -1.4616
self.plot_place = 1
self.func_name = 'FiveWellPotential'
def get_func_val(self, variables):
tmp1 = []
tmp1.append(1 - 1 / (
1 + 0.05 * np.power(np.power(variables[0], 2) + (variables[1] - 10),
2)))
tmp1.append(-1 / (1 + 0.05 * (
np.power(variables[0] - 10, 2) + np.power(variables[1], 2))))
tmp1.append(-1 / (1 + 0.03 * (
np.power(variables[0] + 10, 2) + np.power(variables[1], 2))))
tmp1.append(-1 / (1 + 0.05 * (
np.power(variables[0] - 5, 2) + np.power(variables[1] + 10, 2))))
tmp1.append(-1 / (1 + 0.1 * (
np.power(variables[0] + 5, 2) + np.power(variables[1] + 10, 2))))
tmp1_sum = 0
for x in tmp1:
tmp1_sum += x
tmp2 = 1 + 0.0001 * np.power(
(np.power(variables[0], 2) + np.power(variables[1], 2)), 1.2)
return tmp1_sum * tmp2
##### Class Weighted Sphere function or hyper ellipsodic function #####
class WeightedSphere(OptimalBasic):
def __init__(self, variable_num):
super().__init__(variable_num)
self.max_search_range = np.array([5.12] * self.variable_num)
self.min_search_range = np.array([-5.12] * self.variable_num)
self.optimal_solution = np.array([0.] * self.variable_num)
self.global_optimum_solution = 0.
self.func_name = 'WeightedSphere'
def get_func_val(self, variables):
tmp = 0
for i in range(self.variable_num):
tmp += (i + 1) * np.power(variables[i], 2)
return tmp
class HyperEllipsodic(WeightedSphere):
def __init__(self, variable_num):
super().__init__(variable_num)
self.func_name = 'HyperEllipsodic'
##### Class Sum of different power function #####
class SumOfDifferentPower(OptimalBasic):
def __init__(self, variable_num):
super().__init__(variable_num)
self.max_search_range = np.array([1.] * self.variable_num)
self.min_search_range = np.array([-1.] * self.variable_num)
self.optimal_solution = np.array([0.] * self.variable_num)
self.global_optimum_solution = 0.
self.func_name = 'SumOfDifferentPower'
def get_func_val(self, variables):
tmp = 0
for i in range(self.variable_num):
tmp += np.power(np.absolute(variables[i]), i + 2)
return tmp
##### Class Griewank function #####
class Griewank(OptimalBasic):
def __init__(self, variable_num):
super().__init__(variable_num)
self.max_search_range = np.array([600.] * self.variable_num)
self.min_search_range = np.array([-600.] * self.variable_num)
self.optimal_solution = np.array([0.] * self.variable_num)
self.global_optimum_solution = 0.
self.plot_place = 10.
self.func_name = 'Griewank'
def get_func_val(self, variables):
tmp1 = 0
tmp2 = 1
for i in range(self.variable_num):
tmp1 += np.power(variables[i], 2)
tmp2 = tmp2 * np.cos(variables[i] / np.sqrt(i + 1))
return tmp1 / 4000 - tmp2 + 1  # the +1 keeps the global minimum value at 0
##### Class Michalewicz function #####
class Michalewicz(OptimalBasic):
def __init__(self, variable_num):
super().__init__(variable_num)
self.max_search_range = np.array([np.pi] * self.variable_num)
self.min_search_range = np.array([0.] * self.variable_num)
self.optimal_solution = np.array([0.] * self.variable_num)
self.global_optimum_solution = -1.8013 # In case of variable_num == 2
self.plot_place = 0.1
self.func_name = 'Michalewicz'
def get_func_val(self, variables):
m = 10
tmp1 = 0
for i in range(self.variable_num):
tmp1 += np.sin(variables[i]) * np.power(
np.sin((i + 1) * np.power(variables[i], 2) / np.pi), 2 * m)
return -tmp1
##### Class Perm function #####
class Perm(OptimalBasic):
def __init__(self, variable_num, beta):
super().__init__(variable_num)
self.beta = beta
self.max_search_range = np.array([1.] * self.variable_num)
self.min_search_range = np.array([-1.] * self.variable_num)
tmp = []
for i in range(self.variable_num):
tmp.append(1 / (i + 1))
self.optimal_solution = np.array(tmp)
self.global_optimum_solution = 0.
self.plot_place = 0.1
self.func_name = 'Perm'
def get_func_val(self, variables):
tmp1 = 0
tmp2 = 0
for j in range(self.variable_num):
for i in range(self.variable_num):
tmp1 += (i + 1 + self.beta) * (
np.power(variables[i], j + 1) - np.power(1 / (i + 1),
j + 1))
tmp2 += np.power(tmp1, 2)
tmp1 = 0
return tmp2
##### Class Rastrigin function #####
class Rastrigin(OptimalBasic):
def __init__(self, variable_num):
super().__init__(variable_num)
self.max_search_range = np.array([5.12] * self.variable_num)
self.min_search_range =
|
np.array([-5.12] * self.variable_num)
|
numpy.array
|
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 3 14:10:12 2019
@author: Dominic
"""
from math import log, sqrt, exp
from numba import njit, float64, int64
import numpy as np
##########################################################################
# CIR Process
# dr = a(b-r) + sigma sqrt(r) dW
# Note that r can hit zero if 2.0 * a * b < sigma*sigma:
##########################################################################
# TO DO - DECIDE WHETHER TO OO MODEL
# CAN DO Z SCALING INSIDE NUMPY ?
# ANTITHETICS
from enum import Enum
class FinCIRNumericalScheme(Enum):
EULER = 1
LOGNORMAL = 2
MILSTEIN = 3
KAHLJACKEL = 4
EXACT = 5 # SAMPLES EXACT DISTRIBUTION
##########################################################################
# THIS CLASS IS NOT USED BUT MAY BE USED IF WE CREATE AN OO FRAMEWORK
class FinModelRatesCIR():
def __init__(self, a, b, sigma):
self._a = a
self._b = b
self._sigma = sigma
##########################################################################
@njit(fastmath=True, cache=True)
def meanr(r0, a, b, t):
''' Mean value of a CIR process after time t '''
mr = r0 * exp(-a * t) + b * (1.0 - exp(-a * t))
return mr
##########################################################################
@njit(fastmath=True, cache=True)
def variancer(r0, a, b, sigma, t):
''' Variance of a CIR process after time t '''
vr = r0 * sigma * sigma * (exp(-a * t) - exp(-2.0 * a * t)) / a
vr += b * sigma * sigma * ((1.0 - exp(-a * t))**2) / 2.0 / a
return vr
##########################################################################
@njit(
float64(
float64,
float64,
float64,
float64,
float64),
fastmath=True,
cache=True)
def zeroPrice(r0, a, b, sigma, t):
''' Price of a zero coupon bond in CIR model. '''
h = sqrt(a * a + 2.0 * sigma * sigma)
denom = 2.0 * h + (a + h) * (exp(h * t) - 1.0)
A = (2.0 * h * exp((a + h) * t / 2.0) /
denom)**(2.0 * a * b / sigma / sigma)
B = 2.0 * (exp(h * t) - 1.0) / denom
zcb = A * exp(-r0 * B)
return zcb
##########################################################################
@njit(
float64(
float64,
float64,
float64,
float64,
float64),
fastmath=True,
cache=True)
def draw(rt, a, b, sigma, dt):
''' Draw the next rate from the CIR model in a Monte Carlo simulation. '''
sigma2 = sigma * sigma
d = 4.0 * a * b / sigma2
l = 4.0 * a * exp(-a * dt) / sigma2 / (1.0 - exp(-a * dt)) * rt
c = sigma2 * (1.0 - exp(-a * dt)) / 4.0 / a
if d > 1:
Z =
|
np.random.normal()
|
numpy.random.normal
|
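# A minimal usage sketch for the CIR helpers above (illustrative parameter values, not
# part of the original module): check the Feller condition 2*a*b >= sigma**2, under
# which the rate stays strictly positive, then query the analytic moments and the
# zero-coupon bond price.
r0, a, b, sigma, t = 0.05, 0.5, 0.04, 0.1, 2.0
assert 2.0 * a * b >= sigma * sigma, "Feller condition violated: r can reach zero"
print(meanr(r0, a, b, t))                  # mean short rate after t years
print(variancer(r0, a, b, sigma, t))       # its variance
print(zeroPrice(r0, a, b, sigma, t))       # P(0, t) in the CIR model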
# ========================================
# [] File Name : map.py
#
# [] Creation Date : February 2018
#
# [] Created By : <NAME> (<EMAIL>)
# ========================================
"""
Implementation of a linear transformation on a multivariate Gaussian distribution.
"""
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from scipy.stats import multivariate_normal
color_map=[cm.winter,cm.autumn]
MEAN_VECTOR =
|
np.array([0,0])
|
numpy.array
|
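# The script above applies a linear transformation to a multivariate Gaussian; the
# relevant identity is that if x ~ N(mu, Sigma) and y = A x + b, then
# y ~ N(A mu + b, A Sigma A^T). A small self-contained check of that identity
# (the matrices below are illustrative, not the script's own values):
import numpy as np

mu = np.array([0.0, 0.0])
Sigma = np.array([[1.0, 0.3],
                  [0.3, 2.0]])
A = np.array([[2.0, 0.0],
              [1.0, 1.0]])
b = np.array([1.0, -1.0])

mu_y = A @ mu + b
Sigma_y = A @ Sigma @ A.T

samples = np.random.multivariate_normal(mu, Sigma, size=100000)
mapped = samples @ A.T + b
print(mu_y, mapped.mean(axis=0))             # should agree closely
print(Sigma_y, np.cov(mapped, rowvar=False))  # likewise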
# -*- coding: utf-8 -*-
import os
import numpy as np
import scipy.signal
import scipy.io
import time
import struct
from copy import deepcopy
# constants
NUM_HEADER_BYTES = 1024
SAMPLES_PER_RECORD = 1024
BYTES_PER_SAMPLE = 2
RECORD_SIZE = 4 + 8 + SAMPLES_PER_RECORD * BYTES_PER_SAMPLE + 10 # size of each continuous record in bytes
RECORD_MARKER = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 255])
# constants for pre-allocating matrices:
MAX_NUMBER_OF_SPIKES = int(1e6)
MAX_NUMBER_OF_RECORDS = int(1e6)
MAX_NUMBER_OF_EVENTS = int(1e6)
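# For reference, RECORD_SIZE above matches the layout read in loadContinuous below:
# 8-byte timestamp + 2-byte sample count + 2-byte recording number
# + 1024 samples * 2 bytes + 10-byte record marker = 2070 bytes per record.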
def load(filepath, dtype = float):
# redirects to code for individual file types
if 'continuous' in filepath:
data = loadContinuous(filepath, dtype)
elif 'spikes' in filepath:
data = loadSpikes(filepath)
elif 'events' in filepath:
data = loadEvents(filepath)
else:
raise Exception("Not a recognized file type. Please input a .continuous, .spikes, or .events file")
return data
def loadContinuous(filepath, dtype = float):
assert dtype in (float, np.int16), \
'Invalid data type specified for loadContinuous, valid types are float and np.int16'
print("Loading continuous data...")
ch = { }
#read in the data
f = open(filepath,'rb')
fileLength = os.fstat(f.fileno()).st_size
# calculate number of samples
recordBytes = fileLength - NUM_HEADER_BYTES
if recordBytes % RECORD_SIZE != 0:
raise Exception("File size is not consistent with a continuous file: may be corrupt")
nrec = recordBytes // RECORD_SIZE
nsamp = nrec * SAMPLES_PER_RECORD
# pre-allocate samples
samples = np.zeros(nsamp, dtype)
timestamps = np.zeros(nrec)
recordingNumbers = np.zeros(nrec)
indices = np.arange(0, nsamp + 1, SAMPLES_PER_RECORD, np.dtype(np.int64))
header = readHeader(f)
recIndices = np.arange(0, nrec)
for recordNumber in recIndices:
timestamps[recordNumber] = np.fromfile(f,np.dtype('<i8'),1) # little-endian 64-bit signed integer
N = np.fromfile(f,np.dtype('<u2'),1)[0] # little-endian 16-bit unsigned integer
#print index
if N != SAMPLES_PER_RECORD:
raise Exception('Found corrupted record in block ' + str(recordNumber))
recordingNumbers[recordNumber] = (np.fromfile(f,np.dtype('>u2'),1)) # big-endian 16-bit unsigned integer
if dtype == float: # Convert data to float array and convert bits to voltage.
data = np.fromfile(f,np.dtype('>i2'),N) * float(header['bitVolts']) # big-endian 16-bit signed integer, multiplied by bitVolts
else: # Keep data in signed 16 bit integer format.
data = np.fromfile(f,np.dtype('>i2'),N) # big-endian 16-bit signed integer
samples[indices[recordNumber]:indices[recordNumber+1]] = data
marker = f.read(10) # dump
#print recordNumber
#print index
ch['header'] = header
ch['timestamps'] = timestamps
ch['data'] = samples # OR use downsample(samples,1), to save space
ch['recordingNumber'] = recordingNumbers
f.close()
return ch
def loadSpikes(filepath):
'''
Loads spike waveforms and timestamps from filepath (should be .spikes file)
'''
data = { }
print('loading spikes...')
f = open(filepath, 'rb')
header = readHeader(f)
if float(header[' version']) < 0.4:
raise Exception('Loader is only compatible with .spikes files with version 0.4 or higher')
data['header'] = header
numChannels = int(header['num_channels'])
numSamples = 40 # **NOT CURRENTLY WRITTEN TO HEADER**
spikes = np.zeros((MAX_NUMBER_OF_SPIKES, numSamples, numChannels))
timestamps = np.zeros(MAX_NUMBER_OF_SPIKES)
source = np.zeros(MAX_NUMBER_OF_SPIKES)
gain = np.zeros((MAX_NUMBER_OF_SPIKES, numChannels))
thresh = np.zeros((MAX_NUMBER_OF_SPIKES, numChannels))
sortedId =
|
np.zeros((MAX_NUMBER_OF_SPIKES, numChannels))
|
numpy.zeros
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Test numpy functions and ufuncs on Masked arrays and quantities.
The tests here are fairly detailed but do not aim for complete
coverage. Complete coverage of all numpy functions is done
with less detailed tests in test_function_helpers.
"""
import pytest
import numpy as np
from numpy.testing import assert_array_equal
from astropy import units as u
from astropy.units import Quantity
from astropy.utils.masked.core import Masked
from .test_masked import (MaskedArraySetup, QuantitySetup, LongitudeSetup,
assert_masked_equal)
class MaskedUfuncTests(MaskedArraySetup):
@pytest.mark.parametrize('ufunc', (np.add, np.subtract, np.divide,
np.arctan2, np.minimum))
def test_2op_ufunc(self, ufunc):
ma_mb = ufunc(self.ma, self.mb)
expected_data = ufunc(self.a, self.b)
expected_mask = (self.ma.mask | self.mb.mask)
# Note: assert_array_equal also checks type, i.e., that, e.g.,
# Longitude decays into an Angle.
assert_array_equal(ma_mb.unmasked, expected_data)
assert_array_equal(ma_mb.mask, expected_mask)
@pytest.mark.parametrize('ufunc', (np.add, np.subtract, np.divide,
np.arctan2, np.minimum))
def test_ufunc_inplace(self, ufunc):
ma_mb = ufunc(self.ma, self.mb)
out = Masked(np.zeros_like(ma_mb.unmasked))
result = ufunc(self.ma, self.mb, out=out)
assert result is out
assert_masked_equal(result, ma_mb)
def test_ufunc_inplace_no_masked_input(self):
a_b = np.add(self.a, self.b)
out = Masked(np.zeros_like(a_b))
result = np.add(self.a, self.b, out=out)
assert result is out
assert_array_equal(result.unmasked, a_b)
assert_array_equal(result.mask, np.zeros(a_b.shape, bool))
def test_ufunc_inplace_error(self):
out = np.zeros(self.ma.shape)
with pytest.raises(TypeError):
np.add(self.ma, self.mb, out=out)
@pytest.mark.parametrize('ufunc', (np.add.outer, np.minimum.outer))
def test_2op_ufunc_outer(self, ufunc):
ma_mb = ufunc(self.ma, self.mb)
expected_data = ufunc(self.a, self.b)
expected_mask = np.logical_or.outer(self.mask_a, self.mask_b)
# Note: assert_array_equal also checks type, i.e., that, e.g.,
# Longitude decays into an Angle.
assert_array_equal(ma_mb.unmasked, expected_data)
assert_array_equal(ma_mb.mask, expected_mask)
def test_3op_ufunc(self):
ma_mb = np.clip(self.ma, self.b, self.c)
expected_data = np.clip(self.a, self.b, self.c)
expected_mask = self.mask_a
assert_array_equal(ma_mb.unmasked, expected_data)
assert_array_equal(ma_mb.mask, expected_mask)
@pytest.mark.parametrize('axis', (0, 1, None))
def test_add_reduce(self, axis):
ma_reduce = np.add.reduce(self.ma, axis=axis)
expected_data = np.add.reduce(self.a, axis=axis)
expected_mask = np.logical_or.reduce(self.ma.mask, axis=axis)
assert_array_equal(ma_reduce.unmasked, expected_data)
assert_array_equal(ma_reduce.mask, expected_mask)
out = Masked(np.zeros_like(ma_reduce.unmasked),
np.ones_like(ma_reduce.mask))
ma_reduce2 = np.add.reduce(self.ma, axis=axis, out=out)
assert ma_reduce2 is out
assert_masked_equal(ma_reduce2, ma_reduce)
def test_add_reduce_no_masked_input(self):
a_reduce = np.add.reduce(self.a, axis=0)
out = Masked(np.zeros_like(a_reduce), np.ones(a_reduce.shape, bool))
result = np.add.reduce(self.a, axis=0, out=out)
assert result is out
assert_array_equal(out.unmasked, a_reduce)
assert_array_equal(out.mask, np.zeros(a_reduce.shape, bool))
@pytest.mark.parametrize('axis', (0, 1, None))
def test_minimum_reduce(self, axis):
ma_reduce = np.minimum.reduce(self.ma, axis=axis)
expected_data = np.minimum.reduce(self.a, axis=axis)
expected_mask = np.logical_or.reduce(self.ma.mask, axis=axis)
assert_array_equal(ma_reduce.unmasked, expected_data)
assert_array_equal(ma_reduce.mask, expected_mask)
@pytest.mark.parametrize('axis', (0, 1, None))
def test_maximum_reduce(self, axis):
ma_reduce = np.maximum.reduce(self.ma, axis=axis)
expected_data = np.maximum.reduce(self.a, axis=axis)
expected_mask = np.logical_or.reduce(self.ma.mask, axis=axis)
assert_array_equal(ma_reduce.unmasked, expected_data)
assert_array_equal(ma_reduce.mask, expected_mask)
class TestMaskedArrayUfuncs(MaskedUfuncTests):
# multiply.reduce does not work with units, so test only for plain array.
@pytest.mark.parametrize('axis', (0, 1, None))
def test_multiply_reduce(self, axis):
ma_reduce = np.multiply.reduce(self.ma, axis=axis)
expected_data = np.multiply.reduce(self.a, axis=axis)
expected_mask = np.logical_or.reduce(self.ma.mask, axis=axis)
assert_array_equal(ma_reduce.unmasked, expected_data)
assert_array_equal(ma_reduce.mask, expected_mask)
def test_ufunc_not_implemented_for_other(self):
"""
If the unmasked operation returns NotImplemented, this
should lead to a TypeError also for the masked version.
"""
a = np.array([1, 2])
b = 3 * u.m
with pytest.raises(TypeError):
a & b
ma = Masked(a)
with pytest.raises(TypeError):
ma & b
class TestMaskedQuantityUfuncs(MaskedUfuncTests, QuantitySetup):
def test_ufunc_inplace_error2(self):
out = Masked(np.zeros(self.ma.shape))
with pytest.raises(TypeError):
np.add(self.ma, self.mb, out=out)
class TestMaskedLongitudeUfuncs(MaskedUfuncTests, LongitudeSetup):
def test_ufunc_inplace_quantity_initial(self):
out = Masked(np.zeros(self.ma.shape) << u.m)
result =
|
np.add(self.ma, self.mb, out=out)
|
numpy.add
|
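# A tiny standalone illustration of the rule the tests above exercise: a ufunc applied
# to Masked inputs returns a Masked result whose mask is the logical OR of the inputs'
# masks (the values below are illustrative).
import numpy as np
from astropy.utils.masked.core import Masked

ma = Masked(np.array([1.0, 2.0, 3.0]), mask=[False, True, False])
mb = Masked(np.array([10.0, 20.0, 30.0]), mask=[False, False, True])
print(np.add(ma, mb).mask)   # [False  True  True]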
def makeuvdeprojplot(datavisloc, modelvisloc, paradoffsetloc, uvmax, uvbin_size, warr, ylimreal, ylimimag, outfile, sourcetag, phaseshift):
import matplotlib.pyplot as pl
import sys
import numpy as np
import os
tag=sourcetag
u=[[] for x in datavisloc]
v=[[] for x in datavisloc]
Re=[[] for x in datavisloc]
Im=[[] for x in datavisloc]
w=[[] for x in datavisloc]
Re_mod=[[] for x in datavisloc]
Im_mod=[[] for x in datavisloc]
for i in np.arange(len(datavisloc)):
u[i], v[i], Re[i], Im[i], w[i] = np.load(datavisloc[i])
u[i], v[i], Re_mod[i], Im_mod[i], w_mod = np.load(modelvisloc[i])
inc_mod, PA_mod, dRA_mod, dDec_mod = np.load(paradoffsetloc[i])
inc=inc_mod
PA=PA_mod
if phaseshift:
#########
# Here shift both model and data using the best-fit shifts. This ensures that the visibilities are well-centered when making uvplots (and, importantly, that no relative shifts remain between datasets when several datasets are combined).
#########
theta = np.asarray(u[i])*2.0*np.pi*dRA_mod + np.asarray(v[i])*2.0*np.pi*dDec_mod
## Minus sign below as we are shifting by the negative of the shift that is usually applied to the model
vmodelsh=(np.asarray(Re_mod[i])+1j*np.asarray(Im_mod[i]))*(np.cos(-theta)+1j*np.sin(-theta))
vdatash=(np.asarray(Re[i])+1j*np.asarray(Im[i])) * (np.cos(-theta)+1j*np.sin(-theta))
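# In other words (same math as the two lines above): a source offset of (dRA, dDec)
# multiplies the visibilities by the phase factor exp(-2*pi*i*(u*dRA + v*dDec));
# cos(-theta) + i*sin(-theta) is exactly exp(-i*theta) with theta = 2*pi*(u*dRA + v*dDec).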
Re_mod[i]=np.real(vmodelsh)
Im_mod[i]=np.imag(vmodelsh)
Re[i]=np.real(vdatash)
Im[i]=np.imag(vdatash)
else:
print('Not shifting dataset '+str(i))
#Here fixing weights by factor derived from fitting
w[i]*=warr[i]
#Save SHIFTED visibilities
np.save('./evaluation/'+tag+'_uvtable_modelsh'+str(i)+'.npy', [u[i], v[i], Re_mod[i], Im_mod[i], w[i]])
np.save('./evaluation/'+tag+'_uvtable_datash'+str(i)+'.npy', [u[i], v[i], Re[i], Im[i], w[i]])
#Concatenate all datasets for plotting
u=np.asarray([y for x in u for y in x])#np.concatenate((u1,u2,u3,u4,u5,u6,u7))
v=np.asarray([y for x in v for y in x])#np.concatenate((v1,v2,v3,v4,v5,v6,v7))
Re=np.asarray([y for x in Re for y in x])#np.concatenate((Re1,Re2,Re3,Re4,Re5,Re6,Re7))
Re_mod=np.asarray([y for x in Re_mod for y in x])#np.concatenate((Re_mod1,Re_mod2,Re_mod3,Re_mod4,Re_mod5,Re_mod6,Re_mod7))
Im=np.asarray([y for x in Im for y in x])#np.concatenate((Im1,Im2,Im3,Im4,Im5,Im6,Im7))
Im_mod=np.asarray([y for x in Im_mod for y in x])#np.concatenate((Im_mod1,Im_mod2,Im_mod3,Im_mod4,Im_mod5,Im_mod6,Im_mod7))
w=np.asarray([y for x in w for y in x])#np.concatenate((w1,w2,w3,w4,w5,w6,w7))
#Now recalculate SHIFTED residuals for plotting. REMEMBER: printed residuals for residual imaging are NOT SHIFTED, i.e., the phase center is not at the best-fit stellar location.
Re_resid=Re-Re_mod
Im_resid=Im-Im_mod
#Carry out deprojection
cos_inc = np.cos(inc)
if PA == 0:
u_deproj=u
v_deproj=v
else:
cospa=np.cos(PA)
sinpa=np.sin(PA)
#Rotation by PA here, not of the data, but of u and v!!
u_deproj=u*cospa-v*sinpa
v_deproj=u*sinpa+v*cospa
#NB! Rotation by PA useless if we are not deprojecting by inclination below, as we are just measuring uvdist which doesn't change if we rotate the disk in uv plane.
u_deproj*=cos_inc
#print('Deprojection turned ON.')
#Calculate deprojected u-v distance
uvdist=np.sqrt(u_deproj**2.0+v_deproj**2.0)
#Figure out min and max boundaries in uv distance for plotting
uvdistmin=uvdist.min()
uvdistmax=np.min([uvdist.max(),uvmax])
#Bin uvdistances
nbins=np.ceil((uvdistmax-uvdistmin)/uvbin_size).astype('int')
nbins_mod=50
bin_uvdist = np.zeros(nbins)
bin_weights = np.zeros(nbins)
bin_uvdist_mod = np.zeros(nbins_mod)
bin_weights_mod = np.zeros(nbins_mod)
bin_count = np.zeros(nbins, dtype='int')
bin_count_mod = np.zeros(nbins_mod, dtype='int')
uv_intervals=[]
uv_intervals_mod=[]
uv_bin_edges=np.arange(nbins+1, dtype='float64')*uvbin_size+uvdistmin
    uv_bin_edges_mod=np.arange(nbins_mod+1, dtype='float64')*((uvdistmax-uvdistmin)/float(nbins_mod))+uvdistmin
#Calculate which data points go in which bin, and figure out average uv distance for that bin for more appropriate plotting
for i in range(nbins):
uv_interval = np.where((uvdist >= uv_bin_edges[i]) & (uvdist < uv_bin_edges[i+1]))
bin_count[i] = len(uv_interval[0])
if bin_count[i] != 0:
bin_uvdist[i] = uvdist[uv_interval].sum()/bin_count[i]
bin_weights[i] = np.sum(w[uv_interval])
else:
bin_uvdist[i] = uv_bin_edges[i]+0.5*uvbin_size
uv_intervals.append(uv_interval)
#Do it for model on a finer grid
for i in range(nbins_mod):
uv_interval_mod = np.where((uvdist >= uv_bin_edges_mod[i]) & (uvdist < uv_bin_edges_mod[i+1]))
bin_count_mod[i] = len(uv_interval_mod[0])
if bin_count_mod[i] != 0:
bin_uvdist_mod[i] = uvdist[uv_interval_mod].sum()/bin_count_mod[i]
bin_weights_mod[i] = np.sum(w[uv_interval_mod])
else:
            bin_uvdist_mod[i] = uv_bin_edges_mod[i]+0.5*((uvdistmax-uvdistmin)/float(nbins_mod))
uv_intervals_mod.append(uv_interval_mod)
#Calculate real and imaginary of data, model and errors for each bin
bin_re, bin_re_err, bin_re_mod, bin_re_modfine, bin_re_resid = np.zeros(nbins), np.zeros(nbins), np.zeros(nbins), np.zeros(nbins_mod), np.zeros(nbins)
bin_im, bin_im_err, bin_im_mod, bin_im_modfine, bin_im_resid = np.zeros(nbins), np.zeros(nbins), np.zeros(nbins), np.zeros(nbins_mod), np.zeros(nbins)
for i in range(nbins):
if bin_count[i] != 0:
bin_re[i] = np.sum(Re[uv_intervals[i]]*w[uv_intervals[i]])/bin_weights[i]
bin_re_err[i] = 1./
|
np.sqrt(bin_weights[i])
|
numpy.sqrt
|
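# Hedged sketch of the per-bin statistics computed above, assuming the weights w
# are inverse variances: the binned value is the weighted mean of Re, and its
# uncertainty is 1/sqrt(sum of weights) -- the 1./np.sqrt(bin_weights[i])
# completion shown in this row. All input arrays here are synthetic.
import numpy as np

rng = np.random.default_rng(0)
uvdist = rng.uniform(0.0, 100.0, 1000)       # deprojected baseline lengths
re = rng.normal(1.0, 0.1, uvdist.size)       # real part of the visibilities
w = np.full(uvdist.size, 100.0)              # inverse-variance weights (1/sigma**2)

edges = np.arange(0.0, 110.0, 10.0)
for lo, hi in zip(edges[:-1], edges[1:]):
    sel = (uvdist >= lo) & (uvdist < hi)
    if sel.any():
        wsum = w[sel].sum()
        bin_re = np.sum(re[sel] * w[sel]) / wsum
        bin_re_err = 1.0 / np.sqrt(wsum)
        print("%5.1f-%5.1f: Re = %.3f +/- %.4f" % (lo, hi, bin_re, bin_re_err))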
# -*- coding: utf-8 -*-
# BCDI: tools for pre(post)-processing Bragg coherent X-ray diffraction imaging data
# (c) 07/2017-06/2019 : CNRS UMR 7344 IM2NP
# (c) 07/2019-05/2021 : DESY PHOTON SCIENCE
# (c) 06/2021-present : DESY CFEL
# authors:
# <NAME>, <EMAIL>
# <NAME>, <EMAIL>
# <NAME>, <EMAIL>
"""Postprocessing of the output from the facet analyzer plugin for Paraview."""
import h5py
import ipywidgets as widgets
from ipywidgets import Layout, interactive
import matplotlib.pyplot as plt
import numpy as np
import pathlib
import pandas as pd
from pandas import DataFrame
from typing import Any, Dict, List, Optional, Tuple, Union
import vtk
from bcdi.utils import validation as valid
class Facets:
"""
    Import and store the output data of the facet analyzer plugin for further analysis.
    Extract the strain component and the displacement on the facets, and retrieve
    the correct facet normals based on a user input (geometric transformation into
    the crystal frame). It requires as input a VTK file extracted from the FacetAnalyser
plugin from ParaView. See: https://doi.org/10.1016/j.ultramic.2012.07.024
Original tutorial on how to open vtk files:
http://forrestbao.blogspot.com/2011/12/reading-vtk-files-in-python-via-python.html
Expected directory structure:
    - vtk file should have been saved in Sxxxx/postprocessing
- the analysis output will be saved in Sxxxx/postprocessing/facet_analysis
    Several plotting options are attributes of this class; feel free to change them
(cmap, strain_range, disp_range_avg, disp_range, strain_range_avg, comment,
title_fontsize, axes_fontsize, legend_fontsize, ticks_fontsize)
:param filename: str, name of the VTK file
:param pathdir: str, path to the VTK file
:param savedir: str, path where to save results. If None, they will be saved in
pathdir/facets_analysis
:param lattice: float, atomic spacing of the material in angstroms
(only cubic lattices are supported).
"""
def __init__(
self,
filename: str,
pathdir: str = "./",
        savedir: Optional[str] = None,
lattice: float = 3.912,
) -> None:
# Create other required parameters with default None value
self.nb_facets: Optional[int] = None
self.vtk_data: Optional[Dict[str, List[Any]]] = None
self.strain_mean_facets: List[np.ndarray] = []
self.disp_mean_facets: List[np.ndarray] = []
self.field_data: DataFrame = pd.DataFrame()
self.u0: np.ndarray = np.empty(3, dtype=float)
self.v0: np.ndarray = np.empty(3, dtype=float)
self.w0: np.ndarray = np.empty(3, dtype=float)
self.u: np.ndarray = np.empty(3, dtype=float)
self.v: np.ndarray = np.empty(3, dtype=float)
self.norm_u: np.ndarray = np.empty(3, dtype=float)
self.norm_v: np.ndarray = np.empty(3, dtype=float)
self.norm_w: np.ndarray = np.empty(3, dtype=float)
self.rotation_matrix: Optional[np.ndarray] = None
self.hkl_reference: Optional[Tuple[float, float, float]] = None
self.hkls: str = ""
self.planar_dist = None
self.ref_normal = None
self.theoretical_angles: Optional[Dict[str, float]] = None
# Check input parameters
valid.valid_container(
filename, container_types=str, min_length=1, name="filename"
)
valid.valid_container(
pathdir, container_types=str, min_length=1, name="pathdir"
)
if not pathdir.endswith("/"):
pathdir += "/"
valid.valid_container(
savedir, container_types=str, min_length=1, allow_none=True, name="savedir"
)
if savedir is not None and not savedir.endswith("/"):
savedir += "/"
valid.valid_item(lattice, allowed_types=float, min_excluded=0, name="lattice")
self.pathsave = savedir or pathdir + "facets_analysis/"
self.path_to_data = pathdir + filename
self.filename = filename
self.lattice = lattice
# Plotting options
self.strain_range = 0.001
self.disp_range_avg = 0.2
self.disp_range = 0.35
self.strain_range_avg = 0.0005
self.comment = ""
self.title_fontsize = 24
self.axes_fontsize = 18
self.legend_fontsize = 11
self.ticks_fontsize = 14
self.cmap = "viridis"
self.particle_cmap = "gist_ncar"
# Load the data
self.load_vtk()
# Add edges and corners data if not there already
self.save_edges_corners_data()
# Create widget for particle viewing
self.window = interactive(
self.view_particle,
elev=widgets.IntSlider(
value=0,
step=1,
min=0,
max=360,
continuous_update=False,
description="Elevation angle in the z plane:",
layout=Layout(width="45%"),
readout=True,
style={"description_width": "initial"},
orientation="horizontal",
),
azim=widgets.IntSlider(
value=0,
step=1,
min=0,
max=360,
continuous_update=False,
description="Azimuth angle in the (x, y) plane:",
layout=Layout(width="45%"),
readout=True,
style={"description_width": "initial"},
orientation="horizontal",
),
elev_axis=widgets.Dropdown(
options=["x", "y", "z"],
value="z",
description="Elevated axis",
continuous_update=False,
style={"description_width": "initial"},
),
facet_id_range=widgets.IntRangeSlider(
value=[1, self.nb_facets],
step=1,
min=1,
max=self.nb_facets,
continuous_update=False,
description="Facets ids to show:",
layout=Layout(width="45%"),
readout=True,
style={"description_width": "initial"},
orientation="horizontal",
),
show_edges_corners=widgets.Checkbox(
value=False,
description="Show edges and corners",
layout=Layout(width="40%"),
style={"description_width": "initial"},
),
)
def load_vtk(self) -> None:
"""
Load the VTK file.
        In ParaView, facet indices start at 1; index 0 corresponds to the edges and
        corners of the facets.
"""
pathlib.Path(self.pathsave).mkdir(parents=True, exist_ok=True)
reader = vtk.vtkGenericDataObjectReader()
reader.SetFileName(self.path_to_data)
reader.ReadAllScalarsOn()
reader.ReadAllVectorsOn()
reader.ReadAllTensorsOn()
reader.Update()
vtkdata = reader.GetOutput()
# Get point data
try:
point_data = vtkdata.GetPointData()
print("Loading data...")
except AttributeError:
            raise NameError("This file does not exist or is not a valid VTK file.")
print("Number of points = %s" % str(vtkdata.GetNumberOfPoints()))
print("Number of cells = %s" % str(vtkdata.GetNumberOfCells()))
self.vtk_data = {
"x": [vtkdata.GetPoint(i)[0] for i in range(vtkdata.GetNumberOfPoints())],
"y": [vtkdata.GetPoint(i)[1] for i in range(vtkdata.GetNumberOfPoints())],
"z": [vtkdata.GetPoint(i)[2] for i in range(vtkdata.GetNumberOfPoints())],
"strain": [
point_data.GetArray("strain").GetValue(i)
for i in range(vtkdata.GetNumberOfPoints())
],
"disp": [
point_data.GetArray("disp").GetValue(i)
for i in range(vtkdata.GetNumberOfPoints())
],
}
# Get cell data
cell_data = vtkdata.GetCellData()
self.vtk_data["facet_probabilities"] = [
cell_data.GetArray("FacetProbabilities").GetValue(i)
for i in range(vtkdata.GetNumberOfCells())
]
self.vtk_data["facet_id"] = [
cell_data.GetArray("FacetIds").GetValue(i)
for i in range(vtkdata.GetNumberOfCells())
]
self.vtk_data["x0"] = [
vtkdata.GetCell(i).GetPointId(0) for i in range(vtkdata.GetNumberOfCells())
]
self.vtk_data["y0"] = [
vtkdata.GetCell(i).GetPointId(1) for i in range(vtkdata.GetNumberOfCells())
]
self.vtk_data["z0"] = [
vtkdata.GetCell(i).GetPointId(2) for i in range(vtkdata.GetNumberOfCells())
]
self.nb_facets = int(max(self.vtk_data["facet_id"]))
print("Number of facets = %s" % str(self.nb_facets))
# Get means
facet_indices = np.arange(1, int(self.nb_facets) + 1, 1)
# indices from 1 to n_facets
strain_mean = np.zeros(self.nb_facets) # stored later in field data
strain_std = np.zeros(self.nb_facets) # stored later in field data
disp_mean = np.zeros(self.nb_facets) # stored later in field data
disp_std = np.zeros(self.nb_facets) # stored later in field data
for ind in facet_indices:
print("Facet = %d" % ind)
results = self.extract_facet(int(ind), plot=False)
if results is not None:
strain_mean[ind - 1] = results["strain_mean"]
strain_std[ind - 1] = results["strain_std"]
disp_mean[ind - 1] = results["disp_mean"]
disp_std[ind - 1] = results["disp_std"]
# Get field data
field_data = vtkdata.GetFieldData()
self.field_data["facet_id"] = [
field_data.GetArray("FacetIds").GetValue(i) for i in range(self.nb_facets)
]
self.field_data["strain_mean"] = strain_mean
self.field_data["strain_std"] = strain_std
self.field_data["disp_mean"] = disp_mean
self.field_data["disp_std"] = disp_std
self.field_data["n0"] = [
field_data.GetArray("facetNormals").GetValue(3 * i)
for i in range(self.nb_facets)
]
self.field_data["n1"] = [
field_data.GetArray("facetNormals").GetValue(3 * i + 1)
for i in range(self.nb_facets)
]
self.field_data["n2"] = [
field_data.GetArray("facetNormals").GetValue(3 * i + 2)
for i in range(self.nb_facets)
]
self.field_data["c0"] = [
field_data.GetArray("FacetCenters").GetValue(3 * i)
for i in range(self.nb_facets)
]
self.field_data["c1"] = [
field_data.GetArray("FacetCenters").GetValue(3 * i + 1)
for i in range(self.nb_facets)
]
self.field_data["c2"] = [
field_data.GetArray("FacetCenters").GetValue(3 * i + 2)
for i in range(self.nb_facets)
]
self.field_data["interplanar_angles"] = [
field_data.GetArray("interplanarAngles").GetValue(i)
for i in range(self.nb_facets)
]
self.field_data["abs_facet_size"] = [
field_data.GetArray("absFacetSize").GetValue(i)
for i in range(self.nb_facets)
]
self.field_data["rel_facet_size"] = [
field_data.GetArray("relFacetSize").GetValue(i)
for i in range(self.nb_facets)
]
self.field_data = self.field_data.astype({"facet_id": np.int8})
# Get normals
        # Use the facet number rather than the array index, in case the dataframe gets sorted later
normals = {
f"facet_{row.facet_id}": np.array([row["n0"], row["n1"], row["n2"]])
for j, row in self.field_data.iterrows()
}
# Update legend
legend: List[str] = []
        for normal in normals.values():
            legend.append(" ".join("{:.2f}".format(value) for value in normal))
self.field_data["legend"] = legend
def set_rotation_matrix(
self,
u0: np.ndarray,
v0: np.ndarray,
w0: np.ndarray,
u: np.ndarray,
v: np.ndarray,
) -> None:
"""
Define the rotation matrix.
        u and v should be the vectors perpendicular to two facets. The rotation
        matrix is then used by the method rotate_particle to express the facet
        normals in the crystal frame.
:param u0: numpy.ndarray, shape (3,)
:param v0: numpy.ndarray, shape (3,)
:param w0: numpy.ndarray, shape (3,)
:param u: numpy.ndarray, shape (3,)
:param v: numpy.ndarray, shape (3,)
"""
# Check parameters
valid.valid_ndarray(arrays=(u0, v0, w0, u, v), shape=(3,))
# Input theoretical values for three facets' normals
self.u0 = u0
self.v0 = v0
self.w0 = w0
print("Cross product of u0 and v0:", np.cross(self.u0, self.v0))
# Current values for the first two facets' normals,
# to compute the rotation matrix
self.u = u
self.v = v
self.norm_u = self.u / np.linalg.norm(self.u)
self.norm_v = self.v / np.linalg.norm(self.v)
self.norm_w = np.cross(self.norm_u, self.norm_v)
print("Normalized cross product of u and v:", self.norm_w)
# Transformation matrix
tensor0 = np.array([self.u0, self.v0, self.w0])
tensor1 = np.array([self.norm_u, self.norm_v, self.norm_w])
inv_tensor1 = np.linalg.inv(tensor1)
self.rotation_matrix = np.dot(np.transpose(tensor0), np.transpose(inv_tensor1))
def rotate_particle(self) -> None:
"""
Rotate the nanocrystal.
        The facet normals are recomputed by applying the rotation matrix defined in
        set_rotation_matrix, so that they are expressed in the crystal frame.
"""
# Get normals, again to make sure that we have the good ones
normals = {
f"facet_{row.facet_id}": np.array([row["n0"], row["n1"], row["n2"]])
for j, row in self.field_data.iterrows()
if row.facet_id != 0
}
try:
for e in normals.keys():
normals[e] = np.dot(self.rotation_matrix, normals[e])
except AttributeError:
print(
"""You need to define the rotation matrix first if you want to rotate
the particle. Please choose vectors from the normals in field data"""
)
# Save the new normals
for k, v in normals.items():
# we make sure that we use the same facets !!
mask = self.field_data["facet_id"] == int(k.split("facet_")[-1])
self.field_data.loc[mask, "n0"] = v[0]
self.field_data.loc[mask, "n1"] = v[1]
self.field_data.loc[mask, "n2"] = v[2]
# Update legend
self.field_data.loc[mask, "legend"] = " ".join(
["{:.2f}".format(e) for e in v]
)
def fixed_reference(
self,
hkl_reference: Tuple[float, float, float] = (1, 1, 1),
plot: bool = True,
) -> None:
"""
Compute the interplanar angles between each normal and a fixed reference vector.
:param hkl_reference: tuple of three real numbers, reference crystallographic
direction
:param plot: True to see plots
"""
# Check parameters
valid.valid_container(
hkl_reference,
container_types=(tuple, list),
item_types=(int, float),
length=3,
name="hkl_reference",
)
valid.valid_item(plot, allowed_types=bool, name="plot")
self.hkl_reference = hkl_reference
self.hkls = " ".join(str(e) for e in self.hkl_reference)
self.planar_dist = self.lattice / np.sqrt(
self.hkl_reference[0] ** 2
+ self.hkl_reference[1] ** 2
+ self.hkl_reference[2] ** 2
)
self.ref_normal = self.hkl_reference / np.linalg.norm(self.hkl_reference)
# Get normals, again to make sure that we have the good ones
normals = {
f"facet_{row.facet_id}": np.array([row["n0"], row["n1"], row["n2"]])
for j, row in self.field_data.iterrows()
if row.facet_id != 0
}
# Interplanar angle recomputed from a fixed reference plane,
# between the experimental facets
new_angles = [np.nan]
for e in normals.keys():
value = np.rad2deg(
np.arccos(
np.dot(self.ref_normal, normals[e] / np.linalg.norm(normals[e]))
)
)
new_angles.append(value)
# Convert nan to zeros
mask = np.isnan(new_angles)
for j, m in enumerate(mask):
if m:
new_angles[j] = 0
self.field_data["interplanar_angles"] = new_angles
# Save angles for indexation, using facets that we should see or
# usually see on Pt nanoparticles (WK form)
expected_normals = [
[1, 0, 0],
[-1, 0, 0],
[1, 1, 0],
[-1, 1, 0],
[-1, -1, 0],
[1, -1, 1],
[-1, 1, -1],
[2, 1, 0],
[1, 1, 3],
[1, -1, 3],
[1, -1, -3],
[-1, -1, 3],
[1, 1, -3],
[-1, -1, -3],
[1, 1, 5],
[1, -1, 5],
[1, -1, -5],
[-1, -1, 5],
[1, 1, -5],
[-1, -1, -5],
]
# Stores the theoretical angles between normals
self.theoretical_angles = {}
for n in expected_normals:
self.theoretical_angles[str(n)] = np.rad2deg(
np.arccos(np.dot(self.ref_normal, n / np.linalg.norm(n)))
)
# Make a plot
if plot is True:
fig = plt.figure(figsize=(10, 6))
ax = fig.add_subplot(1, 1, 1)
ax.set_title(
"Interplanar angles between [111] and other possible facets",
fontsize=self.title_fontsize,
)
# Default value is red
for norm, (norm_str, angle) in zip(
expected_normals, self.theoretical_angles.items()
):
                # Add colors as a function of multiplicity
if [abs(x) for x in norm] == [1, 1, 1]:
color = "#7fc97f"
elif [abs(x) for x in norm] == [1, 1, 0]:
color = "#beaed4"
elif [abs(x) for x in norm] == [1, 0, 0]:
color = "#fdc086"
elif [abs(x) for x in norm] == [2, 1, 0]:
color = "#f0027f"
elif [abs(x) for x in norm] == [1, 1, 3]:
color = "#386cb0"
elif [abs(x) for x in norm] == [1, 1, 5]:
color = "k"
else:
color = "r"
ax.scatter(angle, norm_str, color=color)
# Major ticks every 20, minor ticks every 5
major_ticks = np.arange(0, 180, 20)
minor_ticks = np.arange(0, 180, 5)
ax.set_xticks(major_ticks)
ax.set_xticks(minor_ticks, minor=True)
# ax.set_yticks(major_ticks)
# ax.set_yticks(minor_ticks, minor=True)
# Or if you want different settings for the grids:
ax.grid(which="minor", alpha=0.2)
ax.grid(which="major", alpha=0.5)
plt.show()
def test_vector(self, vec: np.ndarray) -> None:
"""
Computes value of a vector passed through the rotation matrix.
:param vec: numpy ndarray of shape (3,).
e.g. np.array([-0.833238, -0.418199, -0.300809])
"""
# Check parameter
valid.valid_ndarray(vec, shape=(3,), name="vec")
try:
print(np.dot(self.rotation_matrix, vec / np.linalg.norm(vec)))
except AttributeError:
print("You need to define the rotation matrix before")
except TypeError:
print("You need to define the rotation matrix before")
def extract_facet(
self,
facet_id: int,
plot: bool = False,
elev: int = 0,
azim: int = 0,
output: bool = True,
save: bool = True,
) -> Union[Dict, None]:
"""
Extract data from one facet.
It extracts the facet direction [x, y, z], the strain component, the
displacement and their means, and also plots it.
:param facet_id: id of facet in paraview
        :param plot: True to see plots
:param elev: elevation angle in the z plane (in degrees).
:param azim: azimuth angle in the (x, y) plane (in degrees).
:param output: True to return facet data
:param save: True to save plot
"""
# Check parameters
valid.valid_item(facet_id, allowed_types=int, name="facet_id")
valid.valid_item(elev, allowed_types=int, name="elev")
valid.valid_item(azim, allowed_types=int, name="azim")
valid.valid_item(plot, allowed_types=bool, name="plot")
valid.valid_item(output, allowed_types=bool, name="output")
valid.valid_item(save, allowed_types=bool, name="save")
# Retrieve voxels that correspond to that facet index
voxel_indices = []
if self.vtk_data is None:
raise ValueError("vtk_data undefined, run load_vtk() first")
for i, _ in enumerate(self.vtk_data["facet_id"]):
if int(self.vtk_data["facet_id"][i]) == facet_id:
voxel_indices.append(self.vtk_data["x0"][i])
voxel_indices.append(self.vtk_data["y0"][i])
voxel_indices.append(self.vtk_data["z0"][i])
#
voxel_indices_new = list(set(voxel_indices))
results = {
"x": np.zeros(len(voxel_indices_new)),
"y": np.zeros(len(voxel_indices_new)),
"z": np.zeros(len(voxel_indices_new)),
"strain": np.zeros(len(voxel_indices_new)),
"disp": np.zeros(len(voxel_indices_new)),
}
for j, _ in enumerate(voxel_indices_new):
results["x"][j] = self.vtk_data["x"][int(voxel_indices_new[j])]
results["y"][j] = self.vtk_data["y"][int(voxel_indices_new[j])]
results["z"][j] = self.vtk_data["z"][int(voxel_indices_new[j])]
results["strain"][j] = self.vtk_data["strain"][int(voxel_indices_new[j])]
results["disp"][j] = self.vtk_data["disp"][int(voxel_indices_new[j])]
results["strain_mean"] = np.mean(results["strain"])
results["strain_std"] = np.std(results["strain"])
results["disp_mean"] = np.mean(results["disp"])
results["disp_std"] = np.std(results["disp"])
# plot single result
if plot:
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(projection="3d")
ax.view_init(elev=elev, azim=azim)
ax.scatter(
self.vtk_data["x"],
self.vtk_data["y"],
self.vtk_data["z"],
s=0.2,
antialiased=True,
depthshade=True,
)
ax.scatter(
results["x"],
results["y"],
results["z"],
s=50,
c=results["strain"],
cmap=self.cmap,
vmin=-0.025,
vmax=0.025,
antialiased=True,
depthshade=True,
)
plt.tick_params(axis="both", which="major", labelsize=self.ticks_fontsize)
plt.tick_params(axis="both", which="minor", labelsize=self.ticks_fontsize)
plt.title(f"Strain for facet n°{facet_id}", fontsize=self.title_fontsize)
plt.tight_layout()
if save:
plt.savefig(
f"{self.pathsave}facet_n°{facet_id}.png", bbox_inches="tight"
)
plt.show()
plt.close()
try:
row = self.field_data.loc[self.field_data["facet_id"] == facet_id]
n0 = row.n0.values[0]
n1 = row.n1.values[0]
n2 = row.n2.values[0]
n = np.array([n0, n1, n2])
print(f"Facet normal: {np.round(n, 2)}")
except IndexError:
pass # we are on the corners and edges
except Exception as e:
raise e
# pass
if not output:
return None
return results
def view_particle(
self,
facet_id_range: Tuple[int, int],
elev_axis: str,
show_edges_corners: bool,
elev: int = 0,
azim: int = 0,
) -> None:
"""
Visualization of the nanocrystal.
x, y and z correspond to the frame used in paraview before saving the facet
analyser plugin data.
:param elev: elevation angle in the z plane (in degrees).
:param azim: azimuth angle in the (x, y) plane (in degrees).
:param facet_id_range: tuple of two facets numbers, facets with numbers between
these two values will be plotted (higher boundary is excluded)
:param elev_axis: "x", "y" or "z"
:param show_edges_corners: set it to True to plot also edges and corners
"""
# Check some parameters
valid.valid_container(
facet_id_range,
container_types=(tuple, list),
item_types=int,
length=2,
min_included=0,
name="facet_id_range",
)
valid.valid_item(elev, allowed_types=int, name="elev")
valid.valid_item(azim, allowed_types=int, name="azim")
valid.valid_item(
show_edges_corners, allowed_types=bool, name="show_edges_corners"
)
if elev_axis not in {"x", "y", "z"}:
raise ValueError(f"unsupported value for 'elev_axis': {elev_axis}")
plt.close()
fig = plt.figure(figsize=(15, 15))
ax = fig.add_subplot(projection="3d")
ax.view_init(elev, azim)
ax.set_xlabel("X axis", fontsize=self.axes_fontsize)
ax.set_ylabel("Y axis", fontsize=self.axes_fontsize)
ax.set_zlabel("Z axis", fontsize=self.axes_fontsize)
def plot_facet_id(facet_id: int) -> None:
"""
Plot the voxels belonging to a specific facet.
            It also plots the normal to that facet and its id.
:param facet_id: number of the facet
"""
# Retrieve voxels for each facet
voxel_indices = []
if self.vtk_data is None:
raise ValueError("vtk_data undefined, run load_vtk() first")
for idx, _ in enumerate(self.vtk_data["facet_id"]):
if int(self.vtk_data["facet_id"][idx]) == facet_id:
voxel_indices.append(self.vtk_data["x0"][idx])
voxel_indices.append(self.vtk_data["y0"][idx])
voxel_indices.append(self.vtk_data["z0"][idx])
# Delete doubles
voxel_indices_new = list(set(voxel_indices))
results = {
"x": np.zeros(len(voxel_indices_new)),
"y": np.zeros(len(voxel_indices_new)),
"z": np.zeros(len(voxel_indices_new)),
"facet_id": np.zeros(len(voxel_indices_new)),
}
for idx, _ in enumerate(voxel_indices_new):
results["x"][idx] = self.vtk_data["x"][int(voxel_indices_new[idx])]
results["y"][idx] = self.vtk_data["y"][int(voxel_indices_new[idx])]
results["z"][idx] = self.vtk_data["z"][int(voxel_indices_new[idx])]
results["facet_id"][idx] = facet_id
# Plot all the voxels with the color of their facet
if elev_axis == "z":
ax.scatter(
results["x"],
results["y"],
results["z"],
s=50,
c=results["facet_id"],
cmap=self.particle_cmap,
vmin=facet_id_range[0],
vmax=facet_id_range[1],
antialiased=True,
depthshade=True,
)
if elev_axis == "x":
ax.scatter(
results["y"],
results["z"],
results["x"],
s=50,
c=results["facet_id"],
cmap=self.particle_cmap,
vmin=facet_id_range[0],
vmax=facet_id_range[1],
antialiased=True,
depthshade=True,
)
if elev_axis == "y":
ax.scatter(
results["z"],
results["x"],
results["y"],
s=50,
c=results["facet_id"],
cmap=self.particle_cmap,
vmin=facet_id_range[0],
vmax=facet_id_range[1],
antialiased=True,
depthshade=True,
)
            # Plot the normal to each facet at its center,
            # doing this last so that it is drawn on the top layer
row = self.field_data.loc[self.field_data["facet_id"] == facet_id]
if facet_id != 0:
if elev_axis == "x":
# Normal
n = np.array([row.n1.values[0], row.n2.values[0], row.n0.values[0]])
# Center of mass
com = np.array(
[row.c1.values[0], row.c2.values[0], row.c0.values[0]]
)
elif elev_axis == "y":
# Normal
n = np.array([row.n2.values[0], row.n0.values[0], row.n1.values[0]])
# Center of mass
com = np.array(
[row.c2.values[0], row.c0.values[0], row.c1.values[0]]
)
else: # "z":
# Normal
n = np.array([row.n0.values[0], row.n1.values[0], row.n2.values[0]])
# Center of mass
com = np.array(
[row.c0.values[0], row.c1.values[0], row.c2.values[0]]
)
n_str = str(facet_id) + str(n.round(2).tolist())
ax.text(com[0], com[1], com[2], n_str, color="red", fontsize=20)
for i in range(facet_id_range[0], facet_id_range[1]):
plot_facet_id(i)
if show_edges_corners:
plot_facet_id(0)
plt.tick_params(axis="both", which="major", labelsize=self.ticks_fontsize)
plt.tick_params(axis="both", which="minor", labelsize=self.ticks_fontsize)
ax.set_title("Particle voxels", fontsize=self.title_fontsize)
plt.tight_layout()
plt.show()
def plot_strain(
self,
figsize: Tuple[float, float] = (12, 10),
elev: int = 0,
azim: int = 0,
save: bool = True,
) -> None:
"""
Plot two views of the surface strain of the nanocrystal.
        The first plot colours the surface by the strain per voxel, the second by
        the mean strain per facet.
:param figsize: figure size in inches (width, height)
:param elev: elevation angle in the z plane (in degrees).
:param azim: azimuth angle in the (x, y) plane (in degrees).
:param save: True to save the figures
"""
# Check parameters
valid.valid_container(
figsize,
container_types=(tuple, list),
item_types=(int, float),
length=2,
min_included=0,
name="figsize",
)
valid.valid_item(elev, allowed_types=int, name="elev")
valid.valid_item(azim, allowed_types=int, name="azim")
valid.valid_item(save, allowed_types=bool, name="save")
if self.nb_facets is None:
raise ValueError("nb_facets is None, run load_vtk() first")
# 3D strain
p = None
fig_name = (
"strain_3D_" + self.hkls + self.comment + "_" + str(self.strain_range)
)
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(projection="3d")
for ind in range(1, self.nb_facets):
results = self.extract_facet(ind, plot=False)
if results is not None:
p = ax.scatter(
results["x"],
results["y"],
results["z"],
s=50,
c=results["strain"],
cmap=self.cmap,
vmin=-self.strain_range,
vmax=self.strain_range,
antialiased=True,
depthshade=True,
)
fig.colorbar(p)
ax.view_init(elev=elev, azim=azim)
plt.title("Strain for each voxel", fontsize=self.title_fontsize)
ax.tick_params(axis="both", which="major", labelsize=self.ticks_fontsize)
ax.tick_params(axis="both", which="minor", labelsize=self.ticks_fontsize)
if save:
plt.savefig(self.pathsave + fig_name + ".png", bbox_inches="tight")
plt.show()
# Average strain
fig_name = (
"strain_3D_avg_"
+ self.hkls
+ self.comment
+ "_"
+ str(self.strain_range_avg)
)
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(projection="3d")
for ind in range(1, self.nb_facets):
results = self.extract_facet(ind, plot=False)
if results is not None:
strain_mean_facet =
|
np.zeros(results["strain"].shape)
|
numpy.zeros
|
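# Hedged re-implementation sketch of the rotation-matrix construction used by
# Facets.set_rotation_matrix above: given two measured facet normals (u, v) and
# the crystallographic directions they should map onto (u0, v0, w0), solve for R
# such that R @ u_hat = u0, R @ v_hat = v0 and R @ w_hat = w0, with w_hat = u_hat x v_hat.
# All names and numbers below are illustrative, not taken from a real dataset.
import numpy as np

def build_rotation(u0, v0, w0, u, v):
    u_hat = np.asarray(u, dtype=float) / np.linalg.norm(u)
    v_hat = np.asarray(v, dtype=float) / np.linalg.norm(v)
    w_hat = np.cross(u_hat, v_hat)
    target = np.array([u0, v0, w0], dtype=float)   # rows: desired directions
    measured = np.array([u_hat, v_hat, w_hat])     # rows: measured directions
    # R @ measured.T == target.T  =>  R = target.T @ inv(measured.T)
    return target.T @ np.linalg.inv(measured.T)

u0, v0 = np.array([1.0, 1.0, 1.0]), np.array([1.0, -1.0, 0.0])
w0 = np.cross(u0, v0)
R = build_rotation(u0, v0, w0, u=[0.57, 0.59, 0.58], v=[0.72, -0.69, 0.02])
u_hat = np.array([0.57, 0.59, 0.58]) / np.linalg.norm([0.57, 0.59, 0.58])
print(np.round(R @ u_hat, 3))   # -> [1. 1. 1.], the measured normal is mapped onto u0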
#!/usr/bin/env python
# stdlib imports
import os.path
import sys
import time as time
import copy
import numpy as np
import openquake.hazardlib.geo as geo
from openquake.hazardlib.geo import point
from impactutils.time.ancient_time import HistoricTime
from impactutils.vectorutils.vector import Vector
from impactutils.vectorutils.ecef import ecef2latlon
from shakelib.rupture.origin import Origin
from shakelib.rupture.quad_rupture import QuadRupture
from shakelib.directivity.bayless2013 import Bayless2013
homedir = os.path.dirname(os.path.abspath(__file__)) # where is this script?
shakedir = os.path.abspath(os.path.join(homedir, '..', '..', '..'))
sys.path.insert(0, shakedir)
def test_ss3():
magnitude = 7.2
dip = np.array([90])
rake = 180.0
width = np.array([15])
rupx = np.array([0, 0])
rupy = np.array([0, 80])
zp = np.array([0])
epix = np.array([0])
epiy = np.array([0.2 * rupy[1]])
# Convert to lat/lon
proj = geo.utils.get_orthographic_projection(-122, -120, 39, 37)
tlon, tlat = proj(rupx, rupy, reverse=True)
epilon, epilat = proj(epix, epiy, reverse=True)
# Origin:
origin = Origin({'lat': epilat[0],
'lon': epilon[0],
'depth': 10,
'mag': magnitude,
'eventsourcecode': 'ss3',
'rake': rake})
rup = QuadRupture.fromTrace(
np.array([tlon[0]]), np.array([tlat[0]]),
np.array([tlon[1]]), np.array([tlat[1]]),
zp, width, dip, origin, reference='ss3')
x = np.linspace(-60, 60, 21)
y = np.linspace(-60, 138, 34)
site_x, site_y = np.meshgrid(x, y)
slon, slat = proj(site_x, site_y, reverse=True)
deps = np.zeros_like(slon)
test1 = Bayless2013(origin, rup, slat, slon, deps, T=1.0)
# Test fd
fd = test1.getFd()
fd_test = np.array(
[[0.00000000e+00, 0.00000000e+00, 2.14620746e-03,
6.47899336e-03, 1.23119791e-02, 1.91676140e-02,
2.64009788e-02, 3.32427846e-02, 3.88863288e-02,
4.26104002e-02, 4.39120296e-02, 4.26104002e-02,
3.88863288e-02, 3.32427846e-02, 2.64009788e-02,
1.91676140e-02, 1.23119791e-02, 6.47899336e-03,
2.14620746e-03, 0.00000000e+00, 0.00000000e+00],
[0.00000000e+00, 8.57780996e-04, 3.99405791e-03,
9.31948105e-03, 1.65406113e-02, 2.51316805e-02,
3.43205435e-02, 4.31274592e-02, 5.04747209e-02,
5.53634169e-02, 5.70796092e-02, 5.53634169e-02,
5.04747209e-02, 4.31274592e-02, 3.43205435e-02,
2.51316805e-02, 1.65406113e-02, 9.31948105e-03,
3.99405791e-03, 8.57780996e-04, 0.00000000e+00],
[-7.32594549e-04, 1.80425497e-04, 3.76908220e-03,
1.00175179e-02, 1.86854835e-02, 2.92291145e-02,
4.07487277e-02, 5.20057177e-02, 6.15509770e-02,
6.79776087e-02, 7.02477931e-02, 6.79776087e-02,
6.15509770e-02, 5.20057177e-02, 4.07487277e-02,
2.92291145e-02, 1.86854835e-02, 1.00175179e-02,
3.76908220e-03, 1.80425497e-04, -7.32594549e-04],
[-3.29238561e-03, -2.60643191e-03, 1.16635260e-03,
8.15185259e-03, 1.82290773e-02, 3.08983182e-02,
4.51608038e-02, 5.94769126e-02, 7.18919113e-02,
8.03888307e-02, 8.34165399e-02, 8.03888307e-02,
7.18919113e-02, 5.94769126e-02, 4.51608038e-02,
3.08983182e-02, 1.82290773e-02, 8.15185259e-03,
1.16635260e-03, -2.60643191e-03, -3.29238561e-03],
[-7.68543266e-03, -7.63179286e-03, -4.08866637e-03,
3.27605236e-03, 1.45558215e-02, 2.94068040e-02,
4.68176355e-02, 6.49397159e-02, 7.72066272e-02,
8.50445368e-02, 8.77974692e-02, 8.50445368e-02,
7.72066272e-02, 6.49397159e-02, 4.68176355e-02,
2.94068040e-02, 1.45558215e-02, 3.27605236e-03,
-4.08866637e-03, -7.63179286e-03, -7.68543266e-03],
[-1.38078234e-02, -1.49011067e-02, -1.21731364e-02,
-5.02168047e-03, 6.98177526e-03, 2.38268531e-02,
4.30419205e-02, 6.00041964e-02, 7.44541603e-02,
8.42939552e-02, 8.77989590e-02, 8.42939552e-02,
7.44541603e-02, 6.00041964e-02, 4.30419205e-02,
2.38268531e-02, 6.98177526e-03, -5.02168047e-03,
-1.21731364e-02, -1.49011067e-02, -1.38078234e-02],
[-2.13780396e-02, -2.42165379e-02, -2.30613142e-02,
-1.70011475e-02, -5.15036128e-03, 1.25885635e-02,
3.24536739e-02, 5.25619351e-02, 7.05100243e-02,
8.31900906e-02, 8.78003567e-02, 8.31900906e-02,
7.05100243e-02, 5.25619351e-02, 3.24536739e-02,
1.25885635e-02, -5.15036128e-03, -1.70011475e-02,
-2.30613142e-02, -2.42165379e-02, -2.13780396e-02],
[-2.98882710e-02, -3.50862342e-02, -3.63793490e-02,
-3.25716319e-02, -2.22546618e-02, -3.59274163e-03,
1.83064517e-02, 4.20112440e-02, 6.46115966e-02,
8.14746164e-02, 8.78016623e-02, 8.14746164e-02,
6.46115966e-02, 4.20112440e-02, 1.83064517e-02,
-3.59274163e-03, -2.22546618e-02, -3.25716319e-02,
-3.63793490e-02, -3.50862342e-02, -2.98882710e-02],
[-3.85810679e-02, -4.66488633e-02, -5.12430987e-02,
-5.10089462e-02, -4.20856023e-02, -2.36905234e-02,
-6.33876287e-04, 2.66765430e-02, 5.53289928e-02,
7.86066125e-02, 8.78028757e-02, 7.86066125e-02,
5.53289928e-02, 2.66765430e-02, -6.33876287e-04,
-2.36905234e-02, -4.20856023e-02, -5.10089462e-02,
-5.12430987e-02, -4.66488633e-02, -3.85810679e-02],
[-4.64803335e-02, -5.76615888e-02, -6.61458422e-02,
-7.06512643e-02, -6.38427394e-02, -4.77258398e-02,
-2.55483969e-02, 4.05840724e-03, 3.98470070e-02,
7.33053399e-02, 8.78039969e-02, 7.33053399e-02,
3.98470070e-02, 4.05840724e-03, -2.55483969e-02,
-4.77258398e-02, -6.38427394e-02, -7.06512643e-02,
-6.61458422e-02, -5.76615888e-02, -4.64803335e-02],
[-5.25038299e-02, -6.66129442e-02, -7.90147081e-02,
-8.87629178e-02, -8.59653118e-02, -7.42828398e-02,
-5.64316505e-02, -2.87083225e-02, 1.25945312e-02,
6.19971667e-02, 8.78050260e-02, 6.19971667e-02,
1.25945312e-02, -2.87083225e-02, -5.64316505e-02,
-7.42828398e-02, -8.59653118e-02, -8.87629178e-02,
-7.90147081e-02, -6.66129442e-02, -5.25038299e-02],
[-5.69779111e-02, -7.36791817e-02, -8.97495345e-02,
-1.04799583e-01, -1.07737239e-01, -1.02875880e-01,
-9.46568471e-02, -7.95630162e-02, -4.96285112e-02,
6.59954795e-03, 5.25569882e-02, 6.59954795e-03,
-4.96285112e-02, -7.95630162e-02, -9.46568471e-02,
-1.02875880e-01, -1.07737239e-01, -1.04799583e-01,
-8.97495345e-02, -7.36791817e-02, -5.69779111e-02],
[-5.90357675e-02, -7.69727119e-02, -9.48442826e-02,
-1.12607620e-01, -1.18744885e-01, -1.18201834e-01,
-1.17217017e-01, -1.15152899e-01, -1.09694433e-01,
-8.82341332e-02, -1.61624035e-02, -8.82341332e-02,
-1.09694433e-01, -1.15152899e-01, -1.17217017e-01,
-1.18201834e-01, -1.18744885e-01, -1.12607620e-01,
-9.48442826e-02, -7.69727119e-02, -5.90357675e-02],
[-5.92189452e-02, -7.72680305e-02, -9.53051857e-02,
-1.13322519e-01, -1.19770917e-01, -1.19670660e-01,
-1.19486798e-01, -1.19092639e-01, -1.17989113e-01,
-1.12555820e-01, -4.50009776e-02, -1.12555820e-01,
-1.17989113e-01, -1.19092639e-01, -1.19486798e-01,
-1.19670660e-01, -1.19770917e-01, -1.13322519e-01,
-9.53051857e-02, -7.72680305e-02, -5.92189452e-02],
[-5.79249958e-02, -7.51927112e-02, -9.20842554e-02,
-1.08361430e-01, -1.12722790e-01, -1.09732675e-01,
-1.04531672e-01, -9.44729544e-02, -7.23277773e-02,
-2.05699911e-02, 3.58249631e-02, -2.05699911e-02,
-7.23277773e-02, -9.44729544e-02, -1.04531672e-01,
-1.09732675e-01, -1.12722790e-01, -1.08361430e-01,
-9.20842554e-02, -7.51927112e-02, -5.79249958e-02],
[-5.42527703e-02, -6.93641123e-02, -8.31684773e-02,
-9.49114165e-02, -9.41989454e-02, -8.48645354e-02,
-7.00894708e-02, -4.58286259e-02, -6.37563061e-03,
4.68887998e-02, 7.77968419e-02, 4.68887998e-02,
-6.37563061e-03, -4.58286259e-02, -7.00894708e-02,
-8.48645354e-02, -9.41989454e-02, -9.49114165e-02,
-8.31684773e-02, -6.93641123e-02, -5.42527703e-02],
[-4.82490057e-02, -5.99997941e-02, -6.91786120e-02,
-7.44891242e-02, -6.73705808e-02, -5.13001284e-02,
-2.84188057e-02, 3.60143816e-03, 4.47470123e-02,
8.58663851e-02, 1.04548354e-01, 8.58663851e-02,
4.47470123e-02, 3.60143816e-03, -2.84188057e-02,
-5.13001284e-02, -6.73705808e-02, -7.44891242e-02,
-6.91786120e-02, -5.99997941e-02, -4.82490057e-02],
[-4.03203010e-02, -4.79063206e-02, -5.16352259e-02,
-4.98707253e-02, -3.67295509e-02, -1.57342058e-02,
1.13668830e-02, 4.46551184e-02, 8.10450840e-02,
1.11780747e-01, 1.24226598e-01, 1.11780747e-01,
8.10450840e-02, 4.46551184e-02, 1.13668830e-02,
-1.57342058e-02, -3.67295509e-02, -4.98707253e-02,
-5.16352259e-02, -4.79063206e-02, -4.03203010e-02],
[-3.10250239e-02, -3.40796094e-02, -3.22089254e-02,
-2.37094100e-02, -5.85463114e-03, 1.77402761e-02,
4.57786845e-02, 7.69637052e-02, 1.07537652e-01,
1.30906328e-01, 1.39800436e-01, 1.30906328e-01,
1.07537652e-01, 7.69637052e-02, 4.57786845e-02,
1.77402761e-02, -5.85463114e-03, -2.37094100e-02,
-3.22089254e-02, -3.40796094e-02, -3.10250239e-02],
[-2.09301700e-02, -1.94475962e-02, -1.22970199e-02,
2.07296407e-03, 2.31516868e-02, 4.74574033e-02,
7.44743481e-02, 1.02380049e-01, 1.27776301e-01,
1.46003379e-01, 1.52690015e-01, 1.46003379e-01,
1.27776301e-01, 1.02380049e-01, 7.44743481e-02,
4.74574033e-02, 2.31516868e-02, 2.07296407e-03,
-1.22970199e-02, -1.94475962e-02, -2.09301700e-02],
[-1.05257992e-02, -4.74329696e-03, 7.12107274e-03,
2.63431361e-02, 4.93709790e-02, 7.31527220e-02,
9.82233938e-02, 1.22728059e-01, 1.43894925e-01,
1.58465026e-01, 1.63685984e-01, 1.58465026e-01,
1.43894925e-01, 1.22728059e-01, 9.82233938e-02,
7.31527220e-02, 4.93709790e-02, 2.63431361e-02,
7.12107274e-03, -4.74329696e-03, -1.05257992e-02],
[-1.89098657e-04, 9.52392382e-03, 2.54577716e-02,
4.85730869e-02, 7.26048516e-02, 9.51726659e-02,
1.17988523e-01, 1.39380421e-01, 1.57176612e-01,
1.69076915e-01, 1.73274075e-01, 1.69076915e-01,
1.57176612e-01, 1.39380421e-01, 1.17988523e-01,
9.51726659e-02, 7.26048516e-02, 4.85730869e-02,
2.54577716e-02, 9.52392382e-03, -1.89098657e-04],
[9.81732797e-03, 2.30419581e-02, 4.24234701e-02,
6.86213308e-02, 9.30164618e-02, 1.14050063e-01,
1.34620894e-01, 1.53304069e-01, 1.68420867e-01,
1.78321253e-01, 1.81774183e-01, 1.78321253e-01,
1.68420867e-01, 1.53304069e-01, 1.34620894e-01,
1.14050063e-01, 9.30164618e-02, 6.86213308e-02,
4.24234701e-02, 2.30419581e-02, 9.81732797e-03],
[1.93290725e-02, 3.56493099e-02, 5.79271157e-02,
8.65611122e-02, 1.10914315e-01, 1.30317702e-01,
1.48798006e-01, 1.65173224e-01, 1.78147031e-01,
1.86513895e-01, 1.89408199e-01, 1.86513895e-01,
1.78147031e-01, 1.65173224e-01, 1.48798006e-01,
1.30317702e-01, 1.10914315e-01, 8.65611122e-02,
5.79271157e-02, 3.56493099e-02, 1.93290725e-02],
[2.68168937e-02, 4.52356810e-02, 6.92261217e-02,
9.89630241e-02, 1.23093435e-01, 1.40640067e-01,
1.56998943e-01, 1.71215219e-01, 1.82297185e-01,
1.89360704e-01, 1.91789146e-01, 1.89360704e-01,
1.82297185e-01, 1.71215219e-01, 1.56998943e-01,
1.40640067e-01, 1.23093435e-01, 9.89630241e-02,
6.92261217e-02, 4.52356810e-02, 2.68168937e-02],
[3.19403269e-02, 5.15051953e-02, 7.61032066e-02,
1.05705197e-01, 1.31722206e-01, 1.47466588e-01,
1.61892450e-01, 1.74235616e-01, 1.83735386e-01,
1.89735533e-01, 1.91788616e-01, 1.89735533e-01,
1.83735386e-01, 1.74235616e-01, 1.61892450e-01,
1.47466588e-01, 1.31722206e-01, 1.05705197e-01,
7.61032066e-02, 5.15051953e-02, 3.19403269e-02],
[3.48604070e-02, 5.49292382e-02, 7.94274234e-02,
1.08149011e-01, 1.38923419e-01, 1.53070440e-01,
1.65849067e-01, 1.76646162e-01, 1.84871647e-01,
1.90029617e-01, 1.91787948e-01, 1.90029617e-01,
1.84871647e-01, 1.76646162e-01, 1.65849067e-01,
1.53070440e-01, 1.38923419e-01, 1.08149011e-01,
7.94274234e-02, 5.49292382e-02, 3.48604070e-02],
[3.53402022e-02, 5.53653759e-02, 7.91965502e-02,
1.06486934e-01, 1.36563003e-01, 1.57713955e-01,
1.69087164e-01, 1.78598269e-01, 1.85784340e-01,
1.90264452e-01, 1.91787141e-01, 1.90264452e-01,
1.85784340e-01, 1.78598269e-01, 1.69087164e-01,
1.57713955e-01, 1.36563003e-01, 1.06486934e-01,
7.91965502e-02, 5.53653759e-02, 3.53402022e-02],
[3.32889822e-02, 5.28319225e-02, 7.55769079e-02,
1.01077605e-01, 1.28592068e-01, 1.57023616e-01,
1.71766715e-01, 1.80199729e-01, 1.86528091e-01,
1.90454829e-01, 1.91786196e-01, 1.90454829e-01,
1.86528091e-01, 1.80199729e-01, 1.71766715e-01,
1.57023616e-01, 1.28592068e-01, 1.01077605e-01,
7.55769079e-02, 5.28319225e-02, 3.32889822e-02],
[2.87295370e-02, 4.74613283e-02, 6.88388861e-02,
9.23568989e-02, 1.17254645e-01, 1.42483223e-01,
1.66695764e-01, 1.81528776e-01, 1.87141877e-01,
1.90611190e-01, 1.91785112e-01, 1.90611190e-01,
1.87141877e-01, 1.81528776e-01, 1.66695764e-01,
1.42483223e-01, 1.17254645e-01, 9.23568989e-02,
6.88388861e-02, 4.74613283e-02, 2.87295370e-02],
[2.17650266e-02, 3.94568191e-02, 5.93023344e-02,
8.07720575e-02, 1.03124482e-01, 1.25394282e-01,
1.46405870e-01, 1.64828303e-01, 1.79288925e-01,
1.88553222e-01, 1.91747252e-01, 1.88553222e-01,
1.79288925e-01, 1.64828303e-01, 1.46405870e-01,
1.25394282e-01, 1.03124482e-01, 8.07720575e-02,
5.93023344e-02, 3.94568191e-02, 2.17650266e-02],
[1.25495284e-02, 2.90572166e-02, 4.72972116e-02,
6.67423656e-02, 8.66951873e-02, 1.06290296e-01,
1.24520131e-01, 1.40293247e-01, 1.52531693e-01,
1.60303860e-01, 1.62970689e-01, 1.60303860e-01,
1.52531693e-01, 1.40293247e-01, 1.24520131e-01,
1.06290296e-01, 8.66951873e-02, 6.67423656e-02,
4.72972116e-02, 2.90572166e-02, 1.25495284e-02],
[1.26441934e-03, 1.65114811e-02, 3.31390978e-02,
5.06407706e-02, 6.83765492e-02, 8.55839448e-02,
1.01408074e-01, 1.14955639e-01, 1.25373662e-01,
1.31946425e-01, 1.34193829e-01, 1.31946425e-01,
1.25373662e-01, 1.14955639e-01, 1.01408074e-01,
8.55839448e-02, 6.83765492e-02, 5.06407706e-02,
3.31390978e-02, 1.65114811e-02, 1.26441934e-03],
[0.00000000e+00, 2.06213867e-03, 1.71162845e-02,
3.27888240e-02, 4.85026462e-02, 6.35932476e-02,
7.73387997e-02, 8.90069217e-02, 9.79166934e-02,
1.03509489e-01, 1.05416736e-01, 1.03509489e-01,
9.79166934e-02, 8.90069217e-02, 7.73387997e-02,
6.35932476e-02, 4.85026462e-02, 3.27888240e-02,
1.71162845e-02, 2.06213867e-03, 0.00000000e+00]]
)
np.testing.assert_allclose(fd, fd_test, rtol=1e-4)
def test_ss3_m6():
magnitude = 6.0
dip = np.array([90])
rake = 180.0
width = np.array([15])
rupx = np.array([0, 0])
rupy = np.array([0, 80])
zp = np.array([0])
epix = np.array([0])
epiy = np.array([0.2 * rupy[1]])
# Convert to lat/lon
proj = geo.utils.get_orthographic_projection(-122, -120, 39, 37)
tlon, tlat = proj(rupx, rupy, reverse=True)
epilon, epilat = proj(epix, epiy, reverse=True)
# Origin:
origin = Origin({'lat': epilat[0],
'lon': epilon[0],
'depth': 10,
'mag': magnitude,
'eventsourcecode': 'ss3',
'rake': rake})
rup = QuadRupture.fromTrace(
np.array([tlon[0]]), np.array([tlat[0]]),
np.array([tlon[1]]), np.array([tlat[1]]),
zp, width, dip, origin, reference='ss3')
x = np.linspace(0, 20, 6)
y = np.linspace(0, 90, 11)
site_x, site_y = np.meshgrid(x, y)
slon, slat = proj(site_x, site_y, reverse=True)
deps = np.zeros_like(slon)
test1 = Bayless2013(origin, rup, slat, slon, deps, T=1.0)
# Test fd
fd = test1.getFd()
fd_test = np.array(
[[0.05853668, 0.05032323, 0.0306438, 0.00839635, -0.01102162,
-0.02621319],
[0.01720501, -0.00687296, -0.03804823, -0.05547473, -0.0644932,
-0.06947135],
[-0.03000065, -0.07006634, -0.07708165, -0.07865941, -0.0792369,
-0.07950887],
[0.0398062, 0.02571145, -0.0018651, -0.0255418, -0.04176278,
-0.05235095],
[0.0696989, 0.06389524, 0.04890304, 0.02983134, 0.01098535,
-0.00545921],
[0.088278, 0.08511069, 0.07628596, 0.06350294, 0.04875897,
0.03373495],
[0.10179334, 0.09978475, 0.09401676, 0.0851842, 0.07422509,
0.06210369],
[0.11242209, 0.11102701, 0.10696056, 0.10055471, 0.09229027,
0.08271454],
[0.12118279, 0.12015315, 0.11712653, 0.11228058, 0.10588323,
0.09825795],
[0.12785957, 0.12706892, 0.12473264, 0.12095384, 0.11589197,
0.10974684],
[0.12785908, 0.12724852, 0.12543819, 0.12249026, 0.11850249,
0.11360047]])
np.testing.assert_allclose(
fd, fd_test, rtol=1e-4)
def test_ss3_move_hypo1():
magnitude = 7.2
dip = np.array([90])
rake = 180.0
width = np.array([15])
rupx = np.array([0, 0])
rupy = np.array([0, 80])
zp = np.array([0.0])
epix = np.array([1.0])
epiy = np.array([-1.0])
# Convert to lat/lon
proj = geo.utils.get_orthographic_projection(-122, -120, 39, 37)
tlon, tlat = proj(rupx, rupy, reverse=True)
epilon, epilat = proj(epix, epiy, reverse=True)
# Origin
origin = Origin({'lat': epilat[0],
'lon': epilon[0],
'depth': -1,
'mag': magnitude,
'eventsourcecode': 'ss3',
'rake': rake})
rup = QuadRupture.fromTrace(
np.array([tlon[0]]), np.array([tlat[0]]),
np.array([tlon[1]]), np.array([tlat[1]]),
zp, width, dip, origin, reference='ss3')
x = np.linspace(0, 20, 6)
y = np.linspace(0, 90, 11)
site_x, site_y = np.meshgrid(x, y)
slon, slat = proj(site_x, site_y, reverse=True)
deps = np.zeros_like(slon)
test1 = Bayless2013(origin, rup, slat, slon, deps, T=1.0)
phyp = copy.deepcopy(test1.phyp[0])
plat, plon, pdep = ecef2latlon(phyp.x, phyp.y, phyp.z)
px, py = proj(plon, plat, reverse=False)
np.testing.assert_allclose(plat, 38.004233219183604, rtol=1e-4)
np.testing.assert_allclose(plon, -120.98636122402166, rtol=1e-4)
np.testing.assert_allclose(pdep, 7.4999999989205968, rtol=1e-4)
# --------------------------------------------------------------------------
# Also for multiple segments
# --------------------------------------------------------------------------
dip = np.array([90., 90., 90.])
rake = 180.0
width = np.array([15., 15., 10.])
rupx = np.array([0., 0., 10., 20.])
rupy = np.array([0., 20., 60., 80.])
zp = np.array([0., 0., 0.])
epix = np.array([0.])
epiy = np.array([0.])
# Convert to lat/lon
proj = geo.utils.get_orthographic_projection(-122, -120, 39, 37)
tlon, tlat = proj(rupx, rupy, reverse=True)
epilon, epilat = proj(epix, epiy, reverse=True)
rup = QuadRupture.fromTrace(
np.array(tlon[0:3]), np.array(tlat[0:3]),
|
np.array(tlon[1:4])
|
numpy.array
|
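# Hedged sketch of the slicing pattern in the completion above: a rupture trace
# given as N+1 surface points becomes N quad segments by pairing consecutive
# points, so the segment start points are trace[0:N] and the end points are
# trace[1:N+1]. Plain numpy only; QuadRupture itself is not re-implemented here.
import numpy as np

tlon = np.array([-121.00, -120.98, -120.90, -120.80])
tlat = np.array([37.00, 37.18, 37.54, 37.72])
start_lon, end_lon = np.array(tlon[0:3]), np.array(tlon[1:4])
start_lat, end_lat = np.array(tlat[0:3]), np.array(tlat[1:4])
for i, seg in enumerate(zip(start_lon, start_lat, end_lon, end_lat)):
    print("segment %d: (%.2f, %.2f) -> (%.2f, %.2f)" % ((i,) + seg))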
import pyBigWig
import os
import sys
import numpy as np
import glob
def anchor(ref, ori):  # inputs: 1d np arrays
ref_new=ref.copy()
ref_new.sort()
ori_new=ori.copy()
ori_new[np.argsort(ori)]=ref_new[:]
return ori_new
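# anchor() performs rank matching (quantile normalization to a reference):
# the sorted values of `ref` are written back in the rank order of `ori`, so the
# output keeps ori's ordering but takes on ref's value distribution.
# Worked example (values are illustrative):
#   ref = [0, 1, 2, 10], ori = [5, 3, 9, 1]  ->  anchor(ref, ori) = [2, 1, 10, 0]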
chr_all=['chr1','chr2','chr3','chr4','chr5','chr6','chr7','chr8','chr9','chr10','chr11','chr12','chr13','chr14','chr15','chr16','chr17','chr18','chr19','chr20','chr21','chr22','chrX']
num_bp=np.array([248956422,242193529,198295559,190214555,181538259,170805979,159345973,145138636,138394717,133797422,135086622,133275309,114364328,107043718,101991189,90338345,83257441,80373285,58617616,64444167,46709983,50818468,156040895])
num_bp25=[9958257, 9687742, 7931823, 7608583, 7261531, 6832240, 6373839, 5805546, 5535789, 5351897, 5403465, 5331013, 4574574, 4281749, 4079648, 3613534, 3330298, 3214932, 2344705, 2577767, 1868400, 2032739, 6241636]
chr_len={}
for i in np.arange(len(chr_all)):
chr_len[chr_all[i]]=num_bp[i]
chr_len25={}
for i in np.arange(len(chr_all)):
chr_len25[chr_all[i]]=num_bp25[i]
# number of cells used to calculate avg
assay_all=['M01','M02','M16','M17','M18','M20','M22','M29']
tmp=[4,37,25,19,25,21,33,20]
dict_assay_count={}
for i in np.arange(len(assay_all)):
dict_assay_count[assay_all[i]]=tmp[i]
# number of models
model_all=['C_D','C_E','C_F','C_G','C_H','C_I', \
'CH_D','CH_E','CH_F','CH_G','CH_I', \
'CDEH_G','CDEH_I','DEFGHI_C', \
'DGH_C','DGH_F','DGH_I', \
'DGI_C','DGI_E','DGI_F','DGI_H', \
'F_C','F_D','F_E','F_G','F_H','F_I', \
'DGHKLMN_F','DGHKLMN_I','DGHK_F','DGHK_I','DGIK_E','DGIK_F','DGIK_H']
tmp=[15,11,15,11,22,12, \
15,11,14,11,12, \
9,9,9, \
11,18,17, \
11,15,16,17, \
15,20,16,18,20,17, \
7,6,11,11,11,10,11]
dict_model_count={}
for i in np.arange(len(model_all)):
dict_model_count[model_all[i]]=tmp[i]
path0='../data_challenge/baseline_avg_final/'
os.system('mkdir -p npy')
print(sys.argv)
id_all=sys.argv[1:]
for the_id in id_all:
print(the_id)
the_assay=the_id[3:]
the_cell=the_id[:3]
bw=pyBigWig.open(path0 + 'gold_anchored_' + the_assay + '.bigwig')
w1 = 1.0; w2 = 2.0; w3 = 1.0 # HERE weights for avg, lgbm, nn
for the_chr in chr_all:
print(the_chr)
## 1. stack
# 1.1 avg
avg = np.array(bw.values(the_chr, 0, chr_len25[the_chr]))
## 3.1 save npy
|
np.save('./npy/pred25bp_' + the_id + '_' + the_chr, avg)
|
numpy.save
|
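# Hedged sketch of the per-chromosome save pattern used in the row above:
# np.save appends ".npy" to the target path automatically and np.load
# round-trips the array unchanged. The path and array here are placeholders,
# not the pipeline's real output.
import os
import numpy as np

os.makedirs('./npy', exist_ok=True)
avg = np.random.default_rng(0).random(1000).astype(np.float32)  # fake 25-bp track
np.save('./npy/pred25bp_EXAMPLE_chr21', avg)                    # writes ..._chr21.npy
loaded = np.load('./npy/pred25bp_EXAMPLE_chr21.npy')
assert np.array_equal(avg, loaded)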
#
# Author: <NAME>, 2002
#
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy.lib.six import xrange
from numpy import pi, asarray, floor, isscalar, iscomplex, real, imag, sqrt, \
where, mgrid, cos, sin, exp, place, seterr, issubdtype, extract, \
less, vectorize, inexact, nan, zeros, sometrue, atleast_1d
from ._ufuncs import ellipkm1, mathieu_a, mathieu_b, iv, jv, gamma, psi, zeta, \
hankel1, hankel2, yv, kv, gammaln, ndtri, errprint, poch, binom
from . import _ufuncs
import types
from . import specfun
from . import orthogonal
import warnings
__all__ = ['agm', 'ai_zeros', 'assoc_laguerre', 'bei_zeros', 'beip_zeros',
'ber_zeros', 'bernoulli', 'berp_zeros', 'bessel_diff_formula',
'bi_zeros', 'clpmn', 'comb', 'digamma', 'diric', 'ellipk', 'erf_zeros',
'erfcinv', 'erfinv', 'errprint', 'euler', 'fresnel_zeros',
'fresnelc_zeros', 'fresnels_zeros', 'gamma', 'gammaln', 'h1vp',
'h2vp', 'hankel1', 'hankel2', 'hyp0f1', 'iv', 'ivp', 'jn_zeros',
'jnjnp_zeros', 'jnp_zeros', 'jnyn_zeros', 'jv', 'jvp', 'kei_zeros',
'keip_zeros', 'kelvin_zeros', 'ker_zeros', 'kerp_zeros', 'kv',
'kvp', 'lmbda', 'lpmn', 'lpn', 'lqmn', 'lqn', 'mathieu_a',
'mathieu_b', 'mathieu_even_coef', 'mathieu_odd_coef', 'ndtri',
'obl_cv_seq', 'pbdn_seq', 'pbdv_seq', 'pbvv_seq', 'perm',
'polygamma', 'pro_cv_seq', 'psi', 'riccati_jn', 'riccati_yn',
'sinc', 'sph_harm', 'sph_in', 'sph_inkn',
'sph_jn', 'sph_jnyn', 'sph_kn', 'sph_yn', 'y0_zeros', 'y1_zeros',
'y1p_zeros', 'yn_zeros', 'ynp_zeros', 'yv', 'yvp', 'zeta',
'SpecialFunctionWarning']
class SpecialFunctionWarning(Warning):
pass
warnings.simplefilter("always", category=SpecialFunctionWarning)
def sinc(x):
"""Returns sin(pi*x)/(pi*x) at all points of array x.
"""
x = asarray(x)
w = pi * x
# w might contain 0, and so temporarily turn off warnings
# while calculating sin(w)/w.
old_settings = seterr(all='ignore')
s = sin(w) / w
seterr(**old_settings)
return where(x == 0, 1.0, s)
def diric(x,n):
"""Returns the periodic sinc function, also called the Dirichlet function:
    diric(x, n) = sin(x * n / 2) / (n * sin(x / 2))
where n is a positive integer.
"""
x,n = asarray(x), asarray(n)
n = asarray(n + (x-x))
x = asarray(x + (n-n))
if issubdtype(x.dtype, inexact):
ytype = x.dtype
else:
ytype = float
y = zeros(x.shape,ytype)
mask1 = (n <= 0) | (n != floor(n))
place(y,mask1,nan)
z = asarray(x / 2.0 / pi)
mask2 = (1-mask1) & (z == floor(z))
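    # At x = 2*pi*k the expression sin(n*x/2)/(n*sin(x/2)) is 0/0; its limit is
    # (-1)**(k*(n-1)), which is the value assigned in the mask2 branch below.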
zsub = extract(mask2,z)
nsub = extract(mask2,n)
place(y,mask2,pow(-1,zsub*(nsub-1)))
mask = (1-mask1) & (1-mask2)
xsub = extract(mask,x)
nsub = extract(mask,n)
place(y,mask,sin(nsub*xsub/2.0)/(nsub*sin(xsub/2.0)))
return y
def jnjnp_zeros(nt):
    Compute nt (<=1200) zeros of the Bessel functions Jn and Jn'
    and arrange them in order of their magnitudes.
Returns
-------
zo[l-1] : ndarray
Value of the lth zero of Jn(x) and Jn'(x). Of length `nt`.
n[l-1] : ndarray
Order of the Jn(x) or Jn'(x) associated with lth zero. Of length `nt`.
m[l-1] : ndarray
Serial number of the zeros of Jn(x) or Jn'(x) associated
with lth zero. Of length `nt`.
t[l-1] : ndarray
0 if lth zero in zo is zero of Jn(x), 1 if it is a zero of Jn'(x). Of
length `nt`.
See Also
--------
jn_zeros, jnp_zeros : to get separated arrays of zeros.
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt > 1200):
raise ValueError("Number must be integer <= 1200.")
nt = int(nt)
n,m,t,zo = specfun.jdzo(nt)
return zo[1:nt+1],n[:nt],m[:nt],t[:nt]
def jnyn_zeros(n,nt):
"""Compute nt zeros of the Bessel functions Jn(x), Jn'(x), Yn(x), and
Yn'(x), respectively. Returns 4 arrays of length nt.
See jn_zeros, jnp_zeros, yn_zeros, ynp_zeros to get separate arrays.
"""
if not (isscalar(nt) and isscalar(n)):
raise ValueError("Arguments must be scalars.")
if (floor(n) != n) or (floor(nt) != nt):
raise ValueError("Arguments must be integers.")
if (nt <= 0):
raise ValueError("nt > 0")
return specfun.jyzo(abs(n),nt)
def jn_zeros(n,nt):
"""Compute nt zeros of the Bessel function Jn(x).
"""
return jnyn_zeros(n,nt)[0]
def jnp_zeros(n,nt):
"""Compute nt zeros of the Bessel function Jn'(x).
"""
return jnyn_zeros(n,nt)[1]
def yn_zeros(n,nt):
"""Compute nt zeros of the Bessel function Yn(x).
"""
return jnyn_zeros(n,nt)[2]
def ynp_zeros(n,nt):
"""Compute nt zeros of the Bessel function Yn'(x).
"""
return jnyn_zeros(n,nt)[3]
def y0_zeros(nt,complex=0):
"""Returns nt (complex or real) zeros of Y0(z), z0, and the value
of Y0'(z0) = -Y1(z0) at each zero.
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("Arguments must be scalar positive integer.")
kf = 0
kc = (complex != 1)
return specfun.cyzo(nt,kf,kc)
def y1_zeros(nt,complex=0):
"""Returns nt (complex or real) zeros of Y1(z), z1, and the value
of Y1'(z1) = Y0(z1) at each zero.
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("Arguments must be scalar positive integer.")
kf = 1
kc = (complex != 1)
return specfun.cyzo(nt,kf,kc)
def y1p_zeros(nt,complex=0):
"""Returns nt (complex or real) zeros of Y1'(z), z1', and the value
of Y1(z1') at each zero.
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("Arguments must be scalar positive integer.")
kf = 2
kc = (complex != 1)
return specfun.cyzo(nt,kf,kc)
def bessel_diff_formula(v, z, n, L, phase):
# from AMS55.
# L(v,z) = J(v,z), Y(v,z), H1(v,z), H2(v,z), phase = -1
# L(v,z) = I(v,z) or exp(v*pi*i)K(v,z), phase = 1
# For K, you can pull out the exp((v-k)*pi*i) into the caller
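    # Explicitly (AMS55 9.1.31 for phase = -1):
    #   d^n/dz^n L_v(z) = 2**(-n) * sum_{k=0..n} phase**k * binom(n, k) * L_{v-n+2k}(z)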
p = 1.0
s = L(v-n, z)
for i in xrange(1, n+1):
        p = phase * (p * (n-i+1)) / i    # running term: phase**i * binom(n, i)
s += p*L(v-n + i*2, z)
return s / (2.**n)
def jvp(v,z,n=1):
"""Return the nth derivative of Jv(z) with respect to z.
"""
if not isinstance(n,int) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if n == 0:
return jv(v,z)
else:
return bessel_diff_formula(v, z, n, jv, -1)
# return (jvp(v-1,z,n-1) - jvp(v+1,z,n-1))/2.0
def yvp(v,z,n=1):
"""Return the nth derivative of Yv(z) with respect to z.
"""
if not isinstance(n,int) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if n == 0:
return yv(v,z)
else:
return bessel_diff_formula(v, z, n, yv, -1)
# return (yvp(v-1,z,n-1) - yvp(v+1,z,n-1))/2.0
def kvp(v,z,n=1):
"""Return the nth derivative of Kv(z) with respect to z.
"""
if not isinstance(n,int) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if n == 0:
return kv(v,z)
else:
return (-1)**n * bessel_diff_formula(v, z, n, kv, 1)
def ivp(v,z,n=1):
"""Return the nth derivative of Iv(z) with respect to z.
"""
if not isinstance(n,int) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if n == 0:
return iv(v,z)
else:
return bessel_diff_formula(v, z, n, iv, 1)
def h1vp(v,z,n=1):
"""Return the nth derivative of H1v(z) with respect to z.
"""
if not isinstance(n,int) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if n == 0:
return hankel1(v,z)
else:
return bessel_diff_formula(v, z, n, hankel1, -1)
# return (h1vp(v-1,z,n-1) - h1vp(v+1,z,n-1))/2.0
def h2vp(v,z,n=1):
"""Return the nth derivative of H2v(z) with respect to z.
"""
if not isinstance(n,int) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if n == 0:
return hankel2(v,z)
else:
return bessel_diff_formula(v, z, n, hankel2, -1)
# return (h2vp(v-1,z,n-1) - h2vp(v+1,z,n-1))/2.0
def sph_jn(n,z):
"""Compute the spherical Bessel function jn(z) and its derivative for
all orders up to and including n.
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z):
nm,jn,jnp,yn,ynp = specfun.csphjy(n1,z)
else:
nm,jn,jnp = specfun.sphj(n1,z)
return jn[:(n+1)], jnp[:(n+1)]
def sph_yn(n,z):
"""Compute the spherical Bessel function yn(z) and its derivative for
all orders up to and including n.
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z) or less(z,0):
nm,jn,jnp,yn,ynp = specfun.csphjy(n1,z)
else:
nm,yn,ynp = specfun.sphy(n1,z)
return yn[:(n+1)], ynp[:(n+1)]
def sph_jnyn(n,z):
"""Compute the spherical Bessel functions, jn(z) and yn(z) and their
derivatives for all orders up to and including n.
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if
|
iscomplex(z)
|
numpy.iscomplex
|
import logging
import numpy as np
import threading
import queue
import zmq
__author__ = "<NAME>"
__copyright__ = "Copyright (c) 2017, Technische Universität Berlin"
__version__ = "0.1.0"
__email__ = "<EMAIL>"
class RegMonDataReceiver(threading.Thread):
def __init__(self, publishData=False):
threading.Thread.__init__(self)
self.log = logging.getLogger("{module}.{name}".format(
module=self.__class__.__module__, name=self.__class__.__name__))
self.signalQueue = queue.Queue()
self.outQueues = []
self.publishData = publishData
self.pubPort = 56789
self.context = zmq.Context()
self.pubSocket = self.context.socket(zmq.PUB)
self.pubSocket.bind("tcp://*:%s" % self.pubPort)
def get_signal_queue(self):
return self.signalQueue
def add_signal_receiver_queue(self, queue):
self.outQueues.append(queue)
def prepare_signal(self, data):
timestamp = np.copy(data[:, [0]].astype(np.uint64))
timestamp = np.squeeze(timestamp)
channel = np.copy(data[:, [13]].astype(int))
channel = np.squeeze(channel)
length = np.shape(data)[0]
startTime = data[0][0]
stopTime = data[-1][0]
timeInterval = stopTime - startTime
# filter overflow data
macTimeRawData = data[:, [3]]
index = np.argmin(macTimeRawData)
# check if there is any overflow
if (index > 0):
# find all overflows in mactime
overflows = map( lambda x : x[0] > x[1], zip(macTimeRawData, macTimeRawData[1:]))
indexes = [i+1 for i,x in enumerate(overflows) if x == True]
# TODO: look for overflows in the other registers as well
splitData = np.split(data, indexes)
splitData = list(map(lambda x: np.diff(x, axis=0), splitData))
diff = np.concatenate(splitData, axis=0)
timestamp =
|
np.split(timestamp, indexes)
|
numpy.split
|
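# Self-contained numpy sketch of the overflow handling used in the snippet above
# (names here are illustrative, not taken from the original class): a wrapping hardware
# counter is split at the wrap points and differenced per segment, so the wrap itself
# never shows up as a large negative jump.
import numpy as np

def diff_with_overflow_splits(counter):
    counter = np.asarray(counter)
    # indexes where the counter decreased, i.e. where an overflow happened
    wrap_indexes = np.where(np.diff(counter) < 0)[0] + 1
    segments = np.split(counter, wrap_indexes)            # one segment per wrap-free run
    diffs = [np.diff(seg) for seg in segments if seg.size > 1]
    return np.concatenate(diffs) if diffs else np.array([], dtype=counter.dtype)

# e.g. diff_with_overflow_splits([10, 20, 35, 5, 9]) -> array([10, 15, 4])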
# -*- coding: utf-8 -*-
from random import randint, random
import numpy as np
from npstreams import isum, iprod, last, isub, iany, iall, prod
from npstreams import sum as nssum # avoiding name clashes
import pytest
def test_isum_trivial():
"""Test a sum of zeros"""
source = [np.zeros((16,), dtype=float) for _ in range(10)]
summed = last(isum(source))
assert np.allclose(summed, np.zeros_like(summed))
def test_isum_ignore_nans():
"""Test a sum of zeros with NaNs sprinkled"""
source = [np.zeros((16,), dtype=float) for _ in range(10)]
source.append(np.full((16,), fill_value=np.nan))
summed = last(isum(source, ignore_nan=True))
assert np.allclose(summed, np.zeros_like(summed))
def test_isum_length():
"""Test that the number of yielded elements is the same as source"""
source = [np.zeros((16,), dtype=float) for _ in range(10)]
summed = list(isum(source, axis=0))
assert 10 == len(summed)
def test_isum_dtype():
"""Test a sum of floating zeros with an int accumulator"""
source = [np.zeros((16,), dtype=float) for _ in range(10)]
summed = last(isum(source, dtype=int))
assert np.allclose(summed, np.zeros_like(summed))
assert summed.dtype == int
def test_isum_axis():
"""Test that isum(axis = 0) yields 0d arrays"""
source = [np.zeros((16,), dtype=float) for _ in range(10)]
summed = last(isum(source, axis=0))
assert np.allclose(summed, np.zeros_like(summed))
summed = last(isum(source, axis=None))
assert np.allclose(summed, 0)
def test_isum_return_shape():
"""Test that the shape of output is as expected"""
source = [np.zeros((16,), dtype=float) for _ in range(10)]
summed = last(isum(source, axis=0))
assert summed.shape == (1, 10)
@pytest.mark.parametrize("axis", (0, 1, 2, None))
def test_isum_against_numpy(axis):
"""Test that isum() returns the same as numpy.sum() for various axis inputs"""
stream = [np.random.random((16, 16)) for _ in range(10)]
stack = np.dstack(stream)
from_numpy = np.sum(stack, axis=axis)
from_isum = last(isum(stream, axis=axis))
assert np.allclose(from_isum, from_numpy)
def test_sum_trivial():
"""Test a sum of zeros"""
source = [np.zeros((16,), dtype=float) for _ in range(10)]
summed = nssum(source)
assert np.allclose(summed, np.zeros_like(summed))
def test_sum_ignore_nans():
"""Test a sum of zeros with NaNs sprinkled"""
source = [np.zeros((16,), dtype=float) for _ in range(10)]
source.append(np.full((16,), fill_value=np.nan))
summed = nssum(source, ignore_nan=True)
assert np.allclose(summed, np.zeros_like(summed))
def test_sum_dtype():
"""Test a sum of floating zeros with an int accumulator"""
source = [np.zeros((16,), dtype=float) for _ in range(10)]
summed = nssum(source, dtype=int)
assert np.allclose(summed, np.zeros_like(summed))
assert summed.dtype == int
def test_sum_axis():
"""Test that isum(axis = 0) yields 0d arrays"""
source = [np.zeros((16,), dtype=float) for _ in range(10)]
summed = nssum(source, axis=0)
assert np.allclose(summed, np.zeros_like(summed))
summed = nssum(source, axis=None)
assert np.allclose(summed, 0)
def test_sum_return_shape():
"""Test that the shape of output is as expected"""
source = [np.zeros((16,), dtype=float) for _ in range(10)]
summed = nssum(source, axis=0)
assert summed.shape == (1, 10)
@pytest.mark.parametrize("axis", (0, 1, 2, None))
def test_sum_against_numpy(axis):
"""Test that isum() returns the same as numpy.sum() for various axis inputs"""
stream = [np.random.random((16, 16)) for _ in range(10)]
stack = np.dstack(stream)
from_numpy = np.sum(stack, axis=axis)
from_sum = nssum(stream, axis=axis)
assert np.allclose(from_sum, from_numpy)
def test_iprod_trivial():
"""Test a product of ones"""
source = [np.ones((16,), dtype=float) for _ in range(10)]
product = last(iprod(source))
assert np.allclose(product, np.ones_like(product))
def test_iprod_ignore_nans():
"""Test that NaNs are ignored."""
source = [np.ones((16,), dtype=float) for _ in range(10)]
source.append(np.full_like(source[0], np.nan))
product = last(iprod(source, ignore_nan=True))
assert np.allclose(product, np.ones_like(product))
def test_iprod_dtype():
"""Test that dtype argument is working"""
source = [np.ones((16,), dtype=float) for _ in range(10)]
product = last(iprod(source, dtype=int))
assert np.allclose(product, np.ones_like(product))
assert product.dtype == int
def test_iprod_axis():
"""Test that iprod(axis = 0) yields 0d arrays"""
source = [np.ones((16,), dtype=float) for _ in range(10)]
summed = last(iprod(source, axis=0))
assert np.all(summed == 1)
summed = last(iprod(source, axis=None))
assert np.allclose(summed, np.ones_like(summed))
@pytest.mark.parametrize("axis", (0, 1, 2, None))
def test_iprod_against_numpy(axis):
"""Test that iprod() returns the same as numpy.prod() for various axis inputs"""
stream = [np.random.random((16, 16)) for _ in range(10)]
stack = np.dstack(stream)
from_numpy = np.prod(stack, axis=axis)
from_stream = last(iprod(stream, axis=axis))
assert np.allclose(from_stream, from_numpy)
def test_prod_trivial():
"""Test a product of ones"""
source = [np.ones((16,), dtype=float) for _ in range(10)]
product = prod(source)
assert np.allclose(product, np.ones_like(product))
def test_prod_ignore_nans():
"""Test that NaNs are ignored."""
source = [np.ones((16,), dtype=float) for _ in range(10)]
source.append(np.full_like(source[0], np.nan))
product = prod(source, ignore_nan=True)
assert np.allclose(product, np.ones_like(product))
def test_prod_dtype():
"""Test that dtype argument is working"""
source = [np.ones((16,), dtype=float) for _ in range(10)]
product = prod(source, dtype=int)
assert np.allclose(product,
|
np.ones_like(product)
|
numpy.ones_like
|
import networkx as nx
import numpy as np
import warnings
import itertools
from networkx.algorithms.dag import descendants
from pyparsing import OneOrMore, Word, Optional, Suppress, alphanums, nums
from pgmpy.base import DAG
from pgmpy.global_vars import HAS_PANDAS
if HAS_PANDAS:
import pandas as pd
class SEMGraph(DAG):
"""
Base class for graphical representation of Structural Equation Models(SEMs).
All variables are by default assumed to have an associated error latent variable, which
therefore doesn't need to be specified.
Attributes
----------
latents: list
List of all the latent variables in the model except the error terms.
observed: list
List of all the observed variables in the model.
graph: nx.DiGraph
The graphical structure of the latent and observed variables except the error terms.
The parameters are stored in the `weight` attribute of each edge.
err_graph: nx.Graph
An undirected graph representing the relations between the error terms of the model.
The nodes of the graph have the same names as the variables but represent the error terms.
The variance is stored in the `weight` attribute of the node and the covariance is stored
in the `weight` attribute of the edge.
full_graph_struct: nx.DiGraph
Represents the full graph structure. The names of error terms start with `.` and
a new node, whose name starts with `..`, is added for each error correlation.
"""
def __init__(self, ebunch=[], latents=[], err_corr=[], err_var={}):
"""
Initializes a `SEMGraph` object.
Parameters
----------
ebunch: list/array-like
List of edges in form of tuples. Each tuple can be of two possible shape:
1. (u, v): This would add an edge from u to v without setting any parameter
for the edge.
2. (u, v, parameter): This would add an edge from u to v and set the edge's
parameter to `parameter`.
latents: list/array-like
List of nodes which are latent. All other variables are considered observed.
err_corr: list/array-like
List of tuples representing edges between error terms. It can be of the following forms:
1. (u, v): Add correlation between error terms of `u` and `v`. Doesn't set any variance or
covariance values.
2. (u, v, covar): Adds correlation between the error terms of `u` and `v` and sets the
parameter to `covar`.
err_var: dict (variable: variance)
Sets variance for the error terms in the model.
Examples
--------
Defining a model (Union sentiment model[1]) without setting any parameters.
>>> from pgmpy.models import SEMGraph
>>> sem = SEMGraph(ebunch=[('deferenc', 'unionsen'), ('laboract', 'unionsen'),
... ('yrsmill', 'unionsen'), ('age', 'deferenc'),
... ('age', 'laboract'), ('deferenc', 'laboract')],
... latents=[],
... err_corr=[('yrsmill', 'age')],
... err_var={})
Defining a model (Education [2]) with all the parameters set. To leave any
parameter unset, `np.NaN` can be passed explicitly.
>>> sem_edu = SEMGraph(ebunch=[('intelligence', 'academic', 0.8), ('intelligence', 'scale_1', 0.7),
... ('intelligence', 'scale_2', 0.64), ('intelligence', 'scale_3', 0.73),
... ('intelligence', 'scale_4', 0.82), ('academic', 'SAT_score', 0.98),
... ('academic', 'High_school_gpa', 0.75), ('academic', 'ACT_score', 0.87)],
... latents=['intelligence', 'academic'],
... err_corr=[],
... err_var={'intelligence': 1})
References
----------
[1] <NAME>., & <NAME>. (1984). Textile Workers and Union Sentiment.
Social Forces, 63(2), 502–521
[2] https://en.wikipedia.org/wiki/Structural_equation_modeling#/
media/File:Example_Structural_equation_model.svg
"""
super(SEMGraph, self).__init__()
# Construct the graph and set the parameters.
self.graph = nx.DiGraph()
for t in ebunch:
if len(t) == 3:
self.graph.add_edge(t[0], t[1], weight=t[2])
elif len(t) == 2:
self.graph.add_edge(t[0], t[1], weight=np.NaN)
else:
raise ValueError(
f"Expected tuple length: 2 or 3. Got {t} of len {len(t)}"
)
self.latents = set(latents)
self.observed = set(self.graph.nodes()) - self.latents
# Construct the error graph and set the parameters.
self.err_graph = nx.Graph()
self.err_graph.add_nodes_from(self.graph.nodes())
for t in err_corr:
if len(t) == 2:
self.err_graph.add_edge(t[0], t[1], weight=np.NaN)
elif len(t) == 3:
self.err_graph.add_edge(t[0], t[1], weight=t[2])
else:
raise ValueError(
f"Expected tuple length: 2 or 3. Got {t} of len {len(t)}"
)
# Set the error variances
for var in self.err_graph.nodes():
self.err_graph.nodes[var]["weight"] = (
err_var[var] if var in err_var.keys() else np.NaN
)
self.full_graph_struct = self._get_full_graph_struct()
def _get_full_graph_struct(self):
"""
Creates a directed graph by joining `self.graph` and `self.err_graph`.
Adds new nodes to replace undirected edges (u <--> v) with two directed
edges (u <-- ..uv) and (..uv --> v).
Returns
-------
nx.DiGraph: A full directed graph structure with error nodes starting
with `.` and bidirected edges replaced with common cause
nodes starting with `..`.
Examples
--------
>>> from pgmpy.models import SEMGraph
>>> sem = SEMGraph(ebunch=[('deferenc', 'unionsen'), ('laboract', 'unionsen'),
... ('yrsmill', 'unionsen'), ('age', 'deferenc'),
... ('age', 'laboract'), ('deferenc', 'laboract')],
... latents=[],
... err_corr=[('yrsmill', 'age')])
>>> sem._get_full_graph_struct()
"""
full_graph = self.graph.copy()
mapping_dict = {"." + node: node for node in self.err_graph.nodes}
full_graph.add_edges_from([(u, v) for u, v in mapping_dict.items()])
for u, v in self.err_graph.edges:
cov_node = ".." + "".join(sorted([u, v]))
full_graph.add_edges_from([(cov_node, "." + u), (cov_node, "." + v)])
return full_graph
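# Illustrative usage sketch for _get_full_graph_struct, mirroring the docstring example
# above (assumes pgmpy is importable as in the docstrings). Every variable v gets an
# error node '.v', and the error correlation between 'yrsmill' and 'age' becomes a
# common-cause node '..ageyrsmill' with edges into '.yrsmill' and '.age'.
def _demo_full_graph_struct():
    from pgmpy.models import SEMGraph
    sem = SEMGraph(ebunch=[('deferenc', 'unionsen'), ('laboract', 'unionsen'),
                           ('yrsmill', 'unionsen'), ('age', 'deferenc'),
                           ('age', 'laboract'), ('deferenc', 'laboract')],
                   latents=[], err_corr=[('yrsmill', 'age')])
    full = sem.full_graph_struct
    return [edge for edge in full.edges() if edge[0].startswith('..')]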
def get_scaling_indicators(self):
"""
Returns a scaling indicator for each of the latent variables in the model.
The scaling indicator is chosen arbitrarily (the first suitable observed neighbour found)
among the observed measurement variables of the latent variable.
Examples
--------
>>> from pgmpy.models import SEMGraph
>>> model = SEMGraph(ebunch=[('xi1', 'eta1'), ('xi1', 'x1'), ('xi1', 'x2'),
... ('eta1', 'y1'), ('eta1', 'y2')],
... latents=['xi1', 'eta1'])
>>> model.get_scaling_indicators()
{'xi1': 'x1', 'eta1': 'y1'}
Returns
-------
dict: Returns a dict with latent variables as the key and their value being the
scaling indicator.
"""
scaling_indicators = {}
for node in self.latents:
for neighbor in self.graph.neighbors(node):
if neighbor in self.observed:
scaling_indicators[node] = neighbor
break
return scaling_indicators
def active_trail_nodes(self, variables, observed=[], avoid_nodes=[], struct="full"):
"""
Finds all the observed variables which are d-connected to `variables` in the `graph_struct`
when `observed` variables are observed.
Parameters
----------
variables: str or array like
Observed variables whose d-connected variables are to be found.
observed : list/array-like
If given the active trails would be computed assuming these nodes to be observed.
avoid_nodes: list/array-like
If specified, the algorithm doesn't account for paths that have influence flowing
through the avoided nodes.
struct: str or nx.DiGraph instance
If "full", considers correlation between error terms for computing d-connection.
If "non_error", doesn't condised error correlations for computing d-connection.
If instance of nx.DiGraph, finds d-connected variables on the given graph.
Examples
--------
>>> from pgmpy.models import SEM
>>> model = SEMGraph(ebunch=[('yrsmill', 'unionsen'), ('age', 'laboract'),
... ('age', 'deferenc'), ('deferenc', 'laboract'),
... ('deferenc', 'unionsen'), ('laboract', 'unionsen')],
... latents=[],
... err_corr=[('yrsmill', 'age')])
>>> model.active_trail_nodes('age')
Returns
-------
dict: {str: list}
Returns a dict with `variables` as the key and a list of d-connected variables as the
value.
References
----------
Details of the algorithm can be found in 'Probabilistic Graphical Models:
Principles and Techniques' - <NAME> Friedman,
Page 75, Algorithm 3.1
"""
if struct == "full":
graph_struct = self.full_graph_struct
elif struct == "non_error":
graph_struct = self.graph
elif isinstance(struct, nx.DiGraph):
graph_struct = struct
else:
raise ValueError(
f"Expected struct to be str or nx.DiGraph. Got {type(struct)}"
)
ancestors_list = set()
for node in observed:
ancestors_list = ancestors_list.union(
nx.algorithms.dag.ancestors(graph_struct, node)
)
# Direction of flow of information
# up -> from parent to child
# down -> from child to parent
active_trails = {}
for start in variables if isinstance(variables, (list, tuple)) else [variables]:
visit_list = set()
visit_list.add((start, "up"))
traversed_list = set()
active_nodes = set()
while visit_list:
node, direction = visit_list.pop()
if node in avoid_nodes:
continue
if (node, direction) not in traversed_list:
if (
(node not in observed)
and (not node.startswith("."))
and (node not in self.latents)
):
active_nodes.add(node)
traversed_list.add((node, direction))
if direction == "up" and node not in observed:
for parent in graph_struct.predecessors(node):
visit_list.add((parent, "up"))
for child in graph_struct.successors(node):
visit_list.add((child, "down"))
elif direction == "down":
if node not in observed:
for child in graph_struct.successors(node):
visit_list.add((child, "down"))
if node in ancestors_list:
for parent in graph_struct.predecessors(node):
visit_list.add((parent, "up"))
active_trails[start] = active_nodes
return active_trails
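# Illustrative usage sketch for active_trail_nodes, reusing the docstring model above
# (assumes pgmpy is importable; the concrete outputs are not reproduced here).
# Conditioning on a collider such as 'unionsen' can open additional trails, so the
# d-connected set generally depends on the `observed` argument.
def _demo_active_trails():
    from pgmpy.models import SEMGraph
    model = SEMGraph(ebunch=[('yrsmill', 'unionsen'), ('age', 'laboract'),
                             ('age', 'deferenc'), ('deferenc', 'laboract'),
                             ('deferenc', 'unionsen'), ('laboract', 'unionsen')],
                     latents=[], err_corr=[('yrsmill', 'age')])
    unconditioned = model.active_trail_nodes('age')['age']
    conditioned = model.active_trail_nodes('age', observed=['unionsen'])['age']
    return unconditioned, conditioned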
def _iv_transformations(self, X, Y, scaling_indicators={}):
"""
Transforms the graph structure of SEM so that the d-separation criterion is
applicable for finding IVs. The method transforms the graph for finding MIIV
for the estimation of X \rightarrow Y given the scaling indicator for all the
parent latent variables.
Parameters
----------
X: node
The explanatory variable.
Y: node
The dependent variable.
scaling_indicators: dict
Scaling indicator for each latent variable in the model.
Returns
-------
nx.DiGraph: The transformed full graph structure.
Examples
--------
>>> from pgmpy.models import SEMGraph
>>> model = SEMGraph(ebunch=[('xi1', 'eta1'), ('xi1', 'x1'), ('xi1', 'x2'),
... ('eta1', 'y1'), ('eta1', 'y2')],
... latents=['xi1', 'eta1'])
>>> model._iv_transformations('xi1', 'eta1',
... scaling_indicators={'xi1': 'x1', 'eta1': 'y1'})
"""
full_graph = self.full_graph_struct.copy()
if not (X, Y) in full_graph.edges():
raise ValueError(f"The edge from {X} -> {Y} doesn't exist in the graph")
if (X in self.observed) and (Y in self.observed):
full_graph.remove_edge(X, Y)
return full_graph, Y
elif Y in self.latents:
full_graph.add_edge("." + Y, scaling_indicators[Y])
dependent_var = scaling_indicators[Y]
else:
dependent_var = Y
for parent_y in self.graph.predecessors(Y):
# Remove edge even when the parent is observed ????
full_graph.remove_edge(parent_y, Y)
if parent_y in self.latents:
full_graph.add_edge("." + scaling_indicators[parent_y], dependent_var)
return full_graph, dependent_var
def get_ivs(self, X, Y, scaling_indicators={}):
"""
Returns the Instrumental variables(IVs) for the relation X -> Y
Parameters
----------
X: node
The variable name (observed or latent)
Y: node
The variable name (observed or latent)
scaling_indicators: dict (optional)
A dict representing which observed variable to use as scaling indicator for
the latent variables.
If not given the method automatically selects one of the measurement variables
at random as the scaling indicator.
Returns
-------
set: {str}
The set of Instrumental Variables for X -> Y.
Examples
--------
>>> from pgmpy.models import SEMGraph
>>> model = SEMGraph(ebunch=[('I', 'X'), ('X', 'Y')],
... latents=[],
... err_corr=[('X', 'Y')])
>>> model.get_ivs('X', 'Y')
{'I'}
"""
if not scaling_indicators:
scaling_indicators = self.get_scaling_indicators()
if (X in scaling_indicators.keys()) and (scaling_indicators[X] == Y):
warnings.warn(
f"{Y} is the scaling indicator of {X}. Please specify `scaling_indicators`"
)
transformed_graph, dependent_var = self._iv_transformations(
X, Y, scaling_indicators=scaling_indicators
)
if X in self.latents:
explanatory_var = scaling_indicators[X]
else:
explanatory_var = X
d_connected_x = self.active_trail_nodes(
[explanatory_var], struct=transformed_graph
)[explanatory_var]
# Condition on X to block any paths going through X.
d_connected_y = self.active_trail_nodes(
[dependent_var], avoid_nodes=[explanatory_var], struct=transformed_graph
)[dependent_var]
# Remove {X, Y} because they can't be IV for X -> Y
return d_connected_x - d_connected_y - {dependent_var, explanatory_var}
def moralize(self, graph="full"):
"""
TODO: This needs to go to a parent class.
Removes all the immoralities in the DirectedGraph and creates a moral
graph (UndirectedGraph).
A v-structure X->Z<-Y is an immorality if there is no directed edge
between X and Y.
Parameters
----------
graph:
Examples
--------
"""
if graph == "full":
graph = self.full_graph_struct
elif isinstance(graph, nx.DiGraph):
graph = graph
else:
graph = self.graph
moral_graph = graph.to_undirected()
for node in graph.nodes():
moral_graph.add_edges_from(
itertools.combinations(graph.predecessors(node), 2)
)
return moral_graph
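# Standalone networkx sketch of the moralization step above (illustrative, not part of
# the class). The v-structure X -> Z <- Y is an immorality, so moralization "marries"
# the parents X and Y and then drops edge directions.
def _demo_moralize():
    import itertools
    import networkx as nx
    g = nx.DiGraph([('X', 'Z'), ('Y', 'Z')])
    moral = g.to_undirected()
    for node in g.nodes():
        moral.add_edges_from(itertools.combinations(g.predecessors(node), 2))
    return sorted(map(sorted, moral.edges()))  # [['X', 'Y'], ['X', 'Z'], ['Y', 'Z']]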
def _nearest_separator(self, G, Y, Z):
"""
Finds the set of nearest separators for `Y` and `Z` in `G`.
Parameters
----------
G: nx.DiGraph instance
The graph in which to the find the nearest separation for `Y` and `Z`.
Y: str
The variable name for which the separators are needed.
Z: str
The other variable for which the separators are needed.
Returns
-------
set or None: If there is a nearest separator returns the set of separators else returns None.
"""
W = set()
ancestral_G = G.subgraph(
nx.ancestors(G, Y).union(nx.ancestors(G, Z)).union({Y, Z})
).copy()
# Optimization: Remove all error nodes which don't have any correlation, as they don't add any new paths. If not removed, they can create a lot of
# extra paths, resulting in a much higher runtime.
err_nodes_to_remove = set(self.err_graph.nodes()) - set(
[node for edge in self.err_graph.edges() for node in edge]
)
ancestral_G.remove_nodes_from(["." + node for node in err_nodes_to_remove])
M = self.moralize(graph=ancestral_G)
visited = set([Y])
to_visit = list(M.neighbors(Y))
# Another optimization over the original algorithm: rather than going through all the paths, do
# a DFS search to find a Markov blanket of observed variables. This doesn't ensure a minimal
# observed set.
while to_visit:
node = to_visit.pop()
if node == Z:
return None
visited.add(node)
if node in self.observed:
W.add(node)
else:
to_visit.extend(
[node for node in M.neighbors(node) if node not in visited]
)
# for path in nx.all_simple_paths(M, Y, Z):
# path_set = set(path)
# if (len(path) >= 3) and not (W & path_set):
# for index in range(1, len(path)-1):
# if path[index] in self.observed:
# W.add(path[index])
# break
if Y not in self.active_trail_nodes([Z], observed=W, struct=ancestral_G)[Z]:
return W
else:
return None
def get_conditional_ivs(self, X, Y, scaling_indicators={}):
"""
Returns the conditional IVs for the relation X -> Y
Parameters
----------
X: node
The observed variable's name
Y: node
The observed variable's name
scaling_indicators: dict (optional)
A dict representing which observed variable to use as scaling indicator for
the latent variables.
If not provided, automatically finds scaling indicators by randomly selecting
one of the measurement variables of each latent variable.
Returns
-------
set: Set of 2-tuples representing tuple[0] is an IV for X -> Y given tuple[1].
References
----------
.. [1] <NAME>., <NAME>., & <NAME>. (2015, June). Efficiently finding
conditional instruments for causal inference. In Twenty-Fourth International Joint
Conference on Artificial Intelligence.
Examples
--------
>>> from pgmpy.models import SEMGraph
>>> model = SEMGraph(ebunch=[('I', 'X'), ('X', 'Y'), ('W', 'I')],
... latents=[],
... err_corr=[('W', 'Y')])
>>> model.get_ivs('X', 'Y')
[('I', {'W'})]
"""
if not scaling_indicators:
scaling_indicators = self.get_scaling_indicators()
if (X in scaling_indicators.keys()) and (scaling_indicators[X] == Y):
warnings.warn(
f"{Y} is the scaling indicator of {X}. Please specify `scaling_indicators`"
)
transformed_graph, dependent_var = self._iv_transformations(
X, Y, scaling_indicators=scaling_indicators
)
if (X, Y) in transformed_graph.edges:
G_c = transformed_graph.copy()
G_c.remove_edge(X, Y)
else:
G_c = transformed_graph
instruments = []
for Z in self.observed - {X, Y}:
W = self._nearest_separator(G_c, Y, Z)
# Condition to check if W d-separates Y from Z
if (not W) or (W.intersection(descendants(G_c, Y))) or (X in W):
continue
# Condition to check if X d-connected to I after conditioning on W.
elif X in self.active_trail_nodes([Z], observed=W, struct=G_c)[Z]:
instruments.append((Z, W))
else:
continue
return instruments
def to_lisrel(self):
"""
Converts the model from a graphical representation to an equivalent algebraic
representation. This converts the model into a Reticular Action Model (RAM) model
representation which is implemented by `pgmpy.models.SEMAlg` class.
Returns
-------
SEMAlg instance: Instance of `SEMAlg` representing the model.
Examples
--------
>>> from pgmpy.models import SEM
>>> sem = SEM.from_graph(ebunch=[('deferenc', 'unionsen'), ('laboract', 'unionsen'),
... ('yrsmill', 'unionsen'), ('age', 'deferenc'),
... ('age', 'laboract'), ('deferenc', 'laboract')],
... latents=[],
... err_corr=[('yrsmill', 'age')],
... err_var={})
>>> sem.to_lisrel()
# TODO: Complete this.
See Also
--------
to_standard_lisrel: Converts to the standard lisrel format and returns the parameters.
"""
nodelist = list(self.observed) + list(self.latents)
graph_adj = nx.to_numpy_matrix(self.graph, nodelist=nodelist, weight=None)
graph_fixed = nx.to_numpy_matrix(self.graph, nodelist=nodelist, weight="weight")
err_adj = nx.to_numpy_matrix(self.err_graph, nodelist=nodelist, weight=None)
np.fill_diagonal(err_adj, 1.0) # Variance exists for each error term.
err_fixed = nx.to_numpy_matrix(
self.err_graph, nodelist=nodelist, weight="weight"
)
# Add the variance of the error terms.
for index, node in enumerate(nodelist):
try:
err_fixed[index, index] = self.err_graph.nodes[node]["weight"]
except KeyError:
err_fixed[index, index] = 0.0
wedge_y = np.zeros((len(self.observed), len(nodelist)), dtype=int)
for index, obs_var in enumerate(self.observed):
wedge_y[index][nodelist.index(obs_var)] = 1.0
from pgmpy.models import SEMAlg
return SEMAlg(
eta=nodelist,
B=graph_adj.T,
zeta=err_adj.T,
wedge_y=wedge_y,
fixed_values={"B": graph_fixed.T, "zeta": err_fixed.T},
)
@staticmethod
def __standard_lisrel_masks(graph, err_graph, weight, var):
"""
This method is called by `get_fixed_masks` and `get_masks` methods.
Parameters
----------
weight: None | 'weight'
If None: Returns a 1.0 for an edge in the graph else 0.0
If 'weight': Returns the weight if a weight is assigned to an edge
else 0.0
var: dict
Dict with keys eta, xi, y, and x representing the variables in them.
Returns
-------
np.ndarray: Adjacency matrix of the model's graph structure.
Notes
-----
B: Effect matrix of eta on eta
\gamma: Effect matrix of xi on eta
\wedge_y: Effect matrix of eta on y
\wedge_x: Effect matrix of xi on x
\phi: Covariance matrix of xi
\psi: Covariance matrix of eta errors
\theta_e: Covariance matrix of y errors
\theta_del: Covariance matrix of x errors
Examples
--------
"""
# Arrange the adjacency matrix in order y, x, eta, xi and then slice masks from it.
# y(p) x(q) eta(m) xi(n)
# y
# x
# eta \wedge_y B
# xi \wedge_x \Gamma
#
# But here we are slicing from the transpose of the adjacency matrix because we want incoming
# edges instead of outgoing ones, since parameters come before variables in the equations.
#
# y(p) x(q) eta(m) xi(n)
# y \wedge_y
# x \wedge_x
# eta B \Gamma
# xi
y_vars, x_vars, eta_vars, xi_vars = var["y"], var["x"], var["eta"], var["xi"]
p, q, m, n = (len(y_vars), len(x_vars), len(eta_vars), len(xi_vars))
nodelist = y_vars + x_vars + eta_vars + xi_vars
adj_matrix = nx.to_numpy_matrix(graph, nodelist=nodelist, weight=weight).T
B_mask = adj_matrix[p + q : p + q + m, p + q : p + q + m]
gamma_mask = adj_matrix[p + q : p + q + m, p + q + m :]
wedge_y_mask = adj_matrix[0:p, p + q : p + q + m]
wedge_x_mask = adj_matrix[p : p + q, p + q + m :]
err_nodelist = y_vars + x_vars + eta_vars + xi_vars
err_adj_matrix = nx.to_numpy_matrix(
err_graph, nodelist=err_nodelist, weight=weight
)
if not weight == "weight":
np.fill_diagonal(err_adj_matrix, 1.0)
theta_e_mask = err_adj_matrix[:p, :p]
theta_del_mask = err_adj_matrix[p : p + q, p : p + q]
psi_mask = err_adj_matrix[p + q : p + q + m, p + q : p + q + m]
phi_mask = err_adj_matrix[p + q + m :, p + q + m :]
return {
"B": B_mask,
"gamma": gamma_mask,
"wedge_y": wedge_y_mask,
"wedge_x": wedge_x_mask,
"phi": phi_mask,
"theta_e": theta_e_mask,
"theta_del": theta_del_mask,
"psi": psi_mask,
}
def to_standard_lisrel(self):
r"""
Transforms the model to the standard LISREL representation of latent and measurement
equations. The standard LISREL representation is given as:
..math::
\mathbf{\eta} = \mathbf{B \eta} + \mathbf{\Gamma \xi} + \mathbf{\zeta} \\
\mathbf{y} = \mathbf{\wedge_y \eta} + \mathbf{\epsilon} \\
\mathbf{x} = \mathbf{\wedge_x \xi} + \mathbf{\delta} \\
\mathbf{\Theta_e} = COV(\mathbf{\epsilon}) \\
\mathbf{\Theta_\delta} = COV(\mathbf{\delta}) \\
\mathbf{\Psi} = COV(\mathbf{\eta}) \\
\mathbf{\Phi} = COV(\mathbf{\xi}) \\
Since the standard LISREL representation has restrictions on the types of model,
this method adds extra latent variables with fixed loadings of `1` to make the model
consistent with the restrictions.
Returns
-------
var_names: dict (keys: eta, xi, y, x)
Returns the variable names in :math:`\mathbf{\eta}`, :math:`\mathbf{\xi}`,
:math:`\mathbf{y}`, :math:`\mathbf{x}`.
params: dict (keys: B, gamma, wedge_y, wedge_x, theta_e, theta_del, phi, psi)
Returns a boolean matrix for each of the parameters. A 1 in the matrix
represents that there is an edge in the model, 0 represents there is no edge.
fixed_values: dict (keys: B, gamma, wedge_y, wedge_x, theta_e, theta_del, phi, psi)
Returns a matrix for each of the parameters. A value in the matrix represents the
set value for the parameter in the model else it is 0.
See Also
--------
to_lisrel: Converts the model to `pgmpy.models.SEMAlg` instance.
Examples
--------
TODO: Finish this.
"""
lisrel_err_graph = self.err_graph.copy()
lisrel_latents = self.latents.copy()
lisrel_observed = self.observed.copy()
# Add new latent nodes to convert it to LISREL format.
mapping = {}
for u, v in self.graph.edges:
if (u not in self.latents) and (v in self.latents):
mapping[u] = "_l_" + u
elif (u not in self.latents) and (v not in self.latents):
mapping[u] = "_l_" + u
lisrel_latents.update(mapping.values())
lisrel_graph = nx.relabel_nodes(self.graph, mapping, copy=True)
for u, v in mapping.items():
lisrel_graph.add_edge(v, u, weight=1.0)
# Get values of eta, xi, y, x
latent_struct = lisrel_graph.subgraph(lisrel_latents)
latent_indegree = lisrel_graph.in_degree()
eta = []
xi = []
for node in latent_struct.nodes():
if latent_indegree[node]:
eta.append(node)
else:
xi.append(node)
x = set()
y = set()
for exo in xi:
x.update(
[x for x in lisrel_graph.neighbors(exo) if x not in lisrel_latents]
)
for endo in eta:
y.update(
[y for y in lisrel_graph.neighbors(endo) if y not in lisrel_latents]
)
# If some node has edges from both eta and xi, replace it with another latent variable
# otherwise it won't get included in any of the matrices.
# TODO: Patchy work. Find a better solution.
common_elements = set(x).intersection(set(y))
if common_elements:
mapping = {}
for var in common_elements:
mapping[var] = "_l_" + var
lisrel_graph = nx.relabel_nodes(lisrel_graph, mapping, copy=True)
for v, u in mapping.items():
lisrel_graph.add_edge(u, v, weight=1.0)
eta.extend(mapping.values())
x = list(set(x) - common_elements)
y.update(common_elements)
var_names = {"eta": eta, "xi": xi, "y": list(y), "x": list(x)}
edges_masks = self.__standard_lisrel_masks(
graph=lisrel_graph, err_graph=lisrel_err_graph, weight=None, var=var_names
)
fixed_masks = self.__standard_lisrel_masks(
graph=lisrel_graph,
err_graph=lisrel_err_graph,
weight="weight",
var=var_names,
)
return (var_names, edges_masks, fixed_masks)
class SEMAlg:
"""
Base class for algebraic representation of Structural Equation Models(SEMs). The model is
represented using the Reticular Action Model (RAM).
"""
def __init__(self, eta=None, B=None, zeta=None, wedge_y=None, fixed_values=None):
r"""
Initializes SEMAlg model. The model is represented using the Reticular Action Model(RAM)
which is given as:
..math::
\mathbf{\eta} = \mathbf{B \eta} + \mathbf{\zeta}
\mathbf{y} = \mathbf{\wedge_y \eta}
where :math:`\mathbf{\eta}` is the set of all the observed and latent variables in the
model, :math:`\mathbf{y}` are the set of observed variables, :math:`\mathbf{\zeta}` is
the error terms for :math:`\mathbf{\eta}`, and \mathbf{\wedge_y} is a boolean array to
select the observed variables from :math:`\mathbf{\eta}`.
Parameters
----------
The following set of parameters are used to set the learnable parameters in the model.
To specify the values of the parameter use the `fixed_values` parameter. Either `eta`,
`B`, `zeta`, and `wedge_y`, or `fixed_values` need to be specified.
eta: list/array-like
The name of the variables in the model.
B: 2-D array (boolean)
The learnable parameters in the `B` matrix.
zeta: 2-D array (boolean)
The learnable parameters in the covariance matrix of the error terms.
wedge_y: 2-D array
The `wedge_y` matrix.
fixed_values: dict (default: None)
A dict of fixed values for parameters.
If None, all the parameters specified by `B` and `zeta` are learnable.
Returns
-------
pgmpy.models.SEMAlg instance: An instance of the object with initialized values.
Examples
--------
>>> from pgmpy.models import SEMAlg
# TODO: Finish this example
"""
self.eta = eta
self.B = np.array(B)
self.zeta = np.array(zeta)
self.wedge_y = wedge_y
# Get the observed variables
self.y = []
for row_i in range(self.wedge_y.shape[0]):
for index, val in enumerate(self.wedge_y[row_i]):
if val:
self.y.append(self.eta[index])
if fixed_values:
self.B_fixed_mask = fixed_values["B"]
self.zeta_fixed_mask = fixed_values["zeta"]
else:
self.B_fixed_mask = np.zeros(self.B.shape)
self.zeta_fixed_mask = np.zeros(self.zeta.shape)
# Masks represent the parameters which need to be learnt while training.
self.B_mask = np.multiply(np.where(self.B_fixed_mask != 0, 0.0, 1.0), self.B)
self.zeta_mask = np.multiply(
np.where(self.zeta_fixed_mask != 0, 0.0, 1.0), self.zeta
)
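# Small numpy sketch of the algebra behind the RAM form above (illustrative only; these
# are the standard RAM identities, not necessarily the exact computation performed
# elsewhere in pgmpy). With eta = B @ eta + zeta_err and y = wedge_y @ eta, the implied
# covariance of the observed variables is
#     Sigma_y = wedge_y @ inv(I - B) @ Zeta @ inv(I - B).T @ wedge_y.T
# where Zeta is the covariance matrix of the error terms.
def ram_implied_cov(B, zeta, wedge_y):
    import numpy as np
    B = np.asarray(B, dtype=float)
    zeta = np.asarray(zeta, dtype=float)        # covariance of the error terms
    wedge_y = np.asarray(wedge_y, dtype=float)  # selects the observed rows of eta
    inv = np.linalg.inv(np.eye(B.shape[0]) - B)
    return wedge_y @ inv @ zeta @ inv.T @ wedge_y.T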
def to_SEMGraph(self):
"""
Creates a graph structure from the LISREL representation.
Returns
-------
pgmpy.models.SEMGraph instance: A path model of the model.
Examples
--------
>>> from pgmpy.models import SEMAlg
>>> model = SEMAlg()
# TODO: Finish this example
"""
err_var = {var: np.diag(self.zeta)[i] for i, var in enumerate(self.eta)}
graph = nx.relabel_nodes(
nx.from_numpy_matrix(self.B.T, create_using=nx.DiGraph),
mapping={i: self.eta[i] for i in range(self.B.shape[0])},
)
# Fill zeta diagonal with 0's as they represent variance and would add self loops in the graph.
zeta = self.zeta.copy()
|
np.fill_diagonal(zeta, 0)
|
numpy.fill_diagonal
|
'''
This script makes an animation that redshifts a spectrum and, upon reaching
6.5 < z < ~11 marks the Epoch of Reionization. The spectrum that gets shifted
is a model of mine from a Cloudy simulation (v17, Ferland et al. 2017).
---> It takes a minute or two so be patient! (it's the animation package)
Additionally, I have included a module that applies IGM attentuation to the
spectrum as it shifts to larger redshifts.
Using the animation package requires a learning curve, so be sure to google
any time you encounter a problem or want to add something and you don't
know the proper notation!
Planned updates: Improving the colored backgrounds for UV and NIR.
I would like to add a quiescent spectrum eventually.
--> *may* make that spectrum disappear after it reaches
a certain redshift (for example, past z>6?)
Credit: <NAME>
<EMAIL>
Texas A&M University
'''
_author_ = '<NAME>'
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime as dt # timing how long it takes
from matplotlib.patches import Circle
from matplotlib import collections
import matplotlib.animation as animation
import matplotlib.cm as cm
import matplotlib.gridspec as gridspec
import igm_absorption as igm # another script written by <NAME>
# which adds in IGM absorption for the higher redshifts
# adding a timer to this to see how long it takes
start_it = dt.now()
print('\nStarting timer', start_it)
fig = plt.figure(figsize=(10,6))
ax = plt.gca()
ax = plt.axes(xlim=(0.1,2),ylim=(4e-16,1e-13))
# Cloudy model provided by Taylor Hutchison
# binary stellar population from BPASS with age=10Myr & IMF that goes to 300 Msolar,
# Z(stellar)=Z(nebular)=0.1 Zsolar, ionization parameter log_10(U)=-1.5, n_H=300 cm^(-3)
# (note: this is a very low Z and high ionization model)
con = np.loadtxt('age7z0.1zneb0.1u-1.5_300.con',usecols=[0,6])
wave,vfv = con[:,0],con[:,1]
nu = 2.998e+14 / wave
sed_0 = vfv / nu
# this is my way of applying a rainbow for the visual band
cmap = cm.get_cmap('rainbow')
vcolors = [cmap(i) for i in
|
np.linspace(0,1,300)
|
numpy.linspace
|
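# Tiny standalone sketch of the wavelength shift the animation in the snippet above
# applies (illustrative; an observed-frame wavelength scales as (1 + z), while flux
# normalisation and IGM attenuation are handled elsewhere in that script).
import numpy as np

def redshift_wavelengths(wave_rest_um, z):
    """Shift a rest-frame wavelength grid (microns) to the observed frame."""
    return np.asarray(wave_rest_um) * (1.0 + z)

# e.g. Lyman-alpha at 0.1216 um lands near 1.03 um at z = 7.5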
# Copyright (c) 2020 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
Quake 1 physics gym environment definitions.
Importing this module will register the 'Q1PhysEnv-v0' environment. Alternatively, you can use the `PhysEnv` or
`VectorPhysEnv` classes directly.
"""
import dataclasses
import enum
from typing import Optional, Tuple, Union
import gym.spaces
import numpy as np
from . import phys
__all__ = (
'ActionDecoder',
'Config',
'get_obs_scale',
'INITIAL_YAW_ZERO',
'Key',
'Obs',
'PhysEnv',
'VectorPhysEnv',
)
# These are chosen to match the initial state of the player on the 100m map.
_INITIAL_STATE = {'z_pos': np.float32(32.843201),
'vel': np.array([0, 0, -12], dtype=np.float32),
'on_ground': np.bool_(False),
'jump_released': np.bool_(True)}
INITIAL_YAW_ZERO =
|
np.float32(90)
|
numpy.float32
|
import numpy as np
import tensorflow as tf
from keras import backend as K
from keras import objectives
from keras.layers import Input, Lambda
from keras.layers.convolutional import Convolution1D
from keras.layers.core import Dense, Flatten, RepeatVector
from keras.layers.recurrent import GRU
from keras.layers.wrappers import TimeDistributed
from keras.models import Model
from keras.utils.training_utils import multi_gpu_model
import molecules.transformer as tr
from molecules.transformer import debugPrint
from utils import DefaultDecoderParams
class MoleculeVAE():
autoencoder = None
def __init__(self, tokens, params):
self.p = params
# self.p["len_limit"] = p["len_limit"]
self.charset_length = tokens.num()
x = Input(shape=(self.p["len_limit"], self.charset_length))
_, z, mean, logvar = self._buildEncoder(x)
self.encode_sample = Model(x, z)
self.encode = Model(x, [mean, logvar])
encoded_input = Input(shape=(self.p["latent_dim"],))
self.decode = Model(
encoded_input,
self._buildDecoder(encoded_input)
)
x1 = Input(shape=(self.p["len_limit"], self.charset_length))
vae_loss, z1, mean, logvar = self._buildEncoder(x1)
p1 = self._buildPropertyPredictor(z1)
self.autoencoder = Model(
x1,
[self._buildDecoder(z1), p1]
)
self.autoencoder.compile(optimizer='Adam',
loss=[vae_loss, 'mean_squared_error'],
loss_weights=[1.0, self.p["pp_weight"] / self.p["num_props"] ** 2],
metrics=['accuracy'])
def _buildEncoder(self, x):
h = Convolution1D(9, 9, activation='relu', name='conv_1')(x)
h = Convolution1D(9, 9, activation='relu', name='conv_2')(h)
h = Convolution1D(10, 11, activation='relu', name='conv_3')(h)
h = Flatten(name='flatten_1')(h)
h = Dense(435, activation='relu', name='dense_1')(h)
def sampling(args):
z_mean_, z_log_var_ = args
batch_size = K.shape(z_mean_)[0]
epsilon = K.random_normal(shape=(batch_size, self.p["latent_dim"]), mean=0., stddev=self.p["stddev"])
return z_mean_ + K.exp(z_log_var_ / 2) * epsilon
z_mean = Dense(self.p["latent_dim"], name='z_mean', activation='linear')(h)
z_log_var = Dense(self.p["latent_dim"], name='z_log_var', activation='linear')(h)
def vae_loss(x, x_decoded_mean):
x = K.flatten(x)
x_decoded_mean = K.flatten(x_decoded_mean)
xent_loss = self.p["len_limit"] * objectives.binary_crossentropy(x, x_decoded_mean)
kl_loss = - 0.5 * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
return xent_loss + kl_loss
return vae_loss, Lambda(sampling, output_shape=(self.p["latent_dim"],), name='lambda')(
[z_mean, z_log_var]), z_mean, z_log_var
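# Plain numpy sketch of the two VAE ingredients used above (illustrative; it mirrors the
# formulas in `sampling` and `vae_loss` rather than the Keras graph itself, and relies on
# this module's `import numpy as np`).
def reparameterize(z_mean, z_log_var, rng=None):
    rng = np.random.default_rng() if rng is None else rng
    eps = rng.standard_normal(z_mean.shape)
    return z_mean + np.exp(z_log_var / 2.0) * eps      # z = mu + sigma * eps

def kl_to_standard_normal(z_mean, z_log_var):
    # KL( N(mu, sigma^2) || N(0, 1) ), averaged over the latent dimensions as above
    return -0.5 * np.mean(1 + z_log_var - np.square(z_mean) - np.exp(z_log_var), axis=-1)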
def _buildDecoder(self, z):
h = Dense(self.p["latent_dim"], name='latent_input', activation='relu')(z)
h = RepeatVector(self.p["len_limit"], name='repeat_vector')(h)
h = GRU(501, return_sequences=True, name='gru_1')(h)
h = GRU(501, return_sequences=True, name='gru_2')(h)
h = GRU(501, return_sequences=True, name='gru_3')(h)
return TimeDistributed(Dense(self.charset_length, activation='softmax'), name='decoder')(h)
def _buildPropertyPredictor(self, x):
h = Dense(self.p["latent_dim"], input_shape=(self.p["latent_dim"],), activation='linear')(x)
for _ in range(self.p["pp_layers"] - 1):
h = Dense(self.p["pp_layers"], activation='linear')(h)
return Dense(self.p["num_props"], activation='linear', name='props')(h)
# https://github.com/Lsdefine/attention-is-all-you-need-keras
class TriTransformer:
def __init__(self, i_tokens, params, o_tokens=None):
self.i_tokens = i_tokens
params["pp_weight"] = 0 if params["bottleneck"] == "none" else params["pp_weight"]
self.p = params
self.o_tokens = i_tokens # Autoencoder
# Positional and word embeddings
pos_emb = tr.Embedding(params["len_limit"], params["d_model"], trainable=False,
weights=[tr.GetPosEncodingMatrix(params["len_limit"], params["d_model"])])
self.word_emb = tr.Embedding(self.o_tokens.num(), self.p["d_model"])
self.decode = None
self.use_src_pos = False
if "gru" in self.p["bottleneck"]:
self.encoder = tr.GRUEncoder(layers=self.p["ID_layers"], d_gru=self.p["ID_d_model"],
latent_dim=self.p["latent_dim"],
attn=("attn" in self.p["bottleneck"]),
word_emb=self.word_emb)
elif self.p["bottleneck"] == "conv":
self.encoder = tr.ConvEncoder(layers=self.p["ID_layers"],
min_filt_size=self.p["ID_d_k"],
min_filt_num=self.p["ID_d_k"],
latent_dim=self.p["latent_dim"],
dense_dim=self.p["ID_d_model"],
word_emb=self.word_emb)
else:
self.use_src_pos = True
self.encoder = TransformerEncoder(params=params, tokens=i_tokens,
pos_emb=pos_emb, word_emb=self.word_emb)
# KL Loss variable, weights the amount of variational loss, used by VAE annealing callback
self.kl_loss_var = K.variable(0.0, dtype=float, name='kl_loss_weight') if self.p["stddev"] else None
# Sample from latent space
def sampling(args):
z_mean_, z_logvar_ = args
if not self.p["stddev"]:
return z_mean_
else:
if self.p["bottleneck"] == "none":
latent_shape = (K.shape(z_mean_)[0], K.shape(z_mean_)[1], self.p["d_model"])
else:
latent_shape = (K.shape(z_mean_)[0], self.p["latent_dim"])
epsilon = K.random_normal(shape=latent_shape, mean=0.,
stddev=self.p["stddev"] * self.kl_loss_var / self.p["kl_max_weight"])
return z_mean_ + K.exp(z_logvar_ / 2) * epsilon
self.sampler = Lambda(sampling)
# Use a default decoder configuration
# To standardise across different tests
# We are trying to assess the quality of the latent space
# TODO(Basil): Make this a command line parameter
use_default_decoder = False
if use_default_decoder:
self.p = DefaultDecoderParams()
dec_pos_emb = tr.Embedding(self.p["len_limit"], self.p["d_model"], trainable=False,
weights=[tr.GetPosEncodingMatrix(params["len_limit"], self.p["d_model"])])
dec_word_emb = tr.Embedding(self.o_tokens.num(), self.p["d_model"])
else:
dec_word_emb = self.word_emb
dec_pos_emb = pos_emb
self.false_embedder = tr.FalseEmbeddings(d_emb=self.p["d_model"], d_latent=self.p["latent_dim"])
if self.p["decoder"] == "TRANSFORMER_FILM":
self.decoder = tr.DecoderWithFILM(self.p["d_model"], self.p["d_inner_hid"], self.p["heads"],
self.p["d_k"], self.p["d_v"], self.p["latent_dim"], self.p["layers"],
self.p["dropout"], word_emb=dec_word_emb, pos_emb=dec_pos_emb)
else:
self.decoder = tr.TransformerDecoder(self.p["d_model"], self.p["d_inner_hid"], self.p["heads"],
self.p["d_k"],
self.p["d_v"], self.p["layers"], self.p["dropout"],
word_emb=dec_word_emb, pos_emb=dec_pos_emb)
self.p = params
self.target_layer = TimeDistributed(Dense(self.o_tokens.num(), use_bias=False))
self.metrics = {}
def get_pos_seq(self, x):
mask = K.cast(K.not_equal(x, 0), 'int32')
pos = K.cumsum(K.ones_like(x, 'int32'), 1)
return pos * mask
def build_models(self, active_layers=999):
tgt_seq_in = Input(shape=(None,), dtype='int32', name='tgt_seq')
tgt_seq = Lambda(lambda x: x[:, :-1])(tgt_seq_in)
tgt_true = Lambda(lambda x: x[:, 1:])(tgt_seq_in)
tgt_pos = Lambda(self.get_pos_seq)(tgt_seq)
if self.p["bottleneck"] == "conv":
# must fully specify shape for convolutional approach
src_seq = Input(shape=(self.p["len_limit"],), name='src_seq')
else:
src_seq = Input(shape=(None,), dtype='int32', name='src_seq')
if self.use_src_pos:
src_pos = Lambda(self.get_pos_seq)(src_seq)
z_mean, z_logvar, z_sampled, enc_attn = self.encoder(src_seq, src_pos)
else:
enc_attn = None
z_mean, z_logvar, z_sampled = self.encoder(src_seq)
# Sample
# May not need to as some models (E.g. ar1) return a sampled vector
if z_sampled is None:
z_sampled = self.sampler([z_mean, z_logvar])
# generate an 'input' sampled value so we can create a separate
# model to decode from latent space
if self.p["bottleneck"] == "none":
z_input = Input(shape=(None, self.p["d_model"]), dtype='float32', name='z_input')
else:
z_input = Input(shape=(self.p["latent_dim"],), dtype='float32', name='z_input')
# must calculate for both a latent input and the full end-to-end system
final_output = []
props = []
for l_vec in [z_input, z_sampled]:
# 'false embed' for decoder
dec_input = self.false_embedder(l_vec)
if self.use_FILM:
dec_output, dec_attn, encdec_attn = self.decoder(tgt_seq, tgt_pos, l_vec, dec_input, l_vec,
active_layers=active_layers, return_att=True)
else:
dec_output, dec_attn, encdec_attn = self.decoder(tgt_seq, tgt_pos, l_vec, dec_input,
active_layers=active_layers, return_att=True)
dec_output = debugPrint(dec_output, "DEC_OUTPUT")
# Do not perform softmax on output
# As it is performed in the loss function
final_output.append(self.target_layer(dec_output))
# Property prediction
if self.p["pp_weight"] is not None:
props.append(self._buildPropertyPredictor(l_vec))
# KL DIVERGENCE LOSS
def kl_loss(args):
z_mean_, z_log_var_ = args
return - 0.5 * self.kl_loss_var * tf.reduce_mean(
1 + z_log_var_ - K.square(z_mean_) - K.exp(z_log_var_),
name='KL_loss_sum')
losses = []
# RECONSTRUCTION LOSS
def get_loss(args):
y_pred, y_true = args
y_true = tf.cast(y_true, 'int32')
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y_true, logits=y_pred)
mask = tf.cast(tf.not_equal(y_true, 0), 'float32')
loss = tf.reduce_sum(loss * mask, -1) / tf.reduce_sum(mask, -1)
loss = K.mean(loss)
return loss
def get_acc(args):
y_pred, y_true = args
mask = tf.cast(tf.not_equal(y_true, 0), 'float32')
corr = K.cast(K.equal(K.cast(y_true, 'int32'), K.cast(K.argmax(y_pred, axis=-1), 'int32')), 'float32')
corr = K.sum(corr * mask, -1) / K.sum(mask, -1)
return K.mean(corr)
losses.append(Lambda(get_loss, name='Loss')([final_output[1], tgt_true]))
self.metrics["ppl"] = Lambda(K.exp)(losses[0])
# VARIATIONAL LOSS
if self.kl_loss_var is not None:
if self.p["WAE_s"] == 0 or self.p["WAE_kernel"] is None:
print("Using variational autoencoder")
kl = Lambda(kl_loss, name='VariationalLoss')([z_mean, z_logvar])
else:
kl = Lambda(self.mmd_penalty, name='WAELoss')(z_sampled)
self.metrics["wae"] = Lambda(self.wae_mmd_exact, name='VariationalLoss')(
[z_mean, z_logvar])
kl = Lambda(lambda x: self.kl_loss_var * x)(kl)
self.metrics["kl_loss"] = kl
losses.append(kl)
if self.p["pp_weight"]:
prop_input = Input(shape=[self.p["num_props"]])
pp_loss = Lambda(lambda x: tf.losses.mean_squared_error(x, props[1],
weights=self.p["pp_weight"] / (
self.p["num_props"] ** 2)))(prop_input)
self.metrics["pp_loss"] = pp_loss
losses.append(pp_loss)
loss = Lambda(tf.reduce_sum)(losses)
# Set up autoencoder model
if self.p["pp_weight"] is None:
self.autoencoder = Model([src_seq, tgt_seq_in], loss)
else:
self.autoencoder = Model([src_seq, tgt_seq_in, prop_input], loss)
self.autoencoder.add_loss([loss])
## METRICS
self.metrics["acc"] = Lambda(get_acc)([final_output[1], tgt_true])
self.metrics["z_mean"] = Lambda(tf.reduce_mean)(z_mean)
if self.kl_loss_var is not None: self.metrics["z_logvar"] = Lambda(tf.reduce_mean)(z_logvar)
# For outputting next symbol
self.output_model = Model([src_seq, tgt_seq_in], final_output[1])
# For getting attentions
attn_list = dec_attn + encdec_attn if enc_attn is None else enc_attn + dec_attn + encdec_attn
self.output_attns = Model([src_seq, tgt_seq_in], attn_list)
# ENCODING/DECODING MODELS
self.encode = Model(src_seq, [z_mean, z_logvar])
self.encode_sample = Model(src_seq, [z_sampled])
self.decode = Model([z_input, tgt_seq_in], final_output[0])
def _buildPropertyPredictor(self, x):
h = Dense(100, input_shape=(self.p["latent_dim"],), activation='relu')(x)
for _ in range(self.p["pp_layers"] - 1):
h = Dense(100, activation='relu')(h)
return Dense(self.p["num_props"], activation='linear', name='props')(h)
def compile_vae(self, optimizer='adam', N_GPUS=1):
self.encode_sample.compile('adam', 'mse')
self.encode.compile('adam', 'mse')
self.decode.compile('adam', 'mse')
if N_GPUS != 1:
self.autoencoder = multi_gpu_model(self.autoencoder, N_GPUS)
self.autoencoder.compile(optimizer, None)
# add metrics
for key in self.metrics:
self.autoencoder.metrics_names.append(key)
self.autoencoder.metrics_tensors.append(self.metrics[key])
def wae_mmd(self, sample_qz):
sample_pz = K.random_normal(shape=[self.p["batch_size"], self.p["latent_dim"]], mean=0.0,
stddev=self.p["stddev"],
dtype=tf.float32)
s = 10
# batch size
n = int(self.p["batch_size"])
ind = np.linspace(0, n - 1, n)
ind = np.array([i + n * ind for i in ind]).flatten().astype(int)
ind = np.expand_dims(ind, axis=1)
def K_MAT(A, B):
# A = K.reshape(A, [n, self.p["latent_dim"]]) # give tensorflow shape hints
A = K.tile(A, [n, 1])
B = K.tile(B, [n, 1])
# indices to reshuffle A
A = tf.gather_nd(A, indices=ind)
# A = A[ind, :]
# A should now be a matrix whose rows are [z1, ..., z1, z2, ..., z2...] etc
distances = K.sqrt(K.sum(K.square(A - B), axis=1))
distances = debugPrint(distances, "DISTANCES: ")
# Returns a vector of n^2 distances
# mat = tf.matmul(A - B, K.transpose(A - B))
return K.exp((-0.5 / s ** 2) * distances)
# Set diagonals to zero
MMD = []
for samples in [sample_qz, sample_pz]:
# there will be n diagonals in this matrix equal to 1
# these shouldn't be included in the sum anyway
# so simply subtract n after reduce_sum
K_sum = tf.reduce_sum(K_MAT(samples, samples)) - n
MMD.append(K_sum / (n * n + n))
# Final term
# -2/n * sum k(zp_l, zq_j) for all l, j
MMD.append(-2 * tf.reduce_sum(K_MAT(sample_pz, sample_qz)) / n ** 2)
return self.kl_loss_var * tf.reduce_sum(MMD)
def mmd_penalty(self, sample_qz):
'''
:param stddev:
:param kernel: RBF or IMQ
:param pz: for IMQ kernel: 'normal', 'sphere' or 'uniform'
:param sample_qz:
:param sample_pz:
:return:
'''
kernel = self.p["WAE_kernel"]
sample_pz = K.random_normal(shape=[self.p["batch_size"], self.p["latent_dim"]], mean=0.0,
stddev=self.p["stddev"],
dtype=tf.float32)
sigma2_p = self.p["WAE_s"] ** 2
n = self.p["batch_size"]
# n = tf.cast(n, tf.int32)
nf = float(n) # tf.cast(n, tf.float32)
half_size = (n * n - n) / 2
norms_pz = tf.reduce_sum(tf.square(sample_pz), axis=1, keep_dims=True)
dotprods_pz = tf.matmul(sample_pz, sample_pz, transpose_b=True)
distances_pz = norms_pz + K.transpose(norms_pz) - 2. * dotprods_pz
norms_qz = tf.reduce_sum(tf.square(sample_qz), axis=1, keep_dims=True)
dotprods_qz = tf.matmul(sample_qz, sample_qz, transpose_b=True)
distances_qz = norms_qz + K.transpose(norms_qz) - 2. * dotprods_qz
dotprods = tf.matmul(sample_qz, sample_pz, transpose_b=True)
distances = norms_qz + tf.transpose(norms_pz) - 2. * dotprods
if kernel == 'RBF':
print("Using RBF WAE loss (s = {})".format(self.p["WAE_s"]))
# Median heuristic for the sigma^2 of Gaussian kernel
hs = int(half_size) # tf.cast(half_size, tf.int32)
sigma2_k = tf.nn.top_k(K.flatten(distances), hs).values[hs - 1]
sigma2_k += tf.nn.top_k(K.flatten(distances_qz), hs).values[hs - 1]
# Maximal heuristic for the sigma^2 of Gaussian kernel
distances_qz /= sigma2_k
distances_pz /= sigma2_k
distances /= sigma2_k
res1 = K.exp(- 0.5 * distances_qz)
res1 += K.exp(- 0.5 * distances_pz)
res1 = tf.multiply(res1, 1. - K.eye(n))
res1 = tf.reduce_sum(res1) / (nf * nf - nf)
res2 = K.exp(-0.5 * distances)
res2 = tf.reduce_sum(res2) * 2. / (nf * nf)
stat = res1 - res2
elif "IMQ" in kernel:
pz = kernel.split("_")[1]
print("Using IMQ loss with {} Cbase(s = {})".format(pz, self.p["WAE_s"]))
if pz == 'normal':
Cbase = 2. * self.p["latent_dim"] * sigma2_p
elif pz == 'sphere':
Cbase = 2.
elif pz == 'uniform':
Cbase = self.p["latent_dim"]
stat = 0.
for scale in [.1, .2, .5, 1., 2., 5., 10.]:
C = Cbase * scale
res1 = C / (C + distances_qz)
res1 += C / (C + distances_pz)
res1 = tf.multiply(res1, 1. - tf.eye(n))
res1 = tf.reduce_sum(res1) / (nf * nf - nf)
res2 = C / (C + distances)
res2 = tf.reduce_sum(res2) * 2. / (nf * nf)
stat += res1 - res2
return self.kl_loss_var * stat
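# Compact numpy sketch of the quantity the WAE penalty above estimates (illustrative;
# RBF kernel only, unbiased estimator that excludes the diagonal terms, and without the
# IMQ variants or the kl_loss_var weighting used in mmd_penalty). Relies on this
# module's `import numpy as np`.
def mmd_rbf(x, y, sigma=1.0):
    def pairwise_sq_dists(a, b):
        a2 = np.sum(a ** 2, axis=1, keepdims=True)
        b2 = np.sum(b ** 2, axis=1, keepdims=True)
        return a2 + b2.T - 2.0 * a @ b.T
    def kernel(a, b):
        return np.exp(-pairwise_sq_dists(a, b) / (2.0 * sigma ** 2))
    n, m = len(x), len(y)
    k_xx = (np.sum(kernel(x, x)) - n) / (n * (n - 1))   # drop the k(x_i, x_i) = 1 diagonal
    k_yy = (np.sum(kernel(y, y)) - m) / (m * (m - 1))
    k_xy = np.mean(kernel(x, y))
    return k_xx + k_yy - 2.0 * k_xy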
def wae_mmd_exact(self, args):
mu, logvar = args
var = K.exp(logvar)
s = self.p["WAE_s"]
s2 = s ** 2
s4 = s ** 4
prior_mu = K.zeros_like(mu)
prior_var = K.ones_like(var)
def expected_rbf(mx, vx, my=None, vy=None):
if my is None: my = mx
if vy is None: vy = vx
vxt = 1 / (1 / s2 + 1 / vx)
vyt = 1 / (1 / s2 + 1 / vy + vxt / s4)
myt = (my / vy - (mx * vxt) / (vx * s2)) * vyt
det = lambda x: K.prod(x, axis=1)
coeff = K.sqrt((det(vyt) * det(vxt)) / (det(vy) * det(vx)))
exponent = K.square(mx) * vxt / K.square(vx)
exponent += K.square(myt) / vyt
exponent -= K.square(my) / vy
exponent -= K.square(mx) / vx
return coeff * K.exp(0.5 * K.sum(exponent, axis=1))
return self.kl_loss_var * K.mean(expected_rbf(mu, var) +
expected_rbf(prior_mu, prior_var) -
2 * expected_rbf(mu, var, prior_mu, prior_var))
def get_moments(self, input):
if isinstance(input, str):
# have been given a string
input_seq = self.i_tokens.tokenize(input)
else:
input_seq = input
if self.p["bottleneck"] == "conv":
# Must pad with zeros
s = np.zeros(self.p["len_limit"])
s[0:len(input_seq)] = input_seq
input_seq = s
# get mean & logvar
input_seq =
|
np.expand_dims(input_seq, axis=0)
|
numpy.expand_dims
|
import numpy as np
class TimeStepper:
def __init__(self, manifold):
self.exp = manifold.exp
self.dexpinv = manifold.dexpinv
self.action = manifold.action
self.a = None
self.b = None
self.c = None
self.order = None
self.s = None
def step(self, f, t, y, h):
n = y.size
k = np.zeros((n, self.s))
for i in range(self.s):
u =
|
np.zeros(n)
|
numpy.zeros
|
import unittest
from unittest.mock import patch, MagicMock, call
from pprint import pprint, pformat
import numpy as np
from numpy import array
import sympy
from lib.nnbench import NNBench, NetMaker
import lib.nn
def arangep(n, starting_index=0):
sympy.sieve.extend_to_no(starting_index + n)
return np.array(sympy.sieve._list[starting_index:starting_index + n])
class NNBenchTest(unittest.TestCase):
def create_patch(self, name):
patcher = patch(name)
thing = patcher.start()
self.addCleanup(patcher.stop)
return thing
def setUp(self):
self.create_patch('lib.nn.Network')
def test_bench_checkpoint(self):
net = lib.nn.Network()
bench = NNBench(net)
# Checkpointing a net saves the internal state vector
sv = 'some state vector'
net.state_vector.return_value = sv
bench.checkpoint_net()
net.state_vector.assert_called_once()
# Rolling back a net sets the net's state vector from the saved value
bench.rollback_net()
net.set_state_from_vector.assert_called_once_with(sv)
def test_bench_network_input_width_detection(self):
# Mock up network with a determined input width
net = lib.nn.Network()
mock_layer = MagicMock()
mock_layer.M = np.zeros(2*3).reshape(3,2)
net.layers = [mock_layer]
# A bench of a net discovers the input width of the net
bench = NNBench(net)
self.assertEqual(bench.net.layers[0].M.shape, (3,2)) # verify we mocked it as intended
self.assertEqual(NNBench.net_input_width(bench.net), 2)
def test_nn_network_learn_input_form(self):
# Make a constant training batch for a two-in, three-out net,
# containing two examples
        training_batch = (np.arange(2*3)
import numpy as np
from math import comb
import pickle as pkl
from tqdm import tqdm
def give_possible_options(combs, num_arrivals, num_services, arrivals, services, combnum, curr_comb, pkl_name_inter_depart):
if services == 1:
curr_comb = np.append(curr_comb, arrivals)
with open(pkl_name_inter_depart, 'rb') as f:
count, combp = pkl.load(f)
combp[count, :] = curr_comb
count += 1
with open(pkl_name_inter_depart, 'wb') as f:
pkl.dump((count, combp), f)
combs[combnum, num_services - services] = arrivals
combnum += 1
else:
for ind in range(arrivals, -1, -1):
if services == num_services:
curr_comb = np.array([])
give_possible_options(combs, num_arrivals, num_services, arrivals - ind,
services-1, combnum, np.append(curr_comb, ind), pkl_name_inter_depart)
return combs
def possibilites_after_initial_arrivals(num_arrivals, arrivals, services, curr_comb, combnum, pkl_name_inter_depart):
if services == 1:
        ## computing the values to add to curr_comb
with open(pkl_name_inter_depart, 'rb') as f:
count, combp = pkl.load(f)
        if arrivals == 1: # if there is only one customer to arrive then it can arrive either during or after service
update_crr_comb = np.array([1, 0])
size_comb = np.append(curr_comb, update_crr_comb).shape[0]
combp = np.append(combp, np.append(curr_comb, update_crr_comb).reshape(1, size_comb), axis=0)
count += 1
# print(np.append(curr_comb, update_crr_comb))
update_crr_comb = np.array([0, 1])
combp = np.append(combp, np.append(curr_comb, update_crr_comb).reshape(1, size_comb), axis=0)
count += 1
# print(np.append(curr_comb, update_crr_comb))
else: # all customers arrived already
            update_crr_comb = np.array([0, 0])
#stdlib
import os
import requests
import shutil
import itertools
import argparse
import datetime as dt
#scientific stack
import numpy as np
import scipy
import scipy.interpolate
import spacepy.datamodel as dm
import spacepy.time as spt
import spacepy.pybats as swmf
from sklearn.neighbors import KernelDensity
#local
import perturbplot as pplot
def get_SC_OMNI(year=2000, bird='ACE', datadir='Data', force=False, verbose=False, **kwargs):
'''Download S/C specific OMNI file'''
valid_birds = ['ACE', 'IMP', 'GEOTAIL', 'WIND']
if bird.upper() not in valid_birds:
raise ValueError('Invalid satellite selected ({0})'.format(bird))
targ_fn = '{0}_min_b{1}.txt'.format(bird.lower(), year)
#now check whether we have this file already
if not force and os.path.isfile(os.path.join(datadir, targ_fn)):
if verbose: print('Data already present for {0} in {1} - not downloading'.format(bird, year))
return os.path.join(datadir, targ_fn)
#now download the file and save in datadir
omni_https = 'spdf.gsfc.nasa.gov'
sc_dir = 'pub/data/omni/high_res_omni/sc_specific/'
    url = 'https://' + omni_https + '/' + sc_dir + targ_fn
r = requests.get(url, verify=False, stream=True)
r.raw.decode_content = True
with open(os.path.join(datadir, targ_fn), 'wb') as f:
shutil.copyfileobj(r.raw, f)
print('Retrieved {0}'.format(targ_fn))
return os.path.join(datadir, targ_fn)
def load_SC_OMNI(bird, year, outdata=None, **kwargs):
'''Load satellite specific OMNI data into dict'''
fname = get_SC_OMNI(year=year, bird=bird, **kwargs)
dum = np.genfromtxt(fname, usecols=(0,1,2,3,15,16,23,26,28,29,30),
names=('year','day','hour','minute','By_GSM','Bz_GSM','Vx_GSE','Den_P','X_GSE','Y_GSE','Z_GSE'),
converters={0: int, 1: int, 2: int, 3: int})
data = dm.fromRecArray(dum)
dates = spt.doy2date(data['year'], data['day'], dtobj=True)
times = [dt.timedelta(hours=x, minutes=y) for x,y in zip(data['hour'],data['minute'])]
data['DateTime'] = dates + times
for key in ['year', 'day', 'hour', 'minute']:
del data[key]
data['Bz_GSM'][np.abs(data['Bz_GSM'])>20] = np.nan
data['By_GSM'][np.abs(data['By_GSM'])>20] = np.nan
data['Vx_GSE'][np.abs(data['Vx_GSE'])>900] = np.nan
data['X_GSE'][np.abs(data['X_GSE'])>9000] = np.nan
data['Y_GSE'][np.abs(data['Y_GSE'])>9000] = np.nan
data['Z_GSE'][np.abs(data['Z_GSE'])>9000] = np.nan
if outdata:
for key in ['By_GSM', 'Bz_GSM', 'Vx_GSE', 'DateTime', 'Den_P', 'X_GSE', 'Y_GSE', 'Z_GSE']:
outdata[key] = np.concatenate([outdata[key], data[key]])
return outdata
return data
if __name__=='__main__':
#python perturbSWMF.py -p Nov2003 -f IMF.dat -n 6 -s 1977
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--seed', dest='seed', type=int, default=1234, help='Specify random seed. Integer. Default=1234')
parser.add_argument('-n', '--number', dest='Nensembles', type=int, default=8, help='Number of perturbed files to generate. Default=8')
parser.add_argument('-f', '--file', dest='fname', default='IMF_ev5.dat', help='Input SWMF IMF filename. Default "IMF_ev5.dat"')
parser.add_argument('-p', '--path', dest='path', default='SWMF_inputs', help='Path for input/output')
options = parser.parse_args()
np.random.seed(options.seed) #set seed for repeatability
#read SWMF ImfInput file
infilename = os.path.join(options.path, options.fname)
if os.path.isfile(infilename):
eventIMF = swmf.ImfInput(filename=infilename)
else:
raise IOError('Specified input file does not appear to exist or is not readable')
Ntimes = len(eventIMF['ux']) #3*1440 #N days at 1-min resolution
generateInputs = True
saveErrors = False
varlist = ['Vx_GSE', 'Bz_GSM', 'By_GSM']
Nvars = len(varlist)
map_dict = {'Vx_GSE': 'ux',
'Bz_GSM': 'bz',
'By_GSM': 'by'}
ylimdict = {'Vx_GSE': [-300, -800],
'Bz_GSM': [-20, 20],
'By_GSM': [-20, 20]}
xlimdict = {'Vx_GSE': [-60, 60],
'Bz_GSM': [-15, 15],
'By_GSM': [-15, 15]}
unitsdict = {'Vx_GSE': '[km/s]',
'Bz_GSM': '[nT]',
'By_GSM': '[nT]'}
#load ACE data into dict (ups: upstream)
upsdata = load_SC_OMNI('ace', 1999)
upsdata = load_SC_OMNI('ace', 2000, outdata=upsdata)
upsdata = load_SC_OMNI('ace', 2001, outdata=upsdata)
upsdata = load_SC_OMNI('ace', 2002, outdata=upsdata)
upsdata = load_SC_OMNI('ace', 2003, outdata=upsdata)
upsdata = load_SC_OMNI('ace', 2004, outdata=upsdata)
upsdata = load_SC_OMNI('ace', 2005, outdata=upsdata)
#load GEOTAIL data into dict (nmp: near magnetopause)
nmpdata = load_SC_OMNI('geotail', 1999)
nmpdata = load_SC_OMNI('geotail', 2000, outdata=nmpdata)
nmpdata = load_SC_OMNI('geotail', 2001, outdata=nmpdata)
nmpdata = load_SC_OMNI('geotail', 2002, outdata=nmpdata)
nmpdata = load_SC_OMNI('geotail', 2003, outdata=nmpdata)
nmpdata = load_SC_OMNI('geotail', 2004, outdata=nmpdata)
nmpdata = load_SC_OMNI('geotail', 2005, outdata=nmpdata)
print(nmpdata['DateTime'][0], nmpdata['DateTime'][-1])
savedata = dm.SpaceData()
for var in varlist[::-1]:
print('Processing {}'.format(var))
err = 'epsilon'
varlabel = var[0]+'$_'+var[1]+'$'
errlabel = r'$\varepsilon$'
plotinfo = {'var': var,
'err': err,
'varlabel': varlabel,
'errlabel': errlabel,
'xlimdict': xlimdict,
'ylimdict': ylimdict,
'units': unitsdict}
#Get error distrib for var as fn of var and plot
        valid_inds = np.logical_and(np.isfinite(nmpdata[var]), np.isfinite(upsdata[var]))
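        # Both series were NaN-flagged above for fill/out-of-range values, so
        # valid_inds keeps only rows where the near-magnetopause and upstream
        # measurements are simultaneously finite.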
import os
import cv2
import yaml
import numpy as np
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt
from buddha_loader import load_ds
from TDDFA_ONNX import TDDFA_ONNX
from tensorflow.keras import layers
from mpl_toolkits.mplot3d import Axes3D
def normalize(points):
centred = points - np.mean(points, axis=0)
return centred / centred.max()
def apply_rand_T(vect):
alpha, beta, gamma = .5 * np.pi * np.random.random(3)
Rx = np.array([[np.cos(alpha), -np.sin(alpha), 0], [np.sin(alpha), np.cos(alpha), 0], [0, 0, 1]])
Ry = np.array([[np.cos(beta), 0, np.sin(beta)], [0, 1, 0], [-np.sin(beta), 0, np.cos(beta)]])
Rz = np.array([[1, 0, 0], [0, np.cos(gamma), -np.sin(gamma)], [0, np.sin(gamma), np.cos(gamma)]])
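    # Elementary rotation matrices for the random angles alpha, beta, gamma
    # (note: the matrix assigned to Rx has the algebraic form of a rotation about
    # the z-axis, and Rz of a rotation about the x-axis).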
rot = [Rx, Ry, Rz]
    trans = rot[np.random.choice(2, 1)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
########################################################################
#
# implementation of AON algorithm for collisional growth of hydrometeors.
# can be employed in a box model or column model
# various variants of the algorithm are covered here (LinSamp, explicit overtakes)
#
########################################################################
#Python modules
import os, sys
import numpy as np
import math
import warnings
import random
import time
#Aggregation modules
import SIPinit as SI
import AON_Alg as AON
import Misc as FK
#GCCif (KERNEL == 1 || KERNEL == 2)
import Kernel as K
#GCCendif /* (KERNEL == 1 || KERNEL == 2) */
#Sedimentation module
#GCCif (COLUMN == 1 && PROCESS != 2)
import Sedimentation as SD
#GCCendif /* (COLUMN == 1 && PROCESS != 2) */
#Plotting module
import PlotSim as PS
#GCCif (WELLMIXED > 0)
#GCCif (PROCESS != 0)
print('if the 2D-WELLMIXED approach is chosen, Sedimentation and Aggregation must both be activated (PROCESS = 0)')
sys.exit('STOPPED')
#GCCendif /* (PROCESS != 0) */
#GCCif (KERNEL != 1 && KERNEL != 2)
print('if the 2D-WELLMIXED approach is chosen, a hydrodynamic kernel must be chosen')
sys.exit('STOPPED')
#GCCendif /* (KERNEL != 1 && KERNEL != 2) */
#GCCif (LINEAR != 0)
print('if 2D-WELLMIXED approach is chosen, linear sampling option is not possible')
sys.exit('STOPPED')
#GCCendif /* (LINEAR != 0) */
####GCCif (AGG_MC != 0)
###print('if 2D-WELLMIXED approach is chosen, multiple collection option must be switched off')
###sys.exit('STOPPED')
####GCCendif /* (AGG_MC != 0) */
#GCCendif /* (WELLMIXED > 0) */
warnings.filterwarnings("ignore",category =RuntimeWarning)
dV_skal = 1
nr_SIPs_skal = 1
#the following statement includes a parameter file via a preprocessor directive
#GCCinclude "params.txt"
#>>>>>>>>>> derived parameters >>>>>>>>>>>>>>>>>>>>
#GCCif (INITV == 1)
#initial size distribution
#Mean mass in kg
xf0=FK.r2m(r0,const_mass2rad)
#initial concentration in m^-3
#N0=239e6
N0=LWC/xf0
#number of bins
n=n10*r10
#GCCendif /* (INITV == 1) */
#GCCif (COLUMN == 1)
#GCCif (PROCESS == 0)
i_process = 0
#GCCendif /* (PROCESS == 0) */
#GCCif (PROCESS == 1)
i_process = 1
#GCCendif /* (PROCESS == 1) */
#GCCif (PROCESS == 2)
i_process = 2
#GCCendif /* (PROCESS == 2) */
#GCCendif /* (COLUMN == 1) */
#total number of time steps
iend=int(Tsim/dt) + 1
#<<<<<<<<<<< derived parameters <<<<<<<<<<<<<<<<<<<<<
#GCCif (KERNEL == 0 || KERNEL == 3)
cck=0
m_low=0
eta_indize=0
m_kernel = 0
ikernel=0
#GCCendif /* (KERNEL == 0 || KERNEL == 3) */
#GCCif (KERNEL == 1)
#Call the Long kernel
ikernel=1
[cck,m_kernel]=K.LongKernel(rho_w)
eta_indize=m_kernel[1]/m_kernel[0]
#m_low=m_kernel[0]/eta_indize
m_low=m_kernel[0]
#GCCendif /* (KERNEL == 1) */
#GCCif (KERNEL == 2)
#Call the Hall kernel
ikernel=2
[cck,m_kernel]=K.HallKernel(rho_w)
eta_indize=m_kernel[1]/m_kernel[0]
m_low=m_kernel[0]
#GCCendif /* (KERNEL == 2) */
fLog_currColls= None
fLog_accColls = None
fLog_currColls_WM2D = None
fLog_Combs = None
fLog_p = None
#GCCif (COUNT_COLLS == 1)
fLog_currColls= open('log_currColls.dat','w')
fLog_accColls = open('log_accColls.dat','w')
fLog_Combs = open('log_Combs.dat','w')
fLog_p = open('log_p.dat','w')
##GCCif (WELLMIXED > 0)
fLog_currColls_WM2D= open('log_currColls_WM2D.dat','w')
##GCCendif /* (WELLMIXED > 0)*/
#GCCendif /* (COUNT_COLLS == 1) */
#GCCif (COLUMN == 0)
#================================ BOX-MODEL ===========================================
#>>>>>>>>>>>> start of box model section>>>>>>>>>>>>>>>>>>>>
nz = 1
#GCCif (DISCRETE == 0)
# definition of bin grid for size distribution plots
mfix_plot=np.zeros(nplot)
mdelta_plot=np.zeros(nplot)
for i in range(0,nplot-1):
mfix_plot[i+1]=10**(i/n10_plot+min10_plot)
mdelta_plot[i]=mfix_plot[i+1]-mfix_plot[i]
#GCCendif /* (DISCRETE == 0) */
imod_GVplot = int(t_intervall_GVplot/dt)
nr_GVplot = int(1 + (Tsim-t_start_GVplot)/t_intervall_GVplot)
t_end_GVplot = t_start_GVplot + (nr_GVplot-1)*t_intervall_GVplot
t_vec_GVplot = np.arange(t_start_GVplot,t_end_GVplot+1,t_intervall_GVplot) #times at which SIP data is saved and size distributions can be plotted
nEK_sip_plot=np.zeros([nr_inst,nr_GVplot,nr_sip_max])
#stores SIP data of all realisations at the times defined in t_vec_GVplot
#GCCif (DISCRETE == 0)
mEK_sip_plot=np.zeros([nr_inst,nr_GVplot,nr_sip_max])
#GCCendif /* (DISCRETE == 0) */
#GCCif (DISCRETE >= 1)
mEK_sip_plot=np.zeros([nr_inst,nr_GVplot,nr_sip_max],dtype='int')
#GCCendif /* (DISCRETE >= 1) */
nr_SIPs_plot=np.zeros([nr_inst,nr_GVplot],dtype='int')
imod_MOMsave=int(t_intervall_MOMsave/dt)
nr_MOMsave = int(1 + (Tsim-t_start_MOMsave)/t_intervall_MOMsave)
t_end_MOMsave = t_start_MOMsave + (nr_MOMsave-1)*t_intervall_MOMsave
t_vec_MOMsave=np.arange(t_start_MOMsave,t_end_MOMsave+1,t_intervall_MOMsave)
#usually a finer time grid is used for saving the moment data
print(t_vec_MOMsave)
print(nr_MOMsave)
nr_MOMs=4 # evaluate moments of order 0 to nr_MOMs-1
MOMsave=np.zeros([nr_inst,nr_MOMsave,nr_MOMs])
#GCCif (DISCRETE == 0)
outp_format='%1.6e'
outp_format_long='%1.6e'
#GCCendif /* (DISCRETE == 0) */
#GCCif (DISCRETE >= 1)
outp_format='%4i'
outp_format_long='%6i'
#GCCendif /* (DISCRETE >= 1) */
fMom = open('Moments.dat', 'wb')
fGV = open('SIP.dat', 'wb')
fLog= open('log.txt','a')
starttime = time.time()
localtime = time.asctime( time.localtime(starttime) )
print("Start time computation:", localtime)
fLog.write(os.getcwd()+ '\n')
fLog.write("Start time computation: "+ localtime+ '\n')
fLog.close()
# loop over all realisations
for k in range(0,nr_inst):
if (iPM >= 1): print(os.getcwd())
if (k%50 == 0):
print('-------------------------------------- new instance ',k,'-------')
fLog= open('log.txt','a')
fLog.write('instance: ' + str(k)+ '\n')
fLog.close()
i_MOMsave=1
i_GVplot=1
random.seed(32+(k+nr_ins_start)*123433)
count_colls=0
#GCCif (COUNT_COLLS == 1)
count_colls=np.zeros(20,dtype=np.uint64)
#GCCendif /* (COUNT_COLLS == 1) */
###########################################################################
#
# Initialise SIP ensemble
#
###########################################################################
#GCCif (INITV == 1)
[nr_SIPs,nEK_sip_ins,mEK_sip_ins,EK_krit,mdelta,mfix]=SI.InitSIP_ExpVert_singleSIP_WS(imlow,n10,r10,min10,eta_nu,xf0,N0,dV,nr_sip_max)
#GCCendif /* if (INITV == 1) */
#GCCif (INITV == 2)
nr_SIPs, nEK_sip_ins, mEK_sip_ins = SI.InitSIP_Alfonso(dV_skal=dV_skal) #20 droplets with 17um and 10 droplets with 21.4um or similar setups
#GCCendif /* if (INITV == 2) */
MOMsave[k,0,:] = FK.Moments_k0_3(nEK_sip_ins,mEK_sip_ins)
if (iPM >= 1): print('initial moments: ', MOMsave[k,0,:])
if (iPM >= 1): print('nr_SIPs: ', nr_SIPs)
np.savetxt(fMom, MOMsave[k,0,:].reshape(1,4), fmt=outp_format_long) # reshape necessary to write all 4 values in one row
nr_SIPs_plot[k,0 ]= nr_SIPs
nEK_sip_plot[k,0,0:nr_SIPs]=nEK_sip_ins
mEK_sip_plot[k,0,0:nr_SIPs]=mEK_sip_ins
np.savetxt(fGV, [nr_SIPs], fmt='%5i')
np.savetxt(fGV, nEK_sip_ins[0:nr_SIPs].reshape(1,nr_SIPs), fmt=outp_format)
np.savetxt(fGV, mEK_sip_ins[0:nr_SIPs].reshape(1,nr_SIPs), fmt=outp_format)
#np.savetxt(fGV, nEK_sip_ins[0:nr_SIPs].reshape(nr_SIPs,1), fmt=outp_format)
#np.savetxt(fGV, mEK_sip_ins[0:nr_SIPs].reshape(nr_SIPs,1), fmt=outp_format)
ibreak = 0
iibreak = 0
#>>>>>>>>>>>>> time iteration
for it in range(0,iend):
t=it*dt
if (nr_SIPs > 1):
#print('nr_SIPs',nr_SIPs,type(nr_SIPs))
ibreak = AON.Aggregation(nr_SIPs,nEK_sip_ins,mEK_sip_ins,count_colls,cck,m_low,eta_indize,m_kernel)
if (it%imod_MOMsave ==0) & (it != 0):
MOMsave[k,i_MOMsave,:]=FK.Moments_k0_3(nEK_sip_ins,mEK_sip_ins)
if (iPM >= 2): print(it,i_MOMsave,MOMsave[k,i_MOMsave,:])
np.savetxt(fMom, MOMsave[k,i_MOMsave,:].reshape(1,4), fmt=outp_format_long)
i_MOMsave = i_MOMsave+1
#>>>>>>>>>>>>>>>>>>remove zero weight SIPs, if present at all>>>>>>>>>>>>>>>>>>>>>
if (min(nEK_sip_ins) == 0):
index_list=nEK_sip_ins.nonzero()
#print('nr_SIPs old: ',nr_SIPs)
nEK_sip_ins=nEK_sip_ins[index_list]
mEK_sip_ins=mEK_sip_ins[index_list]
nr_SIPs = nEK_sip_ins.size
#print('nr_SIPs new: ',nr_SIPs)
if (nr_SIPs == 1):
print('only one SIP remains, stop computation of current instance, proceed with next instance' )
print('nu: ', nEK_sip_ins, 'm: ', mEK_sip_ins, 'time:', t, 'it: ', it)
ibreak = 1
#>>>>>>>>save SIP data at specified points in time>>>>>>>>>>>>>
if (it%imod_GVplot == 0) & (it != 0):
nr_SIPs_plot[k,i_GVplot ]= nr_SIPs
nEK_sip_plot[k,i_GVplot,0:nr_SIPs]=nEK_sip_ins
mEK_sip_plot[k,i_GVplot,0:nr_SIPs]=mEK_sip_ins
if (iPM >= 2):
print('SIP output, #, it, time:', i_GVplot, it, it*dt)
np.savetxt(fGV, [nr_SIPs], fmt='%5i')
np.savetxt(fGV, nEK_sip_plot[k,i_GVplot,0:nr_SIPs].reshape(1,nr_SIPs), fmt=outp_format)
np.savetxt(fGV, mEK_sip_plot[k,i_GVplot,0:nr_SIPs].reshape(1,nr_SIPs), fmt=outp_format)
#np.savetxt(fGV, nEK_sip_ins[0:nr_SIPs].reshape(nr_SIPs,1), fmt=outp_format)
#np.savetxt(fGV, mEK_sip_ins[0:nr_SIPs].reshape(nr_SIPs,1), fmt=outp_format)
i_GVplot = i_GVplot+1
#if (ibreak == 1):
#print('break condition met at iteration it = ', it)
#>>>>>>end of time iteration of a single instance>>>>>>>>>
#>>>>>>>>>>analyse computing time>>>>>>>>>>>>>>>>>>>>
currenttime = time.time()
currenttime_str = time.asctime( time.localtime(currenttime))
endtime_expected=starttime+ (nr_inst/(k+1)) * (currenttime-starttime)
endtime_expected_str = time.asctime( time.localtime(endtime_expected))
if (iPM >= 2):
print("Instance {} of {} finished".format(k+1,nr_inst))
print("Start time/Current time/Expected end time: ")
#print(localtime, ' --- ', currenttime_str, ' --- ', endtime_expected_str)
fLog= open('log.txt','a')
fLog.write('total computing time in sec: '+ str(int(currenttime-starttime)) + '\n')
fLog.write(currenttime_str+ ' --- '+ endtime_expected_str+ '\n')
#GCCif (COUNT_COLLS == 1)
cc_sum=count_colls.sum()
fLog.write('a '+"{}".format(cc_sum)+'\n')
fLog.write('b '+" ".join("{}".format(x) for x in count_colls)+'\n')
cc_frac=count_colls/cc_sum
fLog.write('b '+" ".join("{:.4}".format(x) for x in cc_frac)+'\n')
#GCCendif /* (COUNT_COLLS == 1) */
fLog.close()
#<<<<<<<<<<<<<<<<<end of loop of all realisations<<<<<<<<<<<<<<<<<
fMom.close()
fGV.close()
localtime = time.asctime( time.localtime(time.time()) )
print("End time computation:", localtime)
#>>>>>>>>>>>>>>>>Generate meta data>>>>>>>>>>>>>
fMom = open('Moments_meta.dat', 'wb')
np.savetxt(fMom,np.array([dV,skal_m]),fmt = '%10e')
np.savetxt(fMom,np.array([nr_inst,nr_MOMsave]),fmt = '%4d')
np.savetxt(fMom, t_vec_MOMsave.reshape(1,nr_MOMsave),fmt = '%6d')
fMom.close()
fGV = open('SIP_meta.dat', 'wb')
np.savetxt(fGV,np.array([nr_inst,nr_GVplot]),fmt = '%4d')
np.savetxt(fGV, t_vec_GVplot.reshape(1,nr_GVplot),fmt = '%6d')
fGV.close()
#>>>>>>>Generate data file with mean moments>>>>>>>>>>>>>>>
FK.CIO_MOMmean(data_in=MOMsave,fp_out='',skal_m=skal_m,dV=dV)
fLog= open('log.txt','a')
currenttime = time.time()
fLog.write('total computing time in sec: '+ str(int(currenttime-starttime)) + '\n')
fLog.write('finalised\n')
fLog.close()
#----------------------------------------------------------------------------------
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>generate plot for a first analysis>>>>>>>>>>>>>>>>>>>>>>>
for i in range (4): MOMsave[:,:,i] = MOMsave[:,:,i]*skal_m**i/dV
PS.PlotMoments(MOMsave,t_vec_MOMsave)
PS.PlotGV(nEK_sip_plot,mEK_sip_plot,nr_SIPs_plot,t_vec_GVplot,skal_m=skal_m)
#GCCif (IREF >= 2)
PS.PlotRelStdDev(mEK_sip_plot, t_vec_GVplot)
#GCCendif /* (IREF >= 2) */
#<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<end of box model section<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
#=============================== BOX-MODEL =============================================
#GCCendif /* (COLUMN == 0) */
#GCCif (COLUMN == 1)
# start of column model section
#=============================== COLUMN-MODEL =============================================
imod_GVplot=int(t_intervall_GVplot/dt)
nr_GVplot = int(1 + (Tsim-t_start_GVplot)/t_intervall_GVplot)
t_end_GVplot = t_start_GVplot + (nr_GVplot-1)*t_intervall_GVplot
t_vec_GVplot=np.arange(t_start_GVplot,t_end_GVplot+1,t_intervall_GVplot) #times at which SIP data is saved and size distributions can be plotted
imod_MOMsave=int(t_intervall_MOMsave/dt)
nr_MOMsave = int(1 + (Tsim-t_start_MOMsave)/t_intervall_MOMsave)
t_end_MOMsave = t_start_MOMsave + (nr_MOMsave-1)*t_intervall_MOMsave
t_vec_MOMsave=np.arange(t_start_MOMsave,t_end_MOMsave+1,t_intervall_MOMsave)
#usually a finer time grid is used for saving the moment data
print(t_vec_MOMsave)
print(nr_MOMsave)
nr_MOMs=4 # evaluate moments of order 0 to nr_MOMs-1
MOMsave=np.zeros([nr_inst,nr_MOMsave,nz,nr_MOMs])
# track fluxes of moments 0 to nr_MOMs-1 together with SIP number at the lower and upper boundary
# "instantaneous" flux: actually the flux is an average over a time period of length t_intervall_GVplot
# in the "SIP world" with discrete crossings of SIPs across the boundaries it is not reasonable/possible to determine instantaneous fluxes
# at time t=0 all fluxes are set to zero and not output, hence array of length nr_GVplot-1 is used
FluxIn =np.zeros([nr_inst,nr_GVplot-1,nr_MOMs+1])
FluxOut =np.zeros([nr_inst,nr_GVplot-1,nr_MOMs+1])
FluxInAcc =np.zeros([nr_inst,nr_GVplot-1,nr_MOMs+1])
FluxOutAcc=np.zeros([nr_inst,nr_GVplot-1,nr_MOMs+1])
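# Layout: Flux*[k, j, :] holds the moments of order 0..nr_MOMs-1 plus the SIP count
# for output interval j of realisation k; presumably "In" is the influx through the
# upper boundary and "Out" the outflux through the lower boundary, with the *Acc
# arrays accumulating those fluxes over time.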
#GCCif (TRACKCENTER == 1)
# last index has 3 components: before aggregation, after aggregation, after sedimentation
zCenterIC_full=np.zeros([nr_inst,nr_MOMsave,nz,nr_MOMs,3])
zCenterSIP_full=np.zeros([nr_inst,nr_MOMsave,nz,nr_MOMs,3])
zCenterIC_av=np.zeros([nr_MOMsave,nr_MOMs,3])
zCenterIC_av_denom=np.zeros([nr_MOMsave,nr_MOMs,3])
zCenterSIP_av=np.zeros([nr_MOMsave,nr_MOMs,3])
zCenterSIP_av_denom=np.zeros([nr_MOMsave,nr_MOMs,3])
nr_SIPs_GB_save=np.zeros([nr_inst,nr_MOMsave,nz])
#GCCendif /* (TRACKCENTER == 1) */
outp_format='%1.6e'
outp_format_long='%1.6e'
outp_format_flux=('%1.4e','%1.4e','%1.4e','%1.4e','%9d')
#generate the general meta data file
fMeta = open('Meta.dat','w')
fMeta.write("{:5d} {:7.3f} {:7.1f} {:5d}\n".format(nz,dz,Tsim,nr_inst))
fMeta.write("{:7.3e} {:7.3e} {:7.3e} {:3d} {:3d} {:3d} \n".format(LWC,r0,xf0,ikernel,i_init_1D,i_process))
fMeta.close()
#generate the meta data files for particular meta data files
fMomMeta = open('Moments_meta.dat', 'wb')
np.savetxt(fMomMeta,np.array([dV,skal_m]),fmt = '%10e')
np.savetxt(fMomMeta,np.array([nr_inst,nr_MOMsave]),fmt = '%4d')
np.savetxt(fMomMeta, t_vec_MOMsave.reshape(1,nr_MOMsave),fmt = '%6d')
fMomMeta.close()
fGVMeta = open('SIP_meta.dat', 'wb')
np.savetxt(fGVMeta,np.array([nr_inst,nr_GVplot]),fmt = '%4d')
np.savetxt(fGVMeta, t_vec_GVplot.reshape(1,nr_GVplot),fmt = '%6d')
fGVMeta.close()
#open output files
fMom = open('Moments.dat', 'wb')
fGV = open('SIP.dat', 'wb')
fLog= open('log.txt','a')
fFluxIn = open('Fluxes_in.dat' , 'wb')
fFluxInAcc = open('Fluxes_in_acc.dat' , 'wb')
fFluxOut = open('Fluxes_out.dat' , 'wb')
fFluxOutAcc = open('Fluxes_out_acc.dat', 'wb')
#GCCif (TRACKCENTER == 1)
fCenterIC_full = open('centerIC_full.dat' , 'wb')
fCenterSIP_full= open('centerSIP_full.dat', 'wb')
fCenterIC_av = open('centerIC_av.dat' , 'wb')
fCenterSIP_av = open('centerSIP_av.dat', 'wb')
#GCCendif /* (TRACKCENTER == 1) */
#GCCif (TRACKOUT == 1)
fGVout = open('SIPout.dat', 'wb')
fGVoutMeta = open('SIPout_meta.dat', 'wb')
fGVoutMeta.close()
#GCCendif /* (TRACKOUT == 1) */
#GCCif (INFLUX_TOP == 1)
fGVin = open('SIPin.dat', 'wb')
fGVinMeta = open('SIPin_meta.dat', 'wb')
fGVinMeta.close()
#GCCendif /* (INFLUX_TOP == 1)*/
starttime = time.time()
localtime = time.asctime( time.localtime(starttime) )
print("Start time computation:", localtime)
fLog.write(os.getcwd()+ '\n')
fLog.write("Start time computation: "+ localtime+ '\n')
fLog.close()
# loop over all realisations
for k in range(0,nr_inst):
if (iPM >= 1): print(os.getcwd())
print('-------------------------------------- new instance ',k,'-------')
fLog= open('log.txt','a')
fLog.write('instance: ' + str(k)+ '\n')
fLog.close()
#GCCif (TRACKOUT == 1)
ntime_SIPout = 0
#GCCendif /* (TRACKOUT == 1) */
#GCCif (INFLUX_TOP == 1)
ntime_SIPin = 0
#GCCendif /* (INFLUX_TOP == 1)*/
i_MOMsave=1
i_GVplot=1
#GCCif (SPEED_VECTOR == 0)
random.seed(32+(k+nr_ins_start)*123433)
#GCCendif /* (SPEED_VECTOR == 0) */
#GCCif (SPEED_VECTOR == 1)
np.random.seed(32+(k+nr_ins_start)*123433)
#GCCendif /* (SPEED_VECTOR == 1) */
count_colls=0
#GCCif (COUNT_COLLS == 1)
count_colls=np.zeros(20,dtype=np.uint64)
#GCCendif /* (COUNT_COLLS == 1) */
###########################################################################
#
# Initialise SIP ensemble
#
###########################################################################
nEK_sip_ins = np.zeros(nr_sip_max)
mEK_sip_ins = np.zeros(nr_sip_max)
zEK_sip_ins = np.zeros(nr_sip_max)+zNan
#GCCif (PROCESS != 2)
wEK_sip_ins = np.zeros(nr_sip_max)
#GCCendif /* (PROCESS != 2)*/
zGBsep = np.array(np.arange(nz+1))*dz
#print('zGBsep',zGBsep)
iSIP_GBsep = np.zeros(nz+1,dtype='int')
nr_SIPs_GB = np.zeros(nz,dtype='int')
count_save = 0
# ColumnModel-Initialization
# ! i_init:
# ! 1 = empty domain
# ! 2 = top GB only
# ! 3 = top half domain
# ! 4 = linearly decaying over total column from 2*g_init to 0
# ! 5 = linearly decaying over top half from 2*g_init to 0
# ! 6 = total domain
# ! 7 = linearly decaying over top quarter from 2*g_init to 0
# ! 8 = sin()-hill over top half with 2*g_init max peak
# ! 9 = sin()-hill over top quarter with 2*g_init max peak
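    # Note: in Python 3 the divisions below (e.g. nz / 2) are float divisions;
    # i_start is only used in the "iz >= i_start" comparison, so a fractional
    # value simply means the first populated grid box is the next integer above it.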
if (i_init_1D == 1): i_start = nz + 1
if (i_init_1D == 2): i_start = nz
if (i_init_1D == 3): i_start = nz / 2
if (i_init_1D == 4): i_start = 1
if (i_init_1D == 5): i_start = nz / 2
if (i_init_1D == 6): i_start = 1
if (i_init_1D == 7): i_start = 3 * nz / 4
if (i_init_1D == 8): i_start = nz / 2
if (i_init_1D == 9): i_start = 3 * nz / 4
    i_start = i_start -1 # above block analogous to the block in Bott's F77 program. Be aware that Python indices start at 0, not at 1 like in Fortran
for iz in range(0,nz):
#print('init row ', iz,i_init_1D,i_start)
if (i_init_1D <= 3) or (i_init_1D == 6): N0_pick = N0
if (i_init_1D == 4) or (i_init_1D == 5) or (i_init_1D == 7) or (i_init_1D == 8) or (i_init_1D == 9):
izp=iz+1
if (i_init_1D == 4): gew = min(1.0,(1.0*izp/nz))
if (i_init_1D == 5): gew = max(0.0,min(1.0,(2.0*(1.0*izp/nz-0.5))))
if (i_init_1D == 7): gew = max(0.0,min(1.0,(4.0*(1.0*izp/nz-0.75))))
if (i_init_1D == 8): gew = math.sin(max(0.0,min(1.0,(2.0*(1.0*(izp - 0.5 )/nz-0.50))))*3.14)
if (i_init_1D == 9): gew = math.sin(max(0.0,min(1.0,(4.0*(1.0*(izp - 0.5 )/nz-0.75))))*3.14)
#print('init ', iz, gew)
N0_pick = N0 * 2 * gew
if (iz >= i_start):
[nr_SIPs,nEK_sip_tmp,mEK_sip_tmp,EK_krit,mdelta,mfix]=SI.InitSIP_ExpVert_singleSIP_WS(imlow,n10,r10,min10,eta_nu,xf0,N0_pick,dV,nr_sip_max,dV_skal=dV_skal)
print(iz,nz,'iz,nz')
# print('gew: ', gew)
#[nr_SIPs,nEK_sip_tmp,mEK_sip_tmp]=SI.InitSIP_uniform(n10,r10,min10,const_mass2rad, gew=gew)
nr_SIPs_GB[iz] = nr_SIPs
if (count_save+nr_SIPs > nr_sip_max):
print(count_save+nr_SIPs,nr_sip_max,iz)
sys.exit("aktuelle SIP-Anzahl groesser als nr_sip_max")
nEK_sip_ins[count_save:count_save+nr_SIPs]=nEK_sip_tmp[0:nr_SIPs]
mEK_sip_ins[count_save:count_save+nr_SIPs]=mEK_sip_tmp[0:nr_SIPs]
#print('count_save,nr_SIPs',count_save,nr_SIPs)
for j in range(0,nr_SIPs):
zEK_sip_ins[count_save+j]=(iz+random.random())*dz
#zEK_sip_ins[count_save+j]=(i+0.5)*dz
else:
nr_SIPs = 0
iSIP_GBsep[iz]=count_save
count_save += nr_SIPs
# total SIP number after end of column initialization
nr_SIPs_tot=count_save
#print('final nr_SIPs',count_save,nr_SIPs)
iSIP_GBsep[nz]=count_save
#iz=nz
#print(iz,iSIP_GBsep[iz],zGBsep[iz])
nr_SIPs_GB_max=nr_SIPs_GB.max()
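    # iSIP_GBsep[iz]:iSIP_GBsep[iz+1] delimits the SIPs of grid box iz in the flat
    # nEK/mEK/zEK arrays; nr_SIPs_GB_max is the largest per-box SIP count and is
    # written to the SIP meta file below.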
rSIP_tmp=FK.m2r(mEK_sip_ins[:nr_SIPs_tot],const_mass2rad)
#GCCif (PROCESS != 2)
wEK_sip_ins[:nr_SIPs_tot]=SD.Fallg(rSIP_tmp)
#GCCendif /*(PROCESS != 2)*/
#print('r',rSIP_tmp)
#print('w',wEK_sip_ins[:nr_SIPs_tot])
#output at initialization
for iz in range(0,nz):
nr_SIPs = nr_SIPs_GB[iz]
ia=iSIP_GBsep[iz]
ie=iSIP_GBsep[iz+1]
#Output
if (nr_SIPs > 0): MOMsave[k,0,iz,:] = FK.Moments_k0_3(nEK_sip_ins[ia:ie],mEK_sip_ins[ia:ie])
if (iPM >= 1): print('initial moments: ', MOMsave[k,0,iz,:])
if (iPM >= 1): print('nr_SIPs: ', nr_SIPs)
np.savetxt(fMom, MOMsave[k,0,iz,:].reshape(1,4), fmt=outp_format_long) # reshape necessary to write all 4 values in one row
np.savetxt(fGV, np.array([nr_SIPs,iz]).reshape(1,2), fmt='%6i')
if (nr_SIPs > 0):
np.savetxt(fGV, nEK_sip_ins[ia:ie].reshape(1,nr_SIPs), fmt=outp_format)
np.savetxt(fGV, mEK_sip_ins[ia:ie].reshape(1,nr_SIPs), fmt=outp_format)
np.savetxt(fGV, zEK_sip_ins[ia:ie].reshape(1,nr_SIPs), fmt=outp_format)
fGVMeta = open('SIP_meta.dat', 'a')
fGVMeta.write("\n{:7d} {:5d}\n".format(nr_SIPs_GB.sum(),nr_SIPs_GB.max()))
fGVMeta.close()
#prepare SIP processing
#sort SIPs by z-position and find index range of each grid box
#GCCif (INFLUX_TOP != 1)
    perm_sort=np.argsort(zEK_sip_ins[:nr_SIPs_tot+1])
# Quantify the dots to select the best quantification
# Will take the mid pixel, mid 9 pixels and mid 25 pixels and divide them by the corners.
# bsub -q short -W 4:00 -R "rusage[mem=50000]" -oo multiple_dot_lists_quantify_corners_HFF_mean_density.out -eo multiple_dot_lists_quantify_corners_HFF_mean_density.err 'python multiple_dot_lists_quantify_corners_HFF_mean_density.py'
# %matplotlib inline
from matplotlib.gridspec import GridSpec
import matplotlib.pyplot as plt
import matplotlib as mpl
import seaborn as sns
mpl.style.use('seaborn-white')
import multiprocess as mp
import numpy as np
import pandas as pd
import bioframe
import cooltools
import cooler
#import bbi
from cooltools import snipping
import sys
import seaborn as sns
import csv
def pileup_multiple_dot_lists(cool_file,dot_file_list, exp_cool,resolution,flank,anchor_dist,anchor_flank,plot_name):
i=0
filename1=cool_file[0].split("/")[-2].split("_hg38")[0]
filename2=cool_file[1].split("/")[-2].split("_hg38")[0]
filename3=cool_file[2].split("/")[-2].split("_hg38")[0]
cool = [filename1,filename2,filename3]
exp_cool = [exp_cool[0], exp_cool[1], exp_cool[2]]
conditions = ['HiC-FA-DpnII', 'HiC-DSG-DpnII','MicroC-DSG-MNase']
print(filename1)
print(filename2)
print(filename3)
resolution=resolution
flank = flank
#resolution=sys.argv[4]
hg38 = bioframe.fetch_chromsizes('hg38')
chromsizes = bioframe.fetch_chromsizes('hg38')
chromosomes = list(chromsizes.index)
binsize = resolution
cooler_paths = {
'HiC-FA-DpnII' : cool_file[0],
'HiC-DSG-DpnII' : cool_file[1],
'MicroC-DSG-MNase' : cool_file[2],
}
exp_paths = {
'HiC-FA-DpnII' : exp_cool[0],
'HiC-DSG-DpnII' : exp_cool[1],
'MicroC-DSG-MNase' : exp_cool[2],
}
long_names = {
'HiC-FA-DpnII': 'HiC-FA-DpnII',
'HiC-DSG-DpnII': 'HiC-DSG-DpnII',
'MicroC-DSG-MNase': 'MicroC-DSG-MNase',
}
pal = sns.color_palette('colorblind')
colors = {
filename1: pal[0],
filename2 : '#333333',
filename3: pal[2],
}
clrs = {
cond: cooler.Cooler(cooler_paths[cond]) for cond in conditions
}
anchor_dist = anchor_dist
anchor_flank = flank
# dot file list
gs = plt.GridSpec(nrows=len(conditions), ncols=len(dot_file_list) + 1)
plt.figure(figsize=(6 * len(conditions)+1, 7))
mean_list={}
for dot_file in dot_file_list:
print(dot_file)
sites = pd.read_table(dot_file)
mid1=(sites['start1']+sites['end1'])/2
mid2=(sites['start2']+sites['end2'])/2
new_file=pd.DataFrame()
new_file = pd.concat([sites['chrom1'],mid1,sites['chrom2'],mid2],axis=1)
# "convergent" orientation of paired CTCF motifs
# sites = sites[(sites['strand1'] == '+') & (sites['strand2'] == '-')] ## not working
new_file.columns=['chrom1','mid1','chrom2','mid2']
#print(len(new_file))
new_file.head()
supports = [(chrom, 0, chromsizes[chrom]) for chrom in chromosomes]
snippet_flank = flank
windows1 = snipping.make_bin_aligned_windows(
binsize,
new_file['chrom1'],
new_file['mid1'],
flank_bp=snippet_flank)
# windows1['strand'] = sites['strand1']
windows2 = snipping.make_bin_aligned_windows(
binsize,
new_file['chrom2'],
new_file['mid2'],
flank_bp=snippet_flank)
windows = pd.merge(windows1, windows2, left_index=True, right_index=True, suffixes=('1', '2'))
windows = snipping.assign_regions(windows, supports)
windows = windows.dropna()
windows.head()
stacks = {}
piles = {}
# mid point distplot
k=0
r_list=[]
mean_1=[]
for cond in conditions:
expected = pd.read_table(exp_paths[cond])
snipper = snipping.ObsExpSnipper(clrs[cond], expected)
#print(snipper)
stack = snipping.pileup(windows, snipper.select, snipper.snip)
stacks[cond] = stack
piles[cond] = np.nanmean(stack, axis=2)
mid_pixel_norm=[]
sq_size=piles[cond].shape[0]
            midpoint=int(np.floor(sq_size/2))
            background_size_start=int(np.ceil(sq_size*40/100))
            background_size_end=int(np.floor(sq_size*60/100))
print(midpoint)
print(background_size_start)
print(background_size_end)
slice_ = piles[cond]
# mid point of each dot
mid_pixel=slice_[midpoint,midpoint]
#mid_list_9pixels=np.nanmean(slice_[midpoint-1:midpoint+2,midpoint-1:midpoint+2])
# upper left
up_left=np.nanmean(slice_[:background_size_start,:background_size_start])
# upper right
up_right=np.nanmean(slice_[:background_size_start,background_size_end:])
            # lower left
lower_left=np.nanmean(slice_[background_size_end:,:background_size_start])
            # lower right
lower_right=np.nanmean(slice_[background_size_end:,background_size_end:])
# mid point of each dot
mid_pixel=slice_[midpoint,midpoint]
# Stripe up
stripe_up=np.nanmean(slice_[:background_size_start,background_size_start:background_size_end])
# stripe down
stripe_down=np.nanmean(slice_[background_size_end:,background_size_start:background_size_end])
# stripe left
stripe_left=np.nanmean(slice_[background_size_start:background_size_end,:background_size_start])
# stripe right
stripe_right=np.nanmean(slice_[background_size_start:background_size_end,background_size_end:])
stripes_mean=(stripe_up+stripe_right)/2
corners_mean=(up_left+up_right+lower_right)/3
mid_pixel_norm.append(mid_pixel/((stripes_mean+corners_mean)/2))
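            # mid_pixel_norm is the dot "enrichment": the centre pixel of the
            # obs/exp pileup divided by the mean of the flanking stripe and corner
            # backgrounds (the lower-left corner and the down/left stripes are
            # computed above but not included in those means).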
#mid_pixel_norm.append(mid_list_9pixels/((stripes_mean+corners_mean)/2))
ax = plt.subplot(gs[k,i])
new_list=mid_pixel_norm
            m=np.mean(new_list)
'''
TODO:
* If uniform grid read grid cell size
* If
'''
import numpy as np
import datetime
from pyd3d.utils import formatSci, formatInt
class Grid(object):
"""Create a Delft3D grid file
Examples
--------
Create an empty grid
>>> grid = Grid()
Load a grid from file
>>> grid = Grid.read('filename.grd')
Write grid to file
>>> Grid.write(grid,'filename.grd')
"""
def __init__(self, **kwargs):
self.properties = kwargs.get("properties", {})
self.shape = kwargs.get("shape", None)
self.x = kwargs.get("x", None)
self.y = kwargs.get("y", None)
# following two for rectilinear grids!
self.x_gridstep = kwargs.get("x_gridstep", None)
self.y_gridstep = kwargs.get("y_gridstep", None)
def newRectilinear(self):
"""Makes rectilinear grid with numpy meshgrid"""
print("------ Making new Delft3D grid ------")
print("x_gridstep", self.x_gridstep)
print("y_gridstep", self.y_gridstep)
print("width", self.width)
print("length", self.length)
if self.width % self.x_gridstep:
raise Exception("Width is not a multiple of x_gridstep")
if self.length % self.y_gridstep:
raise Exception("Length is not a multiple of y_gridstep")
x_gridstep = self.x_gridstep
y_gridstep = self.y_gridstep
xList = np.array([i for i in range(0, self.width + x_gridstep, x_gridstep)])
yList = np.array([i for i in range(0, self.length + y_gridstep, y_gridstep)]) + 100 # + 100 is default start y in REFGRID
xDim, yDim = [len(xList), len(yList)]
print(f"MNKmax = {xDim + 1} {yDim + 1} SIGMALAYERS") # to mdf file
print("xDim", xDim)
print("yDim", yDim)
self.shape.append([xDim, yDim])
        x_grid, y_grid = np.meshgrid(xList, yList)
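        # np.meshgrid returns 2-D coordinate arrays of shape (yDim, xDim): each row
        # of x_grid repeats xList and each column of y_grid repeats yList, giving
        # one (x, y) pair per grid node.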
# Copyright (c) 2018, NVIDIA CORPORATION.
import numpy as np
import pandas as pd
try:
from distributed.protocol import serialize, deserialize
_have_distributed = True
except ImportError:
_have_distributed = False
import pytest
import cudf
from cudf.tests import utils
require_distributed = pytest.mark.skipif(not _have_distributed,
reason='no distributed')
@require_distributed
def test_serialize_dataframe():
df = cudf.DataFrame()
df['a'] = np.arange(100)
df['b'] = np.arange(100, dtype=np.float32)
df['c'] = pd.Categorical(['a', 'b', 'c', '_', '_'] * 20,
categories=['a', 'b', 'c'])
outdf = deserialize(*serialize(df))
pd.util.testing.assert_frame_equal(df.to_pandas(), outdf.to_pandas())
@require_distributed
def test_serialize_dataframe_with_index():
df = cudf.DataFrame()
df['a'] = np.arange(100)
    df['b'] = np.random.random(100)
import numpy as np
from statsmodels.genmod.bayes_mixed_glm import (BinomialBayesMixedGLM,
PoissonBayesMixedGLM)
import pandas as pd
from scipy import sparse
from numpy.testing import assert_allclose, assert_equal
from scipy.optimize import approx_fprime
def gen_simple_logit(nc, cs, s):
np.random.seed(3799)
exog_vc = np.kron(np.eye(nc), np.ones((cs, 1)))
exog_fe = np.random.normal(size=(nc * cs, 2))
vc = s * np.random.normal(size=nc)
lp = np.dot(exog_fe, np.r_[1, -1]) + np.dot(exog_vc, vc)
pr = 1 / (1 + np.exp(-lp))
y = 1 * (np.random.uniform(size=nc * cs) < pr)
ident = np.zeros(nc, dtype=np.int)
return y, exog_fe, exog_vc, ident
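# The Kronecker product np.kron(np.eye(nc), np.ones((cs, 1))) used above builds the
# random-effects design: an (nc*cs) x nc indicator matrix whose column i is 1 exactly
# for the cs rows of cluster i, e.g. np.kron(np.eye(2), np.ones((3, 1))) is a 6 x 2
# matrix with [1, 0] in the first three rows and [0, 1] in the last three.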
def gen_simple_poisson(nc, cs, s):
np.random.seed(3799)
exog_vc = np.kron(np.eye(nc), np.ones((cs, 1)))
exog_fe = np.random.normal(size=(nc * cs, 2))
vc = s * np.random.normal(size=nc)
lp = np.dot(exog_fe, np.r_[0.1, -0.1]) + np.dot(exog_vc, vc)
r = np.exp(lp)
y = np.random.poisson(r)
ident = np.zeros(nc, dtype=np.int)
return y, exog_fe, exog_vc, ident
def gen_crossed_logit(nc, cs, s1, s2):
np.random.seed(3799)
a = np.kron(np.eye(nc), np.ones((cs, 1)))
b = np.kron(np.ones((cs, 1)), np.eye(nc))
exog_vc = np.concatenate((a, b), axis=1)
exog_fe = np.random.normal(size=(nc * cs, 1))
vc = s1 * np.random.normal(size=2 * nc)
vc[nc:] *= s2 / s1
lp = np.dot(exog_fe, np.r_[-0.5]) + np.dot(exog_vc, vc)
pr = 1 / (1 + np.exp(-lp))
y = 1 * (np.random.uniform(size=nc * cs) < pr)
ident = np.zeros(2 * nc, dtype=np.int)
ident[nc:] = 1
return y, exog_fe, exog_vc, ident
def gen_crossed_poisson(nc, cs, s1, s2):
np.random.seed(3799)
a = np.kron(np.eye(nc), np.ones((cs, 1)))
b = np.kron(np.ones((cs, 1)), np.eye(nc))
exog_vc = np.concatenate((a, b), axis=1)
exog_fe = np.random.normal(size=(nc * cs, 1))
vc = s1 * np.random.normal(size=2 * nc)
vc[nc:] *= s2 / s1
lp = np.dot(exog_fe, np.r_[-0.5]) + np.dot(exog_vc, vc)
r = np.exp(lp)
y = np.random.poisson(r)
ident = np.zeros(2 * nc, dtype=np.int)
ident[nc:] = 1
return y, exog_fe, exog_vc, ident
def gen_crossed_logit_pandas(nc, cs, s1, s2):
np.random.seed(3799)
a = np.kron(np.arange(nc), np.ones(cs))
b = np.kron(np.ones(cs), np.arange(nc))
fe = np.ones(nc * cs)
vc = np.zeros(nc * cs)
for i in np.unique(a):
ii = np.flatnonzero(a == i)
vc[ii] += s1 * np.random.normal()
for i in np.unique(b):
ii = np.flatnonzero(b == i)
vc[ii] += s2 * np.random.normal()
lp = -0.5 * fe + vc
pr = 1 / (1 + np.exp(-lp))
y = 1 * (np.random.uniform(size=nc * cs) < pr)
ident = np.zeros(2 * nc, dtype=np.int)
ident[nc:] = 1
df = pd.DataFrame({"fe": fe, "a": a, "b": b, "y": y})
return df
def test_simple_logit_map():
y, exog_fe, exog_vc, ident = gen_simple_logit(10, 10, 2)
exog_vc = sparse.csr_matrix(exog_vc)
glmm = BinomialBayesMixedGLM(y, exog_fe, exog_vc, ident, vcp_p=0.5)
rslt = glmm.fit_map()
assert_allclose(
glmm.logposterior_grad(rslt.params),
np.zeros_like(rslt.params),
atol=1e-3)
# Test the predict method
for linear in False, True:
for exog in None, exog_fe:
pr1 = rslt.predict(linear=linear, exog=exog)
pr2 = glmm.predict(rslt.params, linear=linear, exog=exog)
assert_allclose(pr1, pr2)
if not linear:
assert_equal(pr1.min() >= 0, True)
assert_equal(pr1.max() <= 1, True)
def test_simple_poisson_map():
y, exog_fe, exog_vc, ident = gen_simple_poisson(10, 10, 0.2)
exog_vc = sparse.csr_matrix(exog_vc)
glmm1 = PoissonBayesMixedGLM(y, exog_fe, exog_vc, ident, vcp_p=0.5)
rslt1 = glmm1.fit_map()
assert_allclose(
glmm1.logposterior_grad(rslt1.params),
np.zeros_like(rslt1.params),
atol=1e-3)
# This should give the same answer as above
glmm2 = PoissonBayesMixedGLM(y, exog_fe, exog_vc, ident, vcp_p=0.5)
rslt2 = glmm2.fit_map()
assert_allclose(rslt1.params, rslt2.params, atol=1e-4)
# Test the predict method
for linear in False, True:
for exog in None, exog_fe:
pr1 = rslt1.predict(linear=linear, exog=exog)
pr2 = rslt2.predict(linear=linear, exog=exog)
pr3 = glmm1.predict(rslt1.params, linear=linear, exog=exog)
pr4 = glmm2.predict(rslt2.params, linear=linear, exog=exog)
assert_allclose(pr1, pr2, rtol=1e-5)
assert_allclose(pr2, pr3, rtol=1e-5)
assert_allclose(pr3, pr4, rtol=1e-5)
if not linear:
assert_equal(pr1.min() >= 0, True)
assert_equal(pr2.min() >= 0, True)
assert_equal(pr3.min() >= 0, True)
# Check dimensions and PSD status of cov_params
for rslt in rslt1, rslt2:
cp = rslt.cov_params()
p = len(rslt.params)
assert_equal(cp.shape, np.r_[p, p])
np.linalg.cholesky(cp)
def test_crossed_logit_map():
y, exog_fe, exog_vc, ident = gen_crossed_logit(10, 10, 1, 2)
exog_vc = sparse.csr_matrix(exog_vc)
glmm = BinomialBayesMixedGLM(y, exog_fe, exog_vc, ident, vcp_p=0.5)
rslt = glmm.fit_map()
assert_allclose(
glmm.logposterior_grad(rslt.params),
np.zeros_like(rslt.params),
atol=1e-4)
# Check dimensions and PSD status of cov_params
cp = rslt.cov_params()
p = len(rslt.params)
assert_equal(cp.shape, np.r_[p, p])
np.linalg.cholesky(cp)
def test_crossed_poisson_map():
y, exog_fe, exog_vc, ident = gen_crossed_poisson(10, 10, 1, 1)
exog_vc = sparse.csr_matrix(exog_vc)
glmm = PoissonBayesMixedGLM(y, exog_fe, exog_vc, ident, vcp_p=0.5)
rslt = glmm.fit_map()
assert_allclose(
glmm.logposterior_grad(rslt.params),
np.zeros_like(rslt.params),
atol=1e-4)
# Check dimensions and PSD status of cov_params
cp = rslt.cov_params()
p = len(rslt.params)
assert_equal(cp.shape, np.r_[p, p])
np.linalg.cholesky(cp)
def test_logit_map_crossed_formula():
data = gen_crossed_logit_pandas(10, 10, 1, 0.5)
fml = "y ~ fe"
fml_vc = {"a": "0 + C(a)", "b": "0 + C(b)"}
glmm = BinomialBayesMixedGLM.from_formula(fml, fml_vc, data, vcp_p=0.5)
rslt = glmm.fit_map()
assert_allclose(
glmm.logposterior_grad(rslt.params),
np.zeros_like(rslt.params),
atol=1e-4)
rslt.summary()
r = rslt.random_effects("a")
assert_allclose(
r.iloc[0, :].values, np.r_[-0.02004904, 0.094014], atol=1e-4)
# Check dimensions and PSD status of cov_params
cm = rslt.cov_params()
p = rslt.params.shape[0]
assert_equal(list(cm.shape), [p, p])
np.linalg.cholesky(cm)
def test_elbo_grad():
for f in range(2):
for j in range(2):
if f == 0:
if j == 0:
y, exog_fe, exog_vc, ident = gen_simple_logit(10, 10, 2)
else:
y, exog_fe, exog_vc, ident = gen_crossed_logit(
10, 10, 1, 2)
elif f == 1:
if j == 0:
y, exog_fe, exog_vc, ident = gen_simple_poisson(
10, 10, 0.5)
else:
y, exog_fe, exog_vc, ident = gen_crossed_poisson(
10, 10, 1, 0.5)
exog_vc = sparse.csr_matrix(exog_vc)
if f == 0:
glmm1 = BinomialBayesMixedGLM(
y, exog_fe, exog_vc, ident, vcp_p=0.5)
else:
glmm1 = PoissonBayesMixedGLM(
y, exog_fe, exog_vc, ident, vcp_p=0.5)
rslt1 = glmm1.fit_map()
for k in range(3):
if k == 0:
vb_mean = rslt1.params
vb_sd = np.ones_like(vb_mean)
elif k == 1:
vb_mean = np.zeros(len(vb_mean))
vb_sd = np.ones_like(vb_mean)
else:
vb_mean = np.random.normal(size=len(vb_mean))
vb_sd = np.random.uniform(1, 2, size=len(vb_mean))
mean_grad, sd_grad = glmm1.vb_elbo_grad(vb_mean, vb_sd)
def elbo(vec):
n = len(vec) // 2
return glmm1.vb_elbo(vec[:n], vec[n:])
x = np.concatenate((vb_mean, vb_sd))
g1 = approx_fprime(x, elbo, 1e-5)
n = len(x) // 2
mean_grad_n = g1[:n]
sd_grad_n = g1[n:]
assert_allclose(mean_grad, mean_grad_n, atol=1e-2, rtol=1e-2)
assert_allclose(sd_grad, sd_grad_n, atol=1e-2, rtol=1e-2)
def test_simple_logit_vb():
y, exog_fe, exog_vc, ident = gen_simple_logit(10, 10, 0)
exog_vc = sparse.csr_matrix(exog_vc)
glmm1 = BinomialBayesMixedGLM(
y, exog_fe, exog_vc, ident, vcp_p=0.5, fe_p=0.5)
rslt1 = glmm1.fit_map()
glmm2 = BinomialBayesMixedGLM(
y, exog_fe, exog_vc, ident, vcp_p=0.5, fe_p=0.5)
rslt2 = glmm2.fit_vb(rslt1.params)
rslt1.summary()
rslt2.summary()
assert_allclose(
rslt1.params[0:5],
np.r_[0.75330405, -0.71643228, -2.49091288, -0.00959806, 0.00450254],
rtol=1e-4,
atol=1e-4)
assert_allclose(
rslt2.params[0:5],
np.r_[0.79338836, -0.7599833, -0.64149356, -0.24772884, 0.10775366],
rtol=1e-4,
atol=1e-4)
for rslt in rslt1, rslt2:
cp = rslt.cov_params()
p = len(rslt.params)
if rslt is rslt1:
assert_equal(cp.shape, np.r_[p, p])
np.linalg.cholesky(cp)
else:
assert_equal(cp.shape, np.r_[p,])
assert_equal(cp > 0, True*np.ones(p))
def test_simple_poisson_vb():
y, exog_fe, exog_vc, ident = gen_simple_poisson(10, 10, 1)
exog_vc = sparse.csr_matrix(exog_vc)
glmm1 = PoissonBayesMixedGLM(y, exog_fe, exog_vc, ident, vcp_p=0.5)
rslt1 = glmm1.fit_map()
glmm2 = PoissonBayesMixedGLM(y, exog_fe, exog_vc, ident, vcp_p=0.5)
rslt2 = glmm2.fit_vb(rslt1.params)
rslt1.summary()
rslt2.summary()
assert_allclose(
rslt1.params[0:5],
np.r_[-0.07233493, -0.06706505, -0.47159649, 1.12575122, -1.02442201],
rtol=1e-4,
atol=1e-4)
assert_allclose(
rslt1.cov_params().flat[0:5],
np.r_[0.00790914, 0.00080666, -0.00050719, 0.00022648, 0.00046235],
rtol=1e-4,
atol=1e-4)
assert_allclose(
rslt2.params[0:5],
np.r_[-0.07088814, -0.06373107, -0.22770786, 1.12923746, -1.26161339],
rtol=1e-4,
atol=1e-4)
assert_allclose(
rslt2.cov_params()[0:5],
np.r_[0.00747782, 0.0092554, 0.04508904, 0.02934488, 0.20312746],
rtol=1e-4,
atol=1e-4)
for rslt in rslt1, rslt2:
cp = rslt.cov_params()
p = len(rslt.params)
        if rslt is rslt1:
            assert_equal(cp.shape, np.r_[p, p])
# !/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 24 14:24:15 2018
@author: cham
"""
import bisect
import collections
from itertools import product
import numpy as np
from emcee import EnsembleSampler
from laspec.qconv import conv_spec_Gaussian, conv_spec_Rotation
from laspec.normalization import normalize_spectrum_spline
from laspec.ccf import RVM
from scipy.optimize import least_squares
from .regress import costfun, default_lnlike, best_match
# speed of light
SOL = 299792.458
def rand_pos(p0, nwalkers, eps=.1):
p0 = np.array(p0)
# do a 0.1 mag rand
return [p0 + (np.random.rand(*p0.shape)-0.5)*2*eps for i in range(nwalkers)]
def grid_to_meshflat(*grids):
""" convert a grid to meshflat arrays """
# get dimensions
Ns = [len(_) for _ in grids]
# make indices of grids
grids_ind = [np.arange(_, dtype=np.int) for _ in Ns]
# make mesh_grids
meshs = np.meshgrid(*grids)
# make mesh_ind
meshs_ind = np.meshgrid(*grids_ind)
# make flat_mesh_grids
flats = np.array([_.flatten() for _ in meshs]).T
# make flat_mesh_ind
flats_ind = np.array([_.flatten() for _ in meshs_ind]).T
# make a dict of flat_mesh_ind:i
ind_dict = dict()
for i in range(len(flats_ind)):
ind_dict[tuple(flats_ind[i])] = i
return grids, grids_ind, meshs, meshs_ind, flats, flats_ind, ind_dict
def bisect_interval(edges=[1, 2, 3], x=.1):
""" return the nearest edges using bisect """
if edges[0] < x <= edges[-1]:
_ = bisect.bisect_left(edges, x)
return _ - 1, _
elif edges[0] == x:
return 0, 1
else:
# null value, returned when x is not in bounds
return -9, -9
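# Illustrative behaviour: bisect_interval([1, 2, 3], 1.5) -> (0, 1),
# bisect_interval([1, 2, 3], 1) -> (0, 1), and any x outside [edges[0], edges[-1]]
# returns the sentinel pair (-9, -9).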
class Regli:
""" Regular Grid Linear Interpolator """
wave = None
def __init__(self, *grids):
self.ndim = len(grids)
_ = grid_to_meshflat(*grids)
self.grids = _[0]
self.grids_ind = _[1]
self.meshs = _[2]
self.meshs_ind = _[3]
self.flats = _[4]
self.flats_ind = _[5]
self.ind_dict = _[6]
self.grid_shape = tuple([len(g) for g in grids])
# set values
self.set_values(np.array([]), test=True)
# self.value_shape = 1
# self.set_values(values)
# define a subset for best match
self.best_match_mask = None
# mean error
self.me = 0.
# pre-proc parameters
self.set_pre_proc_ind()
def __repr__(self):
s = "\n".join([
"==== regli.Regli instance ====",
"".join(["grid shape: ", self.grid_shape.__repr__()]),
"".join(["value shape: ", self.values.shape.__repr__()]),
"==============================",
])
return s
def set_pre_proc_ind(self, pre_proc_ind=None):
if pre_proc_ind is None:
self.pre_proc_ind = pre_proc_ind
else:
self.pre_proc_ind = list(pre_proc_ind)
def pre_proc(self, pos):
if self.pre_proc_ind is None:
return np.array(pos)
else:
return np.array(pos)[list(self.pre_proc_ind)]
@staticmethod
def init_from_flats(input_flats, verbose=False):
""" try to parse flats and return a Regli instance
This is a slow process and takes 86 min to parse a 400,000 point grid.
Parameters
----------
input_flats:
flats
verbose:
if True, print verbose info
Return
------
"""
# determine n_dim
ndim = input_flats.shape[1]
# determine grid
grids = [np.unique(input_flats[:, i]) for i in range(ndim)]
        # initiate Regli
r = Regli(*grids)
# determine eps
eps_auto = np.min([np.min(np.diff(grid)) for grid in grids]) * 0.3
# determine n_flats [number of grid points]
nrow = np.prod(r.grid_shape)
# ind_values = np.zeros((nrow,), np.int)
c_missing = 0
for i in range(len(r.flats)):
# for each flat
flat_ = r.flats[i]
flat_ind_ = r.flats_ind[i]
# evaluate norm, see if data exists
norm_ = np.linalg.norm(input_flats - flat_, np.inf, axis=1)
if np.min(norm_) < eps_auto:
# data exists
r.ind_dict[tuple(flat_ind_)] = np.argmin(norm_)
else:
# data missing
r.ind_dict[tuple(flat_ind_)] = None
c_missing += 1
if verbose:
print("@Regli: grid value missing --> ", flat_)
print("@Regli: {} values missing!".format(c_missing))
return r
@property
def rgi_shape(self):
return (*self.grid_shape, self.value_shape)
def set_values(self, values, test=False):
values = np.array(values)
assert values.ndim in (1, 2)
if values.ndim == 2:
self.values = np.array(values)
self.value_shape = self.values.shape[1]
self.set_wave(np.arange(self.values.shape[1])) # only available when 2D
elif values.ndim == 1:
if not test:
assert len(values) == len(self.flats)
self.values = np.array(values.reshape(-1, 1))
self.value_shape = 1
else:
raise ValueError("Values shape not correct!")
def interpns(self, poss):
return np.array([self.interpn(pos) for pos in poss])
def interp2(self, pos, null_value=np.nan):
e1 = bisect_interval(self.grids[0], pos[0])
e2 = bisect_interval(self.grids[1], pos[1])
if e1[0] < -1 or e2[0] < -1:
# out of bounds
return np.ones((self.values.shape[1],)) * null_value
else:
# calculate nodes
p1_0 = self.grids[0][e1[0]]
p1_1 = self.grids[0][e1[1]]
p2_0 = self.grids[1][e2[0]]
p2_1 = self.grids[1][e2[1]]
v_tot = (p1_1 - p1_0) * (p2_1 - p2_0)
v_00 = (p1_1 - pos[0]) * (p2_1 - pos[1])
v_01 = (p1_1 - pos[0]) * (pos[1] - p2_0)
v_10 = (pos[0] - p1_0) * (p2_1 - pos[1])
v_11 = (pos[0] - p1_0) * (pos[1] - p2_0)
# v_000+v_001+v_010+v_011+v_100+v_101+v_110+v_111
i_00 = self.ind_dict[e1[0], e2[0]]
i_01 = self.ind_dict[e1[0], e2[1]]
i_10 = self.ind_dict[e1[1], e2[0]]
i_11 = self.ind_dict[e1[1], e2[1]]
if None in (i_00, i_01, i_10, i_11):
return np.ones((self.values.shape[1],)) * null_value
w = np.array([v_00, v_01, v_10, v_11]).reshape(-1, 1) / v_tot
value_interp = np.sum(self.values[np.array([i_00, i_01, i_10, i_11])] * w, axis=0)
return value_interp
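    # Bilinear interpolation: each corner value is weighted by the area of the
    # sub-rectangle diagonally opposite the query point, normalised by the cell
    # area v_tot; if any corner is missing from ind_dict the whole cell falls
    # back to null_value.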
def interp3(self, pos, null_value=np.nan):
e1 = bisect_interval(self.grids[0], pos[0])
e2 = bisect_interval(self.grids[1], pos[1])
e3 = bisect_interval(self.grids[2], pos[2])
if e1[0] < -1 or e2[0] < -1 or e3[0] < -1:
# out of bounds
return np.ones((self.values.shape[1],)) * null_value
else:
# calculate nodes
p1_0 = self.grids[0][e1[0]]
p1_1 = self.grids[0][e1[1]]
p2_0 = self.grids[1][e2[0]]
p2_1 = self.grids[1][e2[1]]
p3_0 = self.grids[2][e3[0]]
p3_1 = self.grids[2][e3[1]]
v_tot = (p1_1 - p1_0) * (p2_1 - p2_0) * (p3_1 - p3_0)
v_000 = (p1_1 - pos[0]) * (p2_1 - pos[1]) * (p3_1 - pos[2])
v_100 = (pos[0] - p1_0) * (p2_1 - pos[1]) * (p3_1 - pos[2])
v_010 = (p1_1 - pos[0]) * (pos[1] - p2_0) * (p3_1 - pos[2])
v_110 = (pos[0] - p1_0) * (pos[1] - p2_0) * (p3_1 - pos[2])
v_001 = (p1_1 - pos[0]) * (p2_1 - pos[1]) * (pos[2] - p3_0)
v_101 = (pos[0] - p1_0) * (p2_1 - pos[1]) * (pos[2] - p3_0)
v_011 = (p1_1 - pos[0]) * (pos[1] - p2_0) * (pos[2] - p3_0)
v_111 = (pos[0] - p1_0) * (pos[1] - p2_0) * (pos[2] - p3_0)
# v_000+v_001+v_010+v_011+v_100+v_101+v_110+v_111
i_000 = self.ind_dict[e1[0], e2[0], e3[0]]
i_001 = self.ind_dict[e1[0], e2[0], e3[1]]
i_010 = self.ind_dict[e1[0], e2[1], e3[0]]
i_011 = self.ind_dict[e1[0], e2[1], e3[1]]
i_100 = self.ind_dict[e1[1], e2[0], e3[0]]
i_101 = self.ind_dict[e1[1], e2[0], e3[1]]
i_110 = self.ind_dict[e1[1], e2[1], e3[0]]
i_111 = self.ind_dict[e1[1], e2[1], e3[1]]
if None in (i_000, i_001, i_010, i_011, i_100, i_101, i_110, i_111):
return np.ones((self.values.shape[1],)) * null_value
w = np.array([v_000, v_001, v_010, v_011, v_100, v_101, v_110, v_111]).reshape(-1, 1) / v_tot
# specs = spec[np.array([i_000, i_001, i_010, i_011, i_100, i_101, i_110, i_111])]
# figure();plot(spec_wm);plot(specs.T)
value_interp = np.sum(self.values[np.array([i_000, i_001, i_010, i_011, i_100, i_101, i_110, i_111])] * w,
axis=0)
return value_interp
def interpn(self, pos, null_value=np.nan):
pos = np.array(pos).flatten()
# ndim x 2 edge array
edges_ind = np.array([bisect_interval(self.grids[_], pos[_]) for _ in range(self.ndim)])
edges = np.array([(self.grids[i][edges_ind[i]]) for i in range(self.ndim)])
if np.any(edges_ind[:, 0] < -1):
# out of bounds
return np.ones((self.values.shape[1],))*null_value
# make codes
codes = np.array([_ for _ in product((0, 1), repeat=self.ndim)])
# weight in each dimension
        frac_dist = np.fliplr((edges - pos.reshape(-1, 1)) * np.array([-1, 1])
#! /usr/bin/env python
# GPTune Copyright (c) 2019, The Regents of the University of California,
# through Lawrence Berkeley National Laboratory (subject to receipt of any
# required approvals from the U.S.Dept. of Energy) and the University of
# California, Berkeley. All rights reserved.
#
# If you have questions about your rights to use or distribute this software,
# please contact Berkeley Lab's Intellectual Property Office at <EMAIL>.
#
# NOTICE. This Software was developed under funding from the U.S. Department
# of Energy and the U.S. Government consequently retains certain rights.
# As such, the U.S. Government has been granted for itself and others acting
# on its behalf a paid-up, nonexclusive, irrevocable, worldwide license in
# the Software to reproduce, distribute copies to the public, prepare
# derivative works, and perform publicly and display publicly, and to permit
# other to do so.
#
################################################################################
from autotune.search import *
from autotune.space import *
from autotune.problem import *
# from gptune import GPTune
# from gptune import GPTune_MB
# from data import Data
# from data import Categoricalnorm
# from options import Options
# from computer import Computer
from gptune import * # import all
import sys
import os
import mpi4py
from mpi4py import MPI
import numpy as np
import matplotlib.pyplot as plt
import time
import argparse
from callopentuner import OpenTuner
from callhpbandster import HpBandSter, HpBandSter_bandit
import logging
import scipy
import openturns as ot
sys.path.insert(0, os.path.abspath(__file__ + "/../../../GPTune/"))
logging.getLogger('matplotlib.font_manager').disabled = True
# from GPTune import *
################################################################################
# Define Problem
# YL: for the spaces, the following datatypes are supported:
# Real(lower, upper, transform="normalize", name="yourname")
# Integer(lower, upper, transform="normalize", name="yourname")
# Categoricalnorm(categories, transform="onehot", name="yourname")
# Argmin{x} objectives(t,x), for x in [0., 1.]
def objectives(point):
"""
    f(t,x) = exp(-(x + 1)^(t + 1)) * cos(2*pi*x) * ( sin((t + 2)*(2*pi*x)) + sin((t + 2)^2*(2*pi*x)) + sin((t + 2)^3*(2*pi*x)) ) + 1
"""
t = point['t']
x = point['x']
bmin = point['bmin']
bmax = point['bmax']
eta = point['eta']
if 'budget' in point:
bgt = point['budget']
else:
bgt = bmax
a = 2 * np.pi
b = a * t
c = a * x
d = np.exp(- (x + 1) ** (t + 1)) * np.cos(c)
e = np.sin((t + 2) * c) + np.sin((t + 2)**2 * c) + np.sin((t + 2)**3 * c)
f = d * e + 1
# print('test:',test)
"""
f(t,x) = x^2+t
"""
# t = point['t']
# x = point['x']
# f = 20*x**2+t
# time.sleep(1.0)
def perturb(bgt):
perturb_magnitude = 0.1
k1 = -perturb_magnitude/bmax
# return np.cos(c)*(-np.log10(bgt))*0.1
assert k1*bmax + perturb_magnitude == 0
return np.cos(c) * (k1*bgt + perturb_magnitude)
out = [f*(1+perturb(bgt))]
print(f"One demo run, x = {x:.4f}, t = {t:.4f}, budget = {bgt:.4f}, perturb = {perturb(bgt):.4f}, out = {out[0]:.4f}")
return out
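    # perturb(bgt) scales a cos(2*pi*x) perturbation linearly from perturb_magnitude
    # (10%) at budget 0 down to exactly 0 at bgt == bmax, so evaluations at the full
    # budget recover the unperturbed objective f.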
""" Plot the objective function for t=1,2,3,4,5,6 """
def annot_min(x,y, ax=None):
xmin = x[np.argmin(y)]
ymin = y.min()
text= "x={:.3f}, y={:.3f}".format(xmin, ymin)
if not ax:
ax=plt.gca()
bbox_props = dict(boxstyle="square,pad=0.3", fc="w", ec="k", lw=0.72)
arrowprops=dict(arrowstyle="-",connectionstyle="angle,angleA=0,angleB=60")
kw = dict(xycoords='data',textcoords="offset points",arrowprops=arrowprops, bbox=bbox_props, ha="right", va="top")
ax.annotate(text, xy=(xmin, ymin), xytext=(210,5), **kw)
def main():
import matplotlib.pyplot as plt
args = parse_args()
ntask = args.ntask
nruns = args.nruns
npernode = args.npernode
TUNER_NAME = args.optimization
bmin = args.bmin
bmax = args.bmax
eta = args.eta
Nloop = args.Nloop
plot = args.plot
expid = args.expid
restart = args.restart
print(args)
(machine, processor, nodes, cores) = GetMachineConfiguration()
ot.RandomGenerator.SetSeed(args.seed)
print ("machine: " + machine + " processor: " + processor + " num_nodes: " + str(nodes) + " num_cores: " + str(cores))
os.environ['MACHINE_NAME'] = machine
os.environ['TUNER_NAME'] = TUNER_NAME
input_space = Space([Real(0., 10., transform="normalize", name="t")])
parameter_space = Space([Real(0., 1., transform="normalize", name="x")])
# input_space = Space([Real(0., 0.0001, "uniform", "normalize", name="t")])
# parameter_space = Space([Real(-1., 1., "uniform", "normalize", name="x")])
output_space = Space([Real(float('-Inf'), float('Inf'), name="y")])
constraints = {"cst1": "x >= 0. and x <= 1."}
constants={"nodes":nodes,"cores":cores,"npernode":npernode,"bmin":bmin,"bmax":bmax,"eta":eta}
# problem = TuningProblem(input_space, parameter_space,output_space, objectives, constraints, models) # with performance model
problem = TuningProblem(input_space, parameter_space,output_space, objectives, constraints, constants=constants) # no performance model
computer = Computer(nodes=nodes, cores=cores, hosts=None)
options = Options()
options['model_restarts'] = restart
options['distributed_memory_parallelism'] = False
options['shared_memory_parallelism'] = False
options['objective_evaluation_parallelism'] = False
options['objective_multisample_threads'] = 1
options['objective_multisample_processes'] = 1
options['objective_nprocmax'] = 1
options['model_processes'] = 1
# options['model_threads'] = 1
# options['model_restart_processes'] = 1
# options['search_multitask_processes'] = 1
# options['search_multitask_threads'] = 1
# options['search_threads'] = 16
# options['mpi_comm'] = None
#options['mpi_comm'] = mpi4py.MPI.COMM_WORLD
options['model_class'] = 'Model_LCM' #'Model_GPy_LCM'
options['verbose'] = False
# options['sample_algo'] = 'MCS'
# options['sample_class'] = 'SampleLHSMDU'
options['sample_class'] = 'SampleOpenTURNS'
options.validate(computer=computer)
options['budget_min'] = bmin
options['budget_max'] = bmax
options['budget_base'] = eta
smax = int(np.floor(np.log(options['budget_max']/options['budget_min'])/np.log(options['budget_base'])))
budgets = [options['budget_max'] /options['budget_base']**x for x in range(smax+1)]
NSs = [int((smax+1)/(s+1))*options['budget_base']**s for s in range(smax+1)]
NSs_all = NSs.copy()
budget_all = budgets.copy()
for s in range(smax+1):
for n in range(s):
NSs_all.append(int(NSs[s]/options['budget_base']**(n+1)))
budget_all.append(int(budgets[s]*options['budget_base']**(n+1)))
Ntotal = int(sum(NSs_all)*Nloop)
Btotal = int(Nloop*np.dot(np.array(NSs_all), np.array(budget_all))/options['budget_max']) # total number of evaluations at highest budget -- used for single-fidelity tuners
print("samples in one multi-armed bandit loop, NSs_all = ", NSs_all)
print("total number of samples: ", Ntotal)
print("total number of evaluations at highest budget: ", Btotal)
print(f"Sampler: {options['sample_class']}, {options['sample_algo']}")
print()
data = Data(problem)
# giventask = [[1.0], [5.0], [10.0]]
# giventask = [[1.0]]
# t_end = args.t_end
# giventask = [[i] for i in np.arange(1, ntask/2+1, 0.5).tolist()]
if ntask==1:
giventask = [[args.t]]
elif ntask==2:
giventask = [[3.], [3.1]]
elif ntask==3:
giventask = [[3.], [3.1], [3.2]]
elif ntask==10:
giventask = [[i] for i in np.arange(1, 1.5, 0.05).tolist()]
# giventask = [[1.0], [1.05], [1.1]]
NI=len(giventask)
assert NI == ntask # make sure number of tasks match
np.set_printoptions(suppress=False, precision=3)
if(TUNER_NAME=='GPTuneBand'):
NS = Nloop
data = Data(problem)
gt = GPTune_MB(problem, computer=computer, NS=Nloop, options=options)
(data, stats, data_hist)=gt.MB_LCM(NS = Nloop, Igiven = giventask)
print("Sampler class: ", options['sample_class'])
print("Model class: ", options['model_class'])
print("Tuner: ", TUNER_NAME)
print("stats: ", stats)
results_file = open(f"demo_ntask{args.ntask}_bandit{args.bmin}-{args.bmax}-{args.eta}_Nloop{args.Nloop}_expid{args.expid}.txt", "a")
results_file.write(f"Tuner: {TUNER_NAME}\n")
results_file.write(f"stats: {stats}\n")
""" Print all input and parameter samples """
for tid in range(NI):
print("tid: %d" % (tid))
print(f" t: {data.I[tid][0]:.2f}")
print(" Ps ", data.P[tid])
print(" Os ", data.O[tid].tolist())
nth = np.argmin(data.O[tid])
Popt = data.P[tid][nth]
# find which arm and which sample the optimal param is from
for arm in range(len(data_hist.P)):
try:
idx = (data_hist.P[arm]).index(Popt)
arm_opt = arm
except ValueError:
pass
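            # arm_opt / idx record which bandit arm (and which sample within it) produced
            # Popt; they are gathered here for reference and are not used further below.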
print(' Popt ', Popt, 'Oopt ', min(data.O[tid])[0])
results_file.write(f"tid: {tid:d}\n")
results_file.write(f" t: {data.I[tid][0]:.2f}\n")
results_file.write(f" Os {data.O[tid].tolist()}\n")
if(TUNER_NAME=='GPTune'):
NS = Btotal
if args.nruns > 0:
NS = args.nruns
print("In GPTune, using the given number of nruns ", NS)
NS1 = max(NS//2, 1)
gt = GPTune(problem, computer=computer, data=data, options=options, driverabspath=os.path.abspath(__file__))
""" Building MLA with the given list of tasks """
(data, modeler, stats) = gt.MLA(NS=NS, NI=NI, Igiven=giventask, NS1=NS1)
print("model class: ", options['model_class'])
print("Model restart: ", restart)
print("Tuner: ", TUNER_NAME)
print("stats: ", stats)
results_file = open(f"demo_ntask{args.ntask}_bandit{args.bmin}-{args.bmax}-{args.eta}_Nloop{args.Nloop}_expid{args.expid}.txt", "a")
results_file.write(f"Tuner: {TUNER_NAME}\n")
results_file.write(f"stats: {stats}\n")
""" Print all input and parameter samples """
sum_Oopt = 0.
for tid in range(NI):
print("tid: %d" % (tid))
print(f" t: {data.I[tid][0]:.2f} ")
print(" Ps ", data.P[tid])
print(" Os ", data.O[tid])
print(' Popt ', data.P[tid][np.argmin(data.O[tid])], f'Oopt {min(data.O[tid])[0]:.3f}', 'nth ', np.argmin(data.O[tid]))
sum_Oopt += min(data.O[tid])[0]
results_file.write(f"tid: {tid:d}\n")
results_file.write(f" t: {data.I[tid][0]:.2f}\n")
results_file.write(f" Os {data.O[tid].tolist()}\n")
# print("sum of all optimal objectives", sum_Oopt)
if(TUNER_NAME=='opentuner'):
NS = Btotal
(data,stats) = OpenTuner(T=giventask, NS=NS, tp=problem, computer=computer, run_id="OpenTuner", niter=1, technique=None)
print("Tuner: ", TUNER_NAME)
print("stats: ", stats)
results_file = open(f"demo_ntask{args.ntask}_bandit{args.bmin}-{args.bmax}-{args.eta}_Nloop{args.Nloop}_expid{args.expid}.txt", "a")
results_file.write(f"Tuner: {TUNER_NAME}\n")
results_file.write(f"stats: {stats}\n")
""" Print all input and parameter samples """
for tid in range(NI):
print("tid: %d" % (tid))
print(f" t: {data.I[tid][0]:.2f} ")
print(" Ps ", data.P[tid][:NS])
print(" Os ", data.O[tid][:NS])
print(' Popt ', data.P[tid][np.argmin(data.O[tid][:NS])], 'Oopt ', min(data.O[tid][:NS])[0], 'nth ', np.argmin(data.O[tid][:NS]))
results_file.write(f"tid: {tid:d}\n")
results_file.write(f" t: {data.I[tid][0]:.2f}\n")
results_file.write(f" Os {data.O[tid].tolist()}\n")
# single fidelity version of hpbandster
if(TUNER_NAME=='TPE'):
NS = Btotal
(data,stats)=HpBandSter(T=giventask, NS=NS, tp=problem, computer=computer, options=options, run_id="HpBandSter", niter=1)
print("Tuner: ", TUNER_NAME)
print("stats: ", stats)
results_file = open(f"demo_ntask{args.ntask}_bandit{args.bmin}-{args.bmax}-{args.eta}_Nloop{args.Nloop}_expid{args.expid}.txt", "a")
results_file.write(f"Tuner: {TUNER_NAME}\n")
results_file.write(f"stats: {stats}\n")
""" Print all input and parameter samples """
for tid in range(NI):
print("tid: %d" % (tid))
print(f" t: {data.I[tid][0]:.2f} ")
print(" Ps ", data.P[tid])
print(" Os ", data.O[tid])
print(' Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))
results_file.write(f"tid: {tid:d}\n")
results_file.write(f" t: {data.I[tid][0]:.2f}\n")
results_file.write(f" Os {data.O[tid].tolist()}\n")
# multi-fidelity version
if(TUNER_NAME=='hpbandster'):
NS = Ntotal
(data,stats)=HpBandSter_bandit(T=giventask, NS=NS, tp=problem, computer=computer, options=options, run_id="hpbandster_bandit", niter=1)
print("Tuner: ", TUNER_NAME)
print("stats: ", stats)
results_file = open(f"demo_ntask{args.ntask}_bandit{args.bmin}-{args.bmax}-{args.eta}_Nloop{args.Nloop}_expid{args.expid}.txt", "a")
results_file.write(f"Tuner: {TUNER_NAME}\n")
results_file.write(f"stats: {stats}\n")
""" Print all input and parameter samples """
for tid in range(NI):
print("tid: %d" % (tid))
print(f" t: {data.I[tid][0]:.2f} ")
print(" Ps ", data.P[tid])
print(" Os ", data.O[tid].tolist())
results_file.write(f"tid: {tid:d}\n")
results_file.write(f" t: {data.I[tid][0]:.2f}\n")
results_file.write(f" Os {data.O[tid].tolist()}\n")
# print(' Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))
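            # For the multi-fidelity bandit tuner, each entry of data.O[tid] is assumed to be
            # nested as [[[budget, objective], ...]]; the loop below keeps the best objective
            # observed at the highest budget seen for this task.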
max_budget = 0.
Oopt = 99999
Popt = None
nth = None
for idx, (config, out) in enumerate(zip(data.P[tid], data.O[tid].tolist())):
for subout in out[0]:
budget_cur = subout[0]
if budget_cur > max_budget:
max_budget = budget_cur
Oopt = subout[1]
Popt = config
nth = idx
elif budget_cur == max_budget:
if subout[1] < Oopt:
Oopt = subout[1]
Popt = config
nth = idx
print(' Popt ', Popt, 'Oopt ', Oopt, 'nth ', nth)
if plot==1:
x = np.arange(0., 1., 0.0001)
ymean_set = [] # stores predicted function values
ytrue_set = []
for tid in range(len(data.I)):
p = data.I[tid]
t = p[0]
fig = plt.figure(figsize=[12.8, 9.6])
I_orig=p
kwargst = {input_space[k].name: I_orig[k] for k in range(len(input_space))}
y=np.zeros([len(x),1])
y_mean=np.zeros([len(x)])
y_std=np.zeros([len(x)])
for i in range(len(x)):
P_orig=[x[i]]
kwargs = {parameter_space[k].name: P_orig[k] for k in range(len(parameter_space))}
kwargs.update(kwargst)
y[i]=objectives(kwargs)
if(TUNER_NAME=='GPTune'):
(y_mean[i],var) = predict_aug(modeler, gt, kwargs,tid)
y_std[i]=np.sqrt(var)
# print(y_mean[i],y_std[i],y[i])
fontsize=40
plt.rcParams.update({'font.size': 40})
plt.plot(x, y, 'b',lw=2,label='true')
plt.plot(x, y_mean, 'k', lw=3, zorder=9, label='prediction')
plt.fill_between(x, y_mean - y_std, y_mean + y_std,alpha=0.2, color='k')
plt.ylim(0, 2)
# print(data.P[tid])
plt.scatter(data.P[tid], data.O[tid], c='r', s=50, zorder=10, edgecolors=(0, 0, 0),label='sample')
plt.xlabel('x',fontsize=fontsize+2)
plt.ylabel('y(t,x)',fontsize=fontsize+2)
plt.title('t=%f'%t,fontsize=fontsize+2)
print('t:',t,'x:',x[np.argmin(y)],'ymin:',y.min())
# legend = plt.legend(loc='upper center', shadow=True, fontsize='x-large')
legend = plt.legend(loc='upper right', shadow=False, fontsize=fontsize)
annot_min(x,y)
# plt.show()
plt.show(block=False)
plt.pause(0.5)
# input("Press [enter] to continue.")
# fig.savefig('obj_t_%f.eps'%t)
fig.savefig(f'obj_ntask{NI}_{expid}_tid_{tid}_t_{t:.1f}.pdf')
ymean_set.append(y_mean)
ytrue_set.append(y)
# show the distance among surrogate functions
R = np.zeros((NI, NI)) # Pearson sample correlation matrix of learned surrogates
R_true = np.zeros((NI, NI))# Pearson sample correlation of true functions
for i in range(NI):
for ip in range(i, NI):
ymean_i = ymean_set[i]
ymean_ip = ymean_set[ip]
ytrue_i = np.array((ytrue_set[i]).reshape((1, -1)))[0]
ytrue_ip = np.array((ytrue_set[ip]).reshape((1, -1)))[0]
# find the Pearson sample correlation coefficient
R[i, ip], _ = scipy.stats.pearsonr(ymean_i, ymean_ip)
R_true[i, ip], _ = scipy.stats.pearsonr(ytrue_i, ytrue_ip)
print("The correlation matrix among surrogate functions is: \n", R)
print("The correlation matrix among true functions is: \n", R_true)
new_Rtrue = R_true[
|
np.triu_indices(R_true.shape[0], 1)
|
numpy.triu_indices
|
import numpy
import pandas
import pytest
from scipy.sparse.csgraph import connected_components
from partitions import Graph, Partition
from partitions.tree import (
ReCom,
bipartition_tree,
contract_leaves_until_balanced_or_None,
map_with_boolean_array,
random_cut_edge,
random_spanning_tree,
recursive_partition,
)
class TestRandomSpanningTree:
def test_on_four_cycle(self, four_cycle):
tree = random_spanning_tree(four_cycle)
assert len(tree.nodes) == 4
assert len(tree.edges) == 3
def test_on_nonregular(self, nonregular):
tree = random_spanning_tree(nonregular)
assert len(tree.nodes) == 6
assert len(tree.edges) == 5
# This edge has to be in it, because 0 is a leaf
assert (0, 1) in tree.edges
assert (1, 0) in tree.edges
# One of these must be in it
assert (1, 3) in tree.edges or (3, 5) in tree.edges
# One of these must be in it
assert any(edge in tree.edges for edge in [(2, 4), (2, 5), (2, 1)])
for node in nonregular:
assert any(
(node, neighbor) in tree.edges
for neighbor in nonregular.neighbors[node]
)
class TestContractEdgesUntilBalanced:
def test_on_10x10(self, grid10x10):
graph = grid10x10
population = numpy.ones_like(graph.nodes)
bounds = (10, 90)
assignment = contract_leaves_until_balanced_or_None(graph, population, bounds)
assert len(assignment) == len(graph.nodes)
assert len(numpy.unique(assignment)) == 2
subgraph = graph.subgraph(graph.nodes[assignment])
assert connected_components(subgraph.neighbors.matrix, return_labels=False) == 1
def test_on_small(self):
graph = Graph.from_edges([(0, 1), (1, 2)])
population = numpy.ones_like(graph.nodes)
bounds = (0, 3)
assignment = contract_leaves_until_balanced_or_None(
graph, population, bounds, choice=lambda x: 1
)
assert assignment[0] == assignment[1] or assignment[1] == assignment[2]
assert len(numpy.unique(assignment)) == 2
def test_on_medium(self):
graph = Graph.from_edges(
[(0, 1), (1, 2), (2, 3), (3, 4), (0, 5), (5, 6), (5, 7), (5, 8)]
)
population =
|
numpy.ones_like(graph.nodes)
|
numpy.ones_like
|
import yaml
import argparse
import numpy as NP
from astropy.io import fits
from astropy.coordinates import Galactic, FK5, SkyCoord
from astropy import units
import progressbar as PGB
import healpy as HP
import geometry as GEOM
import primary_beams as PB
import ipdb as PDB
## Parse input arguments
parser = argparse.ArgumentParser(description='Program to estimate antenna power pattern analytically')
input_group = parser.add_argument_group('Input parameters', 'Input specifications')
input_group.add_argument('-i', '--infile', dest='infile', default='/home/t_nithyanandan/codes/mine/python/interferometry/main/pbparameters.yaml', type=argparse.FileType('r'), required=False, help='File specifying input parameters')
args = vars(parser.parse_args())
with args['infile'] as parms_file:
parms = yaml.safe_load(parms_file)
rootdir = parms['directory']['rootdir']
pbdir = parms['directory']['pbdir']
outfile = parms['directory']['outfile']
project = parms['project']
telescope_id = parms['telescope']['id']
element_shape = parms['antenna']['shape']
element_size = parms['antenna']['size']
element_ocoords = parms['antenna']['ocoords']
element_orientation = parms['antenna']['orientation']
ground_plane = parms['antenna']['ground_plane']
phased_array = parms['antenna']['phased_array']
short_dipole_approx = parms['antenna']['short_dipole']
half_wave_dipole_approx = parms['antenna']['halfwave_dipole']
phased_elements_file = parms['phasedarray']['file']
delayerr = parms['phasedarray']['delayerr']
gainerr = parms['phasedarray']['gainerr']
nrand = parms['phasedarray']['nrand']
A_eff = parms['telescope']['A_eff']
latitude = parms['telescope']['latitude']
longitude = parms['telescope']['longitude']
beam_info = parms['beam']
beam_id = beam_info['identifier']
beam_pol = beam_info['pol']
freq = parms['obsparm']['freq']
freq_resolution = parms['obsparm']['freq_resolution']
nchan = parms['obsparm']['nchan']
nside = parms['obsparm']['nside']
scheme = parms['obsparm']['ordering']
pnt_alt = parms['pointing']['alt']
pnt_az = parms['pointing']['az']
pnt_ha = parms['pointing']['ha']
pnt_dec = parms['pointing']['dec']
frequency_chunk_size = parms['processing']['freq_chunk_size']
n_freq_chunks = parms['processing']['n_freq_chunks']
nproc = parms['pp']['nproc']
pp_method = parms['pp']['method']
pp_key = parms['pp']['key']
if longitude is None:
longitude = 0.0
if project not in ['project_MWA', 'project_global_EoR', 'project_HERA', 'project_drift_scan', 'project_beams', 'project_LSTbin']:
raise ValueError('Invalid project specified')
else:
project_dir = project + '/'
pbeamdir = rootdir + project_dir + pbdir + '/'
if telescope_id not in ['mwa', 'vla', 'gmrt', 'hera', 'mwa_dipole', 'custom', 'paper_dipole', 'mwa_tools']:
raise ValueError('Invalid telescope specified')
if element_shape is None:
element_shape = 'delta'
elif element_shape not in ['dish', 'delta', 'dipole']:
raise ValueError('Invalid antenna element shape specified')
if element_shape != 'delta':
if element_size is None:
raise ValueError('No antenna element size specified')
elif element_size <= 0.0:
raise ValueError('Antenna element size must be positive')
if not isinstance(phased_array, bool):
raise TypeError('phased_array specification must be boolean')
if delayerr is None:
delayerr_str = ''
delayerr = 0.0
elif delayerr < 0.0:
raise ValueError('delayerr must be non-negative.')
else:
delayerr_str = 'derr_{0:.3f}ns'.format(delayerr)
delayerr *= 1e-9
if gainerr is None:
gainerr_str = ''
gainerr = 0.0
elif gainerr < 0.0:
raise ValueError('gainerr must be non-negative.')
else:
gainerr_str = '_gerr_{0:.2f}dB'.format(gainerr)
if nrand is None:
nrandom_str = ''
nrand = 1
elif nrand < 1:
raise ValueError('nrandom must be positive')
else:
nrandom_str = '_nrand_{0:0d}_'.format(nrand)
if (delayerr_str == '') and (gainerr_str == ''):
nrand = 1
nrandom_str = ''
delaygain_err_str = delayerr_str + gainerr_str + nrandom_str
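# Example of the resulting filename tag (hypothetical values): delayerr=0.05 ns, gainerr=0.5 dB
# and nrand=10 yield delaygain_err_str = 'derr_0.050ns_gerr_0.50dB_nrand_10_'; when both error
# terms are unspecified the tag collapses to an empty string.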
if (telescope_id == 'mwa') or (telescope_id == 'mwa_dipole'):
element_size = 0.74
element_shape = 'dipole'
if telescope_id == 'mwa': phased_array = True
elif telescope_id == 'vla':
element_size = 25.0
element_shape = 'dish'
elif telescope_id == 'gmrt':
element_size = 45.0
element_shape = 'dish'
elif telescope_id == 'hera':
element_size = 14.0
element_shape = 'dish'
elif telescope_id == 'custom':
if element_shape != 'delta':
if (element_shape is None) or (element_size is None):
raise ValueError('Both antenna element shape and size must be specified for the custom telescope type.')
elif element_size <= 0.0:
raise ValueError('Antenna element size must be positive.')
elif telescope_id == 'mwa_tools':
pass
else:
raise ValueError('telescope ID must be specified.')
if telescope_id == 'custom':
if element_shape == 'delta':
telescope_id = 'delta'
else:
telescope_id = '{0:.1f}m_{1:}'.format(element_size, element_shape)
if phased_array:
telescope_id = telescope_id + '_array'
telescope_str = telescope_id+'_'
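# Example (hypothetical): a custom 14 m dish becomes telescope_id = '14.0m_dish' and
# telescope_str = '14.0m_dish_'; with phased_array=True the '_array' suffix is appended first.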
if element_ocoords not in ['altaz', 'dircos']:
if element_ocoords is not None:
raise ValueError('Antenna element orientation must be "altaz" or "dircos"')
if element_orientation is None:
if element_ocoords is not None:
if element_ocoords == 'altaz':
if (telescope_id == 'mwa') or (telescope_id == 'mwa_dipole') or (element_shape == 'dipole'):
element_orientation = NP.asarray([0.0, 90.0]).reshape(1,-1)
else:
element_orientation = NP.asarray([90.0, 270.0]).reshape(1,-1)
elif element_ocoords == 'dircos':
if (telescope_id == 'mwa') or (telescope_id == 'mwa_dipole') or (element_shape == 'dipole'):
element_orientation = NP.asarray([1.0, 0.0, 0.0]).reshape(1,-1)
else:
element_orientation = NP.asarray([0.0, 0.0, 1.0]).reshape(1,-1)
else:
            raise ValueError('Invalid value specified for the antenna element orientation coordinate system.')
else:
if (telescope_id == 'mwa') or (telescope_id == 'mwa_dipole') or (element_shape == 'dipole'):
element_orientation = NP.asarray([0.0, 90.0]).reshape(1,-1)
else:
element_orientation = NP.asarray([90.0, 270.0]).reshape(1,-1)
element_ocoords = 'altaz'
else:
if element_ocoords is None:
raise ValueError('Antenna element orientation coordinate system must be specified to describe the specified antenna orientation.')
element_orientation = NP.asarray(element_orientation).reshape(1,-1)
if (element_orientation.size < 2) or (element_orientation.size > 3):
raise ValueError('Antenna element orientation must be a two- or three-element vector.')
elif (element_ocoords == 'altaz') and (element_orientation.size != 2):
raise ValueError('Antenna element orientation must be a two-element vector if using Alt-Az coordinates.')
if ground_plane is None:
ground_plane_str = 'no_ground_'
else:
if ground_plane > 0.0:
ground_plane_str = '{0:.1f}m_ground_'.format(ground_plane)
else:
raise ValueError('Height of antenna element above ground plane must be positive.')
telescope = {}
if telescope_id in ['mwa', 'vla', 'gmrt', 'hera', 'mwa_dipole', 'mwa_tools']:
telescope['id'] = telescope_id
telescope['shape'] = element_shape
telescope['size'] = element_size
telescope['orientation'] = element_orientation
telescope['ocoords'] = element_ocoords
telescope['groundplane'] = ground_plane
telescope['latitude'] = latitude
telescope['longitude'] = longitude
if (pnt_alt is not None) and (pnt_az is not None):
pointing_altaz = NP.asarray([pnt_alt, pnt_az])
elif (pnt_ha is not None) and (pnt_dec is not None):
pointing_hadec = NP.asarray([pnt_ha, pnt_dec]).reshape(1,-1)
pointing_altaz = GEOM.hadec2altaz(pointing_hadec, latitude, units='degrees')
pointing_altaz = pointing_altaz.reshape(-1)
else:
raise ValueError('pointing direction not properly specified')
freq =
|
NP.float(freq)
|
numpy.float
|
import os
import glob
import shutil
import logging
import matplotlib.pyplot as plt
import numpy as np
from scipy import ndimage, misc, signal, spatial
from skimage.filters import gaussian
import cv2
import math
def mkdir(path):
if not os.path.exists(path):
os.makedirs(path)
def re_mkdir(path):
if os.path.exists(path):
shutil.rmtree(path)
os.makedirs(path)
def init_log(output_dir):
re_mkdir(output_dir)
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(message)s',
datefmt='%Y%m%d-%H:%M:%S',
filename=os.path.join(output_dir, 'log.log'),
filemode='w')
console = logging.StreamHandler()
console.setLevel(logging.INFO)
logging.getLogger('').addHandler(console)
return logging
def copy_file(path_s, path_t):
shutil.copy(path_s, path_t)
def get_files_in_folder(folder, file_ext=None):
files = glob.glob(os.path.join(folder, "*" + file_ext))
files_name = []
for i in files:
_, name = os.path.split(i)
name, ext = os.path.splitext(name)
files_name.append(name)
return
|
np.asarray(files)
|
numpy.asarray
|
import sys
# sys.path.append(".") # Adds the module to path
import unittest
from .. import augmentations, optics, scatterers
from ..features import Feature
import numpy as np
class TestAugmentations(unittest.TestCase):
class DummyFeature(Feature):
__distributed__ = False
def get(self, image, **kwargs):
output = np.array([[[1], [2]], [[0], [0]]])
return output
def test_FlipLR(self):
feature = self.DummyFeature()
augmented_feature = augmentations.FlipLR(feature)
augmented_feature.update()
output_1 = augmented_feature.resolve()
augmented_feature.update()
output_2 = augmented_feature.resolve()
self.assertTrue(np.all(output_1 == np.array([[[1], [2]], [[0], [0]]])))
self.assertTrue(np.all(output_2 == np.array([[[2], [1]], [[0], [0]]])))
def test_FlipUD(self):
feature = self.DummyFeature()
augmented_feature = augmentations.FlipUD(feature)
augmented_feature.update()
output_1 = augmented_feature.resolve()
augmented_feature.update()
output_2 = augmented_feature.resolve()
self.assertTrue(np.all(output_1 == np.array([[[1], [2]], [[0], [0]]])))
self.assertTrue(np.all(output_2 == np.array([[[0], [0]], [[1], [2]]])))
def test_FlipDiagonal(self):
feature = self.DummyFeature()
augmented_feature = augmentations.FlipDiagonal(feature)
augmented_feature.update()
output_1 = augmented_feature.resolve()
augmented_feature.update()
output_2 = augmented_feature.resolve()
self.assertTrue(np.all(output_1 ==
|
np.array([[[1], [2]], [[0], [0]]])
|
numpy.array
|
import numpy as np
import simpl
import simplpy
import simpl_helpers as sh
import simpl_test_dirs as sd
import orientationanalysispy as orient
def Test1():
'''
    This will test manually creating a numpy array, wrapping the numpy array with
a simpl.DataArray, running a filter to adjust those values, and then showing
that the original numpy array has in fact been changed.
'''
print('===================== Test 1 =====================')
shape = [4, 3, 2]
cDims = [3]
    # Compute the flat shape of the data array: [number of tuples, number of components]
ashape = np.append([np.prod(shape)], cDims)
# Create a numpy array to hold our data
num_array = np.ndarray(ashape, dtype=np.float32, order='C')
# Get the numpy array as contiguous
z = np.asarray(num_array)
if not z.flags.contiguous:
z =
|
np.ascontiguousarray(z)
|
numpy.ascontiguousarray
|
import numpy as np
class Polygon:
"""docstring for Polygon"""
def __init__(self, v):
self.v = v
if self.v.shape[0] > 1 and len(self.v.shape) > 1:
if self.v.shape[0] > 2:
self.v = self.graham_scan(self.v)
self.e = [[self.v[i, :], self.v[i+1, :]] for i in range(self.v.shape[0]-1)] + [[self.v[-1, :], self.v[0, :]]]
def __str__(self):
return str(self.v) + '\n\n' + str(self.e)
def get_bounds(self):
x1, y1 = np.min(self.v, axis=0)
x2, y2 = np.max(self.v, axis=0)
return x1, y1, x2, y2
def point_intersection(self, point):
# http://geomalgorithms.com/a03-_inclusion.html
wn = 0
for edge in self.e:
if edge[0][1] <= point[1]:
if edge[1][1] > point[1]:
if self.is_left(edge[0], edge[1], point) > 0:
wn += 1
else:
if edge[1][1] <= point[1]:
if self.is_left(edge[0], edge[1], point) < 0:
wn -= 1
return wn != 0
def is_left(self, v_start, v_end, point):
# http://geomalgorithms.com/a01-_area.html
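        # Cross-product sign test: > 0 when `point` is left of the directed line
        # v_start -> v_end, < 0 when it is right of it, and 0 when the points are collinear.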
return (v_end[0] - v_start[0]) * (point[1] - v_start[1]) - (point[0] - v_start[0]) * (v_end[1] - v_start[1])
def less(self, p, p0):
pp = p-p0
return np.arctan2(pp[1], pp[0]), np.linalg.norm(pp)
def graham_scan(self, pts):
N, _ = pts.shape
if N < 3:
raise ValueError('At least 3 points are required to define a polygon')
# get p0
p0 = None
x_low = np.inf
y_low = np.inf
for p in pts:
if p[1] < y_low:
y_low = p[1]
x_low = p[0]
p0 = p
elif p[1] == y_low:
if p[0] < x_low:
x_low = p[0]
p0 = p
# sort
pts_sorted = sorted(pts, key=lambda p: self.less(p, p0))
if N == 3:
# return Polygon(pts_sorted)
return np.array(pts_sorted)
# scan
progress = np.zeros((N,))
p0 = N-1
p1 = 0
p2 = 1
p3 = 2
p4 = 3
convex_hull = [i for i in range(N)]
while not
|
np.all(progress)
|
numpy.all
|
import numpy as np
import matplotlib.pyplot as plt
from systemrl.environments.advgridworld import AdvGridworld
from systemrl.agents.sarsa import SARSA
from systemrl.agents.sarsa_lambda import SARSALambda
from systemrl.agents.q_learning import QLearning
from systemrl.agents.q_lambda import QLambda
from tqdm import tqdm
from collections import defaultdict
# decaying epsilon method for epsilon-greedy policy
def decaying_epsilon(current_episode, total_episodes):
power = 9*(current_episode/total_episodes)-5
epsilon = 1/(1+np.exp(power))
return epsilon
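# decaying_epsilon follows a logistic schedule: with, say, total_episodes=1000, epsilon starts
# near 1/(1+e^-5) ~ 0.99 at episode 0 and decays to about 1/(1+e^4) ~ 0.02 by the final episode,
# shifting the agent from exploration towards exploitation.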
def human_policy(human_actions, action_index):
possible_actions = [action_index[i] for i in human_actions]
human = defaultdict(lambda: -1)
best_action = [2,1,1,2,3,3,3,\
2,4,4,2,7,7,7,\
2,4,4,2,7,7,7,\
2,1,1,2,3,3,2,\
5,5,2,2,5,5,2,\
5,5,5,2,5,5,2,\
1,1,1,1,1,1,1]
for idx, action in enumerate(best_action):
if action in possible_actions:
human[idx] = action
return human
def get_state(state):
return state[0]+state[1]*7
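# get_state flattens an (x, y) position into the index x + 7*y; together with the 49-entry
# best_action table above this suggests a 7x7 gridworld (an assumption based on the code).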
human_actions = ["right","left","upri","dori"]
agent_actions = ["up", "down","uple","dole"]
action_index = {"up":0,"right":1,"down":2,"left":3,"upri":4,"dori":5,"dole":6,"uple":7}
env = AdvGridworld()
human = human_policy(human_actions, action_index)
num_actions = len(agent_actions) + 1 # +1 for letting human take action
gamma = 0.9
learning_rate = 0.01
lmbda = 0.3
#agent: Note that lambda is not required for SARSA and QLearning
agent = QLearning(num_actions, gamma, learning_rate)
episode_returns = []
episode_lengths = []
#total number of episodes
num_episodes = 1000
#maximum number of steps per episode. if agent exceeds this..
#..we terminate and give a large negative reward
max_count = 200
for episodes in tqdm(range(num_episodes)):
env.reset()
state = env.state
_state = get_state(state)
is_end = False
returns = 0
count = 0
epsilon = decaying_epsilon(episodes, num_episodes)
#iterate until episode ends
while not is_end and count<max_count:
count += 1
#selection of agent action
if np.random.random() < epsilon:
agent_action = np.random.randint(num_actions)
else:
agent_action = agent.get_action(_state)
#human is taking action
if agent_action == num_actions-1:
action = human[_state]
            # there is no "no movement" option here; if the human has no good
            # action for this state, she takes a random action instead
if action == -1:
action = action_index[
|
np.random.choice(human_actions)
|
numpy.random.choice
|
import cv2
import numpy as np
import math
from datetime import datetime
from matplotlib import pyplot as plt
from collections import deque
from scipy.stats import mode
from scipy.optimize import curve_fit
from yolo_model import BoundBox
temp_dir = "images/detection/detect.jpg"
WHITE = (255, 255, 255)
YELLOW = (66, 244, 238)
GREEN = (80, 220, 60)
LIGHT_CYAN = (255, 255, 224)
DARK_BLUE = (139, 0, 0)
GRAY = (128, 128, 128)
BLUE = (255,0,0)
RED = (0,0,255)
ORANGE =(0,165,255)
BLACK =(0,0,0)
vehicles = [1,2,3,5,6,7,8]
animals =[15,16,17,18,19,21,22,23,]
humans =[0]
obstructions = humans + animals + vehicles
classes = [#
'Ped','bicycle','car','motorbike','aeroplane','bus',\
'train','truck','boat','traffic light','fire hydrant','stop sign',\
'parking meter','bench','bird','cat','dog','horse',\
'sheep','cow','elephant', 'bear','zebra','giraffe',\
'backpack','umbrella','handbag','tie','suitcase','frisbee',\
'skis','snowboard','sports ball','kite','baseball bat',\
'baseball glove','skateboard','surfboard','tennis racket','bottle','wine glass',\
'cup','fork','knife','spoon','bowl','banana',\
'apple','sandwich','orange','broccoli','carrot','hot dog',\
'pizza','donut','cake','chair','sofa','pottedplant',\
'bed','diningtable','toilet','tvmonitor','laptop','mouse',\
'remote','keyboard','cell phone','microwave','oven','toaster',\
'sink','refrigerator','book','clock','vase','scissors',\
'teddy bear','hair drier','toothbrush' ]
def create_queue(length = 10):
return deque(maxlen=length)
def polyfunc(x, a2, a1, a0):
return a2*x*x + a1*x + a0
class OBSTACLE(BoundBox):
xmax :int
xmin :int
ymin :int
ymax :int
xmid :int
ymid :int
lane : str
x : int
y : int
tracker = None
position : [int,int]
PERIOD = 4
__count = 0
def __init__(self,box: BoundBox, _id, v_updt =5) :
self.col_time:float =999.0
self._id = _id
self.position_hist = create_queue(v_updt)
# self.position_hist.append(dst)
self.update_coord(box)
self.update_score(box)
self.velocity = np.zeros((2))
self.score=box.score
self.label = box.label
def update_obstacle(self, dst, fps, n=5) :
self.position = dst
if self.lane == "my" :
self.col_time = min(int(dst[1]/(self.velocity[1]+0.001)*18/5),99)
else :
self.col_time = None
if (self.__count > self.position_hist.maxlen):
self.velocity = ((self.position-self.position_hist[0] ) * fps / self.position_hist.maxlen *5/18 ).astype(int)
self.__count += 1
self.position_hist.append(dst)
def update_coord(self,box):
self.xmax = box.xmax
self.xmin = box.xmin
self.ymin = box.ymin
self.ymax = box.ymax
self.xmid = int((box.xmax+box.xmin)/2)
self.ymid = int((box.ymax+box.ymin)/2)
self.position = np.mean(self.position_hist, axis = 0)
def update_score(self,box):
self.score=box.score
self.label = box.label
def update_box(self,box):
self.update_coord(box)
self.update_score(box)
class LANE_HISTORY:
def __init__(self,fps, queue_depth=12,poly_col=np.array([1,1,1]), test_points=[300, 500, 700], poly_max_deviation_distance=50, smoothing = 10, ploty =np.array([])):
self.fps =fps
self.test_points = np.asarray(test_points)
self.poly_max_deviation_distance = poly_max_deviation_distance
self.lost = False
self.max_lost_count = queue_depth
self.lost_count = self.max_lost_count + 10
self.smoothing = smoothing
self.ploty =ploty
self.leftFit = None # LEFT FIT POINTS
self.rightFit = None # RIGHT FIT POINTS
self.leftx = create_queue(self.fps//4)
self.rightx = create_queue(self.fps//4)
self.width = None
self.previous_centers = None
self.current_coef = None
self.smoothed_poly = poly_col
self.poly_history = create_queue(queue_depth)
self.y = None
self.x = None
self.appended = 0
self.breached = 0
self.reset = 0
self.curvature = 0
self.centerx = 0
self.lane_offset = 0
self.left_windows = []
self.right_windows=[]
def compute_lane_points(self) :
self.leftFit = self.previous_centers - self.width//2
self.rightFit = self.previous_centers + self.width//2
def compute_curvature(self, alpha, beta):
y_eval = -np.max(self.ploty)
lp = self.smoothed_poly
self.curvature = int(((beta**2 + (2 * lp[0] * y_eval * alpha**2 + \
lp[1]*alpha)**2)**1.5)/(np.absolute(2 * lp[0]*(alpha*beta)**2)))
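        # This appears to follow the usual radius-of-curvature formula
        # R = (1 + (2*A*y + B)**2)**1.5 / |2*A| for x = A*y**2 + B*y + C, with alpha and
        # beta rescaling the pixel-space fit into real-world units (assumption).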
return
def compute_offset(self):
y_eval = -np.max(self.ploty)
lp = self.smoothed_poly
self.lane_offset = lp[0] * y_eval**2 + lp[1] * y_eval + lp[2] - self.centerx
if abs(self.lane_offset) > self.width //2 :
if self.lane_offset < 0 :
print("\n\rLANE CHANGE TO RIGHT\033[F")
self.poly_history = create_queue(self.poly_history.maxlen)
self.leftx = self.rightx
self.rightx = create_queue(length = self.rightx.maxlen)
self.rightx.append(int(np.mean(self.leftx) + self.width ))
self.previous_centers = self.previous_centers+self.width
else :
print("\n\rLANE CHANGE TO LEFT\033[F")
self.poly_history = create_queue(self.poly_history.maxlen)
self.rightx = self.leftx
self.leftx = create_queue(length = self.leftx.maxlen)
self.leftx.append(int(np.mean(self.rightx) - self.width ))
self.previous_centers = self.previous_centers-self.width
else:
self.leftx.append(self.previous_centers[0] - self.width//2)
self.rightx.append(self.previous_centers[0] + self.width//2)
return
def addlane(self, y,x):
status = "APPENDED | "
self.y = y
self.x = x
self.current_coef,_ = curve_fit(polyfunc,self.y, self.x, p0=self.smoothed_poly)
if (self.lost_count > self.max_lost_count ) :
status ="RESET | "
self.get_smoothed_polynomial()
self.lost = False
self.lost_count = 0
self.reset +=1
return True, status
test_y_smooth = np.asarray(list(map(lambda x: np.polyval(self.smoothed_poly,x), -self.test_points)))
test_y_new = np.asarray(list(map(lambda x: np.polyval(self.current_coef,x), -self.test_points)))
dist = np.absolute(test_y_smooth - test_y_new)
max_dist = dist[np.argmax(dist)]
if max_dist > self.poly_max_deviation_distance:
status = "BREACHED | "
self.lost = True
self.lost_count += 1
self.breached +=1
return False , status
self.get_smoothed_polynomial()
self.lost = False
self.lost_count = 0
self.appended +=1
return True, status
def get_smoothed_polynomial(self):
self.poly_history.append(self.current_coef)
all_coeffs = np.asarray(list(self.poly_history))
self.smoothed_poly = np.mean(all_coeffs[-self.smoothing:,:], axis=0)
self.previous_centers = np.asarray([np.polyval(self.smoothed_poly,-x) for x in self.ploty], dtype=int)
self.compute_lane_points()
self.compute_offset()
return self.smoothed_poly
def calculate_position(self,x,y):
position = np.polyval(self.smoothed_poly ,y) - x
status = "right"
if position < - self.width//2 :
status = "left"
elif position < self.width//2:
status = "my"
return status
class LANE_DETECTION:
"""
The AdvancedLaneDetectorWithMemory is a class that can detect lines on the road
"""
UNWARPED_SIZE :(int,int)
WRAPPED_WIDTH : int
_pip_size=(int,int)
_pip__x_offset=20
_pip__y_offset=10
img_dimensions=(int,int)
temp_dir = "./images/detection/"
windows_per_line = 30
vanishing_point:(int,int)
real_world_lane_size_meters=(32, 3.7)
font = cv2.FONT_HERSHEY_SIMPLEX
bottom = 0
def __init__(self, img,fps,
yellow_lower = np.uint8([ 20, 50, 50]),
yellow_upper =
|
np.uint8([35, 255, 255])
|
numpy.uint8
|
# ***************************************************************
# Copyright (c) 2021 Jittor.
# All Rights Reserved.
# Maintainers:
# <NAME> <<EMAIL>>.
#
# Contributors:
# <NAME> <<EMAIL>>
#
# This file is subject to the terms and conditions defined in
# file 'LICENSE.txt', which is part of this source code package.
# ***************************************************************
import unittest
import random
from PIL import Image
import numpy as np
from numpy.testing import assert_array_almost_equal
import jittor as jt
import jittor.transform as transform
try:
from scipy import stats
except ImportError:
stats = None
class Tester(unittest.TestCase):
def test_crop(self):
height = random.randint(10, 32) * 2
width = random.randint(10, 32) * 2
        oheight = random.randint(5, (height - 2) // 2) * 2
        owidth = random.randint(5, (width - 2) // 2) * 2
img = np.ones([height, width, 3])
oh1 = (height - oheight) // 2
ow1 = (width - owidth) // 2
# imgnarrow = img[oh1:oh1 + oheight, ow1:ow1 + owidth, :]
# imgnarrow.fill(0)
img[oh1:oh1 + oheight, ow1:ow1 + owidth, :] = 0
# img = jt.array(img)
result = transform.Compose([
transform.ToPILImage(),
transform.CenterCrop((oheight, owidth)),
transform.ToTensor(),
])(img)
self.assertEqual(result.sum(), 0,
f"height: {height} width: {width} oheight: {oheight} owdith: {owidth}")
oheight += 1
owidth += 1
result = transform.Compose([
transform.ToPILImage(),
transform.CenterCrop((oheight, owidth)),
transform.ToTensor(),
])(img)
sum1 = result.sum()
# TODO: not pass
# self.assertGreater(sum1, 1,
# f"height: {height} width: {width} oheight: {oheight} owdith: {owidth}")
oheight += 1
owidth += 1
result = transform.Compose([
transform.ToPILImage(),
transform.CenterCrop((oheight, owidth)),
transform.ToTensor(),
])(img)
sum2 = result.sum()
self.assertGreater(sum2, 0,
f"height: {height} width: {width} oheight: {oheight} owdith: {owidth}")
self.assertGreaterEqual(sum2, sum1,
f"height: {height} width: {width} oheight: {oheight} owdith: {owidth}")
def test_resize(self):
height = random.randint(24, 32) * 2
width = random.randint(24, 32) * 2
osize = random.randint(5, 12) * 2
img = jt.ones([height, width, 3])
result = transform.Compose([
transform.ToPILImage(),
transform.Resize(osize),
transform.ToTensor(),
])(img)
self.assertIn(osize, result.shape)
if height < width:
self.assertLessEqual(result.shape[1], result.shape[2])
elif width < height:
self.assertGreaterEqual(result.shape[1], result.shape[2])
result = transform.Compose([
transform.ToPILImage(),
transform.Resize([osize, osize]),
transform.ToTensor(),
])(img)
self.assertIn(osize, result.shape)
self.assertEqual(result.shape[1], osize)
self.assertEqual(result.shape[2], osize)
oheight = random.randint(5, 12) * 2
owidth = random.randint(5, 12) * 2
result = transform.Compose([
transform.ToPILImage(),
transform.Resize((oheight, owidth)),
transform.ToTensor(),
])(img)
self.assertEqual(result.shape[1], oheight)
self.assertEqual(result.shape[2], owidth)
result = transform.Compose([
transform.ToPILImage(),
transform.Resize([oheight, owidth]),
transform.ToTensor(),
])(img)
self.assertEqual(result.shape[1], oheight)
self.assertEqual(result.shape[2], owidth)
def test_random_crop(self):
height = random.randint(10, 32) * 2
width = random.randint(10, 32) * 2
        oheight = random.randint(5, (height - 2) // 2) * 2
        owidth = random.randint(5, (width - 2) // 2) * 2
img = np.ones((height, width, 3))
result = transform.Compose([
transform.ToPILImage(),
transform.RandomCrop((oheight, owidth)),
transform.ToTensor(),
])(img)
self.assertEqual(result.shape[1], oheight)
self.assertEqual(result.shape[2], owidth)
result = transform.Compose([
transform.ToPILImage(),
transform.RandomCrop((oheight, owidth)),
transform.ToTensor(),
])(img)
self.assertEqual(result.shape[1], oheight)
self.assertEqual(result.shape[2], owidth)
result = transform.Compose([
transform.ToPILImage(),
transform.RandomCrop((height, width)),
transform.ToTensor()
])(img)
self.assertEqual(result.shape[1], height)
self.assertEqual(result.shape[2], width)
self.assertTrue(np.allclose(img, result.transpose(1,2,0)))
with self.assertRaises(AssertionError):
result = transform.Compose([
transform.ToPILImage(),
transform.RandomCrop((height + 1, width + 1)),
transform.ToTensor(),
])(img)
def test_lambda(self):
trans = transform.Lambda(lambda x: x.add(10))
x = jt.random([10])
y = trans(x)
self.assertTrue(np.allclose(y.data, jt.add(x, 10).data))
@unittest.skipIf(stats is None, 'scipy.stats not available')
def test_random_apply(self):
random_state = random.getstate()
random.seed(42)
random_apply_transform = transform.RandomApply(
[
transform.RandomHorizontalFlip(),
transform.RandomVerticalFlip(),
], p=0.4
)
img = transform.ToPILImage()(jt.random((3, 10, 10)))
num_samples = 250
num_applies = 0
for _ in range(num_samples):
out = random_apply_transform(img)
if out != img:
num_applies += 1
p_value = stats.binom_test(num_applies, num_samples, p=0.3)
random.setstate(random_state)
self.assertGreater(p_value, 0.0001)
@unittest.skipIf(stats is None, 'scipy.stats not available')
def test_random_choice(self):
random_state = random.getstate()
random.seed(42)
random_choice_transform = transform.RandomChoice(
[
transform.Resize(15),
transform.Resize(20),
transform.CenterCrop(10)
]
)
img = transform.ToPILImage()(jt.random((25, 25, 3)))
num_samples = 250
num_resize_15 = 0
num_resize_20 = 0
num_crop_10 = 0
for _ in range(num_samples):
out = random_choice_transform(img)
if out.size == (15, 15):
num_resize_15 += 1
elif out.size == (20, 20):
num_resize_20 += 1
elif out.size == (10, 10):
num_crop_10 += 1
p_value = stats.binom_test(num_resize_15, num_samples, p=0.33333)
self.assertGreater(p_value, 0.0001)
p_value = stats.binom_test(num_resize_20, num_samples, p=0.33333)
self.assertGreater(p_value, 0.0001)
p_value = stats.binom_test(num_crop_10, num_samples, p=0.33333)
self.assertGreater(p_value, 0.0001)
random.setstate(random_state)
@unittest.skipIf(stats is None, 'scipy.stats not available')
def test_random_order(self):
random_state = random.getstate()
random.seed(42)
random_order_transform = transform.RandomOrder(
[
transform.Resize(20),
transform.CenterCrop(10)
]
)
img = transform.ToPILImage()(jt.random((3, 25, 25)))
num_samples = 250
num_normal_order = 0
resize_crop_out = transform.CenterCrop(10)(transform.Resize(20)(img))
for _ in range(num_samples):
out = random_order_transform(img)
if out == resize_crop_out:
num_normal_order += 1
p_value = stats.binom_test(num_normal_order, num_samples, p=0.5)
random.setstate(random_state)
self.assertGreater(p_value, 0.0001)
def test_to_tensor(self):
test_channels = [1, 3, 4]
height, width = 4, 4
trans = transform.ToTensor()
with self.assertRaises(TypeError):
trans(np.random.rand(1, height, width).tolist())
with self.assertRaises(ValueError):
trans(np.random.rand(height))
trans(np.random.rand(1, 1, height, width))
for channels in test_channels:
input_data = np.random.randint(low=0, high=255, size=(height, width, channels)).astype(np.float32) / np.float32(255.0)
img = transform.ToPILImage()(input_data)
output = trans(img)
expect = input_data.transpose(2,0,1)
self.assertTrue(np.allclose(expect, output), f"{expect.shape}\n{output.shape}")
ndarray = np.random.randint(low=0, high=255, size=(channels, height, width)).astype(np.uint8)
output = trans(ndarray)
expected_output = ndarray / 255.0
np.testing.assert_allclose(output, expected_output)
ndarray = np.random.rand(channels, height, width).astype(np.float32)
output = trans(ndarray)
expected_output = ndarray
self.assertTrue(np.allclose(output, expected_output))
# separate test for mode '1' PIL images
input_data = np.random.binomial(1, 0.5, size=(height, width, 1)).astype(np.uint8)
img = transform.ToPILImage()(input_data * 255).convert('1')
output = trans(img)
self.assertTrue(np.allclose(input_data[:,:,0], output[0]), f"{input_data.shape}\n{output.shape}")
def test_1_channel_tensor_to_pil_image(self):
to_tensor = transform.ToTensor()
shape = (4, 4, 1)
img_data_float = jt.array(np.random.rand(*shape), dtype='float32')
img_data_byte = jt.array(np.random.randint(0, 255, shape), dtype='uint8')
img_data_short = jt.array(np.random.randint(0, 32767, shape), dtype='int16')
img_data_int = jt.array(np.random.randint(0, 2147483647, shape), dtype='int32')
inputs = [img_data_float, img_data_byte, img_data_short, img_data_int]
expected_outputs = [img_data_float.multiply(255).int().float().divide(255).numpy(),
img_data_byte.float().divide(255.0).numpy(),
img_data_short.numpy(),
img_data_int.numpy()]
expected_modes = ['F', 'L', 'I;16', 'I']
for img_data, expected_output, mode in zip(inputs, expected_outputs, expected_modes):
for t in [transform.ToPILImage(), transform.ToPILImage(mode=mode)]:
img = t(img_data)
self.assertEqual(img.mode, mode)
np.testing.assert_allclose(expected_output[:,:,0], to_tensor(img)[0], atol=0.01)
# 'F' mode for torch.FloatTensor
img_F_mode = transform.ToPILImage(mode='F')(img_data_float)
self.assertEqual(img_F_mode.mode, 'F')
def test_1_channel_ndarray_to_pil_image(self):
img_data_float = np.random.rand(4, 4, 1).astype(np.float32)
img_data_byte = np.random.randint(0, 255, (4, 4, 1)).astype(np.uint8)
img_data_short = np.random.randint(0, 32767, (4, 4, 1)).astype(np.int16)
img_data_int = np.random.randint(0, 2147483647, (4, 4, 1)).astype(np.int32)
inputs = [img_data_float, img_data_byte, img_data_short, img_data_int]
expected_modes = ['F', 'L', 'I;16', 'I']
for img_data, mode in zip(inputs, expected_modes):
for t in [transform.ToPILImage(), transform.ToPILImage(mode=mode)]:
img = t(img_data)
self.assertEqual(img.mode, mode)
self.assertTrue(np.allclose(img_data[:, :, 0], img))
def test_2_channel_ndarray_to_pil_image(self):
def verify_img_data(img_data, mode):
if mode is None:
img = transform.ToPILImage()(img_data)
self.assertEqual(img.mode, 'LA') # default should assume LA
else:
img = transform.ToPILImage(mode=mode)(img_data)
self.assertEqual(img.mode, mode)
split = img.split()
for i in range(2):
self.assertTrue(np.allclose(img_data[:, :, i], split[i]))
img_data = np.random.randint(0, 255, (4, 4, 2)).astype(np.uint8)
for mode in [None, 'LA']:
verify_img_data(img_data, mode)
with self.assertRaises(ValueError):
# should raise if we try a mode for 4 or 1 or 3 channel images
transform.ToPILImage(mode='RGBA')(img_data)
transform.ToPILImage(mode='P')(img_data)
transform.ToPILImage(mode='RGB')(img_data)
def test_2_channel_tensor_to_pil_image(self):
def verify_img_data(img_data, expected_output, mode):
if mode is None:
img = transform.ToPILImage()(img_data)
self.assertEqual(img.mode, 'LA') # default should assume LA
else:
img = transform.ToPILImage(mode=mode)(img_data)
self.assertEqual(img.mode, mode)
split = img.split()
for i in range(2):
self.assertTrue(np.allclose(expected_output[:,:,i], transform.to_tensor(split[i])))
img_data = jt.random((4, 4, 2))
expected_output = img_data.multiply(255).int().float().divide(255)
for mode in [None, 'LA']:
verify_img_data(img_data, expected_output, mode=mode)
with self.assertRaises(ValueError):
# should raise if we try a mode for 4 or 1 or 3 channel images
transform.ToPILImage(mode='RGBA')(img_data)
transform.ToPILImage(mode='P')(img_data)
transform.ToPILImage(mode='RGB')(img_data)
def test_3_channel_tensor_to_pil_image(self):
def verify_img_data(img_data, expected_output, mode):
if mode is None:
img = transform.ToPILImage()(img_data)
self.assertEqual(img.mode, 'RGB') # default should assume RGB
else:
img = transform.ToPILImage(mode=mode)(img_data)
self.assertEqual(img.mode, mode)
split = img.split()
for i in range(3):
self.assertTrue(np.allclose(expected_output[:,:,i], transform.to_tensor(split[i])))
img_data = jt.random((4, 4, 3))
expected_output = img_data.multiply(255).int().float().divide(255)
for mode in [None, 'RGB', 'HSV', 'YCbCr']:
verify_img_data(img_data, expected_output, mode=mode)
with self.assertRaises(ValueError):
# should raise if we try a mode for 4 or 1 or 2 channel images
transform.ToPILImage(mode='RGBA')(img_data)
transform.ToPILImage(mode='P')(img_data)
transform.ToPILImage(mode='LA')(img_data)
with self.assertRaises(ValueError):
transform.ToPILImage()(jt.random((1, 3, 4, 4)))
def test_3_channel_ndarray_to_pil_image(self):
def verify_img_data(img_data, mode):
if mode is None:
img = transform.ToPILImage()(img_data)
self.assertEqual(img.mode, 'RGB') # default should assume RGB
else:
img = transform.ToPILImage(mode=mode)(img_data)
self.assertEqual(img.mode, mode)
split = img.split()
for i in range(3):
self.assertTrue(np.allclose(img_data[:, :, i], split[i]))
img_data =
|
np.random.randint(0, 255, (4, 4, 3))
|
numpy.random.randint
|
"""
Pre-binning class.
"""
# <NAME> <<EMAIL>>
# Copyright (C) 2019
import numpy as np
from sklearn.preprocessing import KBinsDiscretizer
from sklearn.tree import _tree
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from .mdlp import MDLP
class PreBinning:
"""Prebinning algorithms.
Parameters
----------
problem_type:
The problem type depending on the target type.
method : str
        Available methods are 'uniform', 'quantile', 'cart' and 'mdlp'.
n_bins : int
The number of bins to produce.
min_bin_size : int, float
The minimum bin size.
**kwargs : keyword arguments
Keyword arguments for prebinning method. See notes.
Notes
-----
Keyword arguments are those available in the following classes:
* ``method="uniform"``: `sklearn.preprocessing.KBinsDiscretizer.
* ``method="quantile"``: `sklearn.preprocessing.KBinsDiscretizer.
* ``method="cart"``: sklearn.tree.DecistionTreeClassifier.
* ``method="mdlp"``: optbinning.binning.mdlp.MDLP.
"""
def __init__(self, problem_type, method, n_bins, min_bin_size,
class_weight=None, **kwargs):
self.problem_type = problem_type
self.method = method
self.n_bins = n_bins
self.min_bin_size = min_bin_size
self.class_weight = class_weight
self.kwargs = kwargs
self._splits = None
def fit(self, x, y, sample_weight=None):
"""Fit PreBinning algorithm.
Parameters
----------
x : array-like, shape = (n_samples)
Data samples, where n_samples is the number of samples.
y : array-like, shape = (n_samples)
Target vector relative to x.
sample_weight : array-like of shape (n_samples,) (default=None)
Array of weights that are assigned to individual samples.
Returns
-------
self : object
"""
if self.method not in ("uniform", "quantile", "cart", "mdlp"):
raise ValueError('Invalid value for prebinning method. Allowed '
'string values are "cart", "mdlp", "quantile" '
'and "uniform".')
if self.problem_type not in ("classification", "regression"):
raise ValueError('Invalid value for problem_type. Allowed '
'string values are "classification" and '
'"regression".')
if self.problem_type == "regression" and self.method == "mdlp":
raise ValueError("mdlp method can only handle binary "
"classification problems.")
if self.method in ("uniform", "quantile"):
unsup_kwargs = {"n_bins": self.n_bins, "strategy": self.method}
unsup_kwargs.update(**self.kwargs)
est = KBinsDiscretizer(**unsup_kwargs)
est.fit(x.reshape(-1, 1), y)
self._splits = est.bin_edges_[0][1:-1]
elif self.method == "cart":
cart_kwargs = {
"min_samples_leaf": self.min_bin_size,
"max_leaf_nodes": self.n_bins}
if self.problem_type == "classification":
cart_kwargs["class_weight"] = self.class_weight
cart_kwargs.update(**self.kwargs)
est = DecisionTreeClassifier(**cart_kwargs)
else:
cart_kwargs.update(**self.kwargs)
est = DecisionTreeRegressor(**cart_kwargs)
est.fit(x.reshape(-1, 1), y, sample_weight=sample_weight)
splits =
|
np.unique(est.tree_.threshold)
|
numpy.unique
|
from typing import Optional, List, Dict
import gym
import numpy as np
from flatland.core.env import Environment
from flatland.core.env_observation_builder import ObservationBuilder
from flatland.envs.agent_utils import RailAgentStatus
from flatland.envs.rail_env import RailEnv
from flatlander.envs.observations import Observation, register_obs
from flatlander.envs.observations.common.shortest_path_conflict_detector import ShortestPathConflictDetector
@register_obs("simple_meta")
class SimpleMetaObservation(Observation):
def __init__(self, config) -> None:
super().__init__(config)
self._builder = SimpleMetaObservationBuilder()
def builder(self) -> ObservationBuilder:
return self._builder
def observation_space(self) -> gym.Space:
return gym.spaces.Box(low=0, high=1, shape=(5,),
dtype=np.float32) # own distance to target & nr agents at start
class SimpleMetaObservationBuilder(ObservationBuilder):
def get_many(self, handles: Optional[List[int]] = None):
if self.env._elapsed_steps == 0:
self.conflict_detector = ShortestPathConflictDetector()
self.conflict_detector.set_env(self.env)
positions = {h: self.get_position(h) for h in handles}
directions = {h: self.env.agents[h].direction for h in handles}
agent_conflicts, _ = self.conflict_detector.detect_conflicts(handles=handles,
positions=positions,
directions=directions)
if handles is None:
handles = []
obs = {h: self.get(h) for h in handles}
max_start_and_dir = max(obs.values(), key=lambda v: v[2])[2]
max_start = max(obs.values(), key=lambda v: v[1])[1]
if len(agent_conflicts) < 1:
max_conflicts = 1e-7
else:
max_conflicts = len(max(agent_conflicts.values(), key=lambda v: len(v))) + 1e-7
for h, o in obs.items():
o[-4] = o[-4] / max_start
o[-3] = o[-3] / max_start_and_dir
o[-2] = len(set(agent_conflicts[h])) / max_conflicts
o[-1] = len(agent_conflicts[h]) / max_conflicts
return obs
else:
return {h: [] for h in handles}
def get(self, handle: int = 0):
"""
        compute density map for agent: a value is assigned to every cell along the shortest path between
the agent and its target based on the distance to the agent, i.e. the number of time steps the
agent needs to reach the cell, encoding the time information.
"""
num_agents = self.env.get_num_agents()
distance_map = self.env.distance_map.get()
        nan_inf_mask = ((distance_map != np.inf) * (np.abs(np.isnan(distance_map) - 1))).astype(bool)
max_distance = np.max(distance_map[nan_inf_mask])
agent = self.env.agents[handle]
init_pos = agent.initial_position
init_dir = agent.initial_direction
agents_same_start = [a for a in self.env.agents
if a.initial_position == init_pos]
nr_agents_same_start = len(agents_same_start)
nr_agents_same_start_and_dir = len([a.handle for a in agents_same_start
if a.initial_direction == init_dir])
distance = distance_map[handle][init_pos + (init_dir,)]
distance = max_distance if (
distance == np.inf or
|
np.isnan(distance)
|
numpy.isnan
|
import os
from classification import FastLogisticRegression
from chart import ClassificationChart as cChart
from utility import FeatureAugmentation as FA
import numpy as np
path = os.path.dirname(os.path.abspath(__file__))
def create_test(clf, degree, _min, step, _max):
# create test data
x1 = np.linspace(_min, step, _max)
x2 = np.linspace(_min, step, _max)
X = np.zeros((len(x1), 3))
X[:, 0] = x1
X[:, 1] = x2
len1 = len(x1)
z = np.zeros(shape=(len(x1), len(x2)))
for i in range(len(x1)):
for j in range(len(x2)):
x = np.array([[X[i, 0], X[j, 1]]])
t = FA.map(x, degree)
o = np.ones(t.shape[1]+1)
o[1:] = t.flatten()
z[i, j] = np.dot(o, clf.model)
return X, z
data =
|
np.loadtxt(path+'/data/data1.txt', delimiter=',')
|
numpy.loadtxt
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 20 15:59:25 2015
@author: jordan
"""
# Should I find a way to use Python "StatsModels" to do linear fitting with
# uncertainties in X and Y?
import os
import sys
import numpy as np
from astropy.wcs import WCS
from astropy.table import Table, Column, hstack, join
import astropy.units as u
from astropy.coordinates import SkyCoord
from astropy.stats import sigma_clipped_stats
from photutils import centroid_com, aperture_photometry, CircularAperture, CircularAnnulus
from scipy.odr import *
from matplotlib import pyplot as plt
import pdb
# Add the AstroImage class
from astroimage.astroimage import AstroImage
# This script will compute the photometry of polarization standard stars
# and output a file containing the polarization position angle
# additive correction and the polarization efficiency of the PRISM instrument.
#==============================================================================
# *********************** CUSTOM USER CODE ************************************
# this is where the user specifies where the raw data is stored
# and some of the subdirectory structure to find the actual .FITS images
#==============================================================================
# Define how the font will appear in the plots
font = {'family': 'sans-serif',
'color': 'black',
'weight': 'normal',
'size': 14
}
# This is the location where all pyPol data will be saved
pyPol_data = 'C:\\Users\\Jordan\\FITS_data\\PRISM_data\\pyPol_data\\201501'
# This is the name of the file in which the calibration constants will be stored
calDataFile = os.path.join(pyPol_data, 'calData.csv')
# The user needs to specify the "Target" values associated with
# calibration data in the fileIndex.
targets = ['Taurus_Cal', 'Orion_Cal']
# Define the saturation limit to use for whether or not to trust photometry
satLimit = 1.6e4
# Define some useful conversion factors
rad2deg = (180.0/np.pi)
deg2rad = (np.pi/180.0)
# This is the location of the previously generated masks (step 4)
maskDir = os.path.join(pyPol_data, 'Masks')
# Setup new directory for polarimetry data
polarimetryDir = os.path.join(pyPol_data, 'Polarimetry')
if (not os.path.isdir(polarimetryDir)):
os.mkdir(polarimetryDir, 0o755)
polAngDir = os.path.join(polarimetryDir, 'polAngImgs')
if (not os.path.isdir(polAngDir)):
os.mkdir(polAngDir, 0o755)
################################################################################
# STEP 1 -- MEASURE THE PHOTOMETRY OF THE CALIBRATION STARS
################################################################################
if (not os.path.isfile(RtableFile)) or (not os.path.isfile(VtableFile)):
# Read in the indexFile data and select the filenames
print('\nReading file index from disk')
indexFile = os.path.join(pyPol_data, 'reducedFileIndex.csv')
fileIndex = Table.read(indexFile, format='ascii.csv')
# Read in the polarization standards file
print('Reading polarization data from disk')
polStandardFile = os.path.join('polStandards.csv')
polStandards = Table.read(polStandardFile, format='ascii.csv')
# Construct SkyCoord object containing the coordinates of the standards
ra1 = polStandards['RA'].data
dec1 = polStandards['Dec'].data
polStanCoords = SkyCoord(ra = ra1, dec = dec1,
unit = (u.hour, u.deg), frame = 'fk5')
# Determine which parts of the fileIndex are usable
useFiles = fileIndex['Use'] == 1
# Further restrict the selection to only include the pre-selected targets
targetFiles = np.array([False]*len(fileIndex), dtype=bool)
for target in targets:
targetFiles = np.logical_or(targetFiles,
fileIndex['Target'] == target)
# Cull the fileIndex to ONLY include the specified calibration targets
calFiles = np.logical_and(useFiles, targetFiles)
if np.sum(calFiles) > 0:
fileInds = np.where(calFiles)
fileIndex = fileIndex[fileInds]
# Group the fileIndex by...
# 1. Target
# 2. Waveband
# 3. Dither (pattern)
fileIndexByWaveband = fileIndex.group_by(['Waveband'])
# Loop through each target
for iGroup, group in enumerate(fileIndexByWaveband.groups):
# Grab the current group information
thisWaveband = str(np.unique(group['Waveband'].data)[0])
# Update the user on processing status
print('\nProcessing targets for')
print('Waveband : {0}'.format(thisWaveband))
# Define the polarization standard files
        thisFilename = 'polStandardTable_{0}.csv'.format(thisWaveband)
outTableFile = os.path.join(pyPol_data, thisFilename)
# # Setup a table for storing the final polStandard information
        # # Begin by copying the original polStandards table to get the proper formatting
# polStandardTable = polStandards.copy()
# polStandardTable_V.remove_rows(slice(0, len(polStandardTable_V), 1))
# polStandardTable_R = polStandardTable_V.copy()
#
# # Remove the R-band columns from the V table and vice versa
# polStandardTable_V.remove_columns(['P_R', 'sP_R', 'PA_R', 'sPA_R'])
# polStandardTable_R.remove_columns(['P_V', 'sP_V', 'PA_V', 'sPA_V'])
# Loop through each subgroup and compute the photometry of the stars
# in those images.
indexBySubGroup = group.group_by(['Name'])
subGroupKeys = indexBySubGroup.groups.keys
for iSubGroup, subGroup in enumerate(indexBySubGroup.groups):
# Update the user on processing status
thisSubGroup = str(np.unique(subGroup['Name'].data)[0])
print('\tSubgroup : {0}'.format(thisSubGroup))
# Start by breaking the subGroup up into its constituent polAngs
indexByPolAng = subGroup.group_by(['Pol Ang'])
            # Initialize a dictionary for storing the respective images
subGroupImgDict = {}
            # We are assuming that each subgroup is uniquely named. In this
# case, it represents an independent measure of the polarization
# of a standard star. Thus, let's loop through the expected polAng
# files, read them in, and store them in a dictionary for later use.
            # Initialize a boolean list to track which standards appear within
# the images of this subgroup
polStandardBool = np.ones((len(polStandards),), dtype=bool)
# Loop through each polAng subset of the subGroup and read in images
polAngGroupKeys = indexByPolAng.groups.keys
for polAngGroup in indexByPolAng.groups:
# Generate the expected file name and attempt to read it in
thisPolAng = str(np.unique(polAngGroup['Pol Ang'])[0])
inputFile = '_'.join([thisTarget, thisSubGroup, thisPolAng]) + '.fits'
inputPath = os.path.join(polAngDir, inputFile)
if os.path.isfile(inputPath):
polAngImg = AstroImage(inputPath)
# Determine which standards appear in this image
polStandardBool = np.logical_and(polStandardBool,
polAngImg.in_image(polStanCoords, edge=50))
else:
print('Could not find expected polAng image')
pdb.set_trace()
# Store this image in the dictionary
subGroupImgDict[int(thisPolAng)] = polAngImg
# Now that all the polAng images are stored in a dictionary, let's
# double check that AT LEAST ONE of the standards appear in all four
# polAng images.
if np.sum(polStandardBool) < 1:
errStr = '''It would seem that none of the entries in the standard
catalog appear in these images. Either...
1) You need to add entries into your polarization standard catalog
OR
2) These images don't actually contain any polarization standard star
If it is option (2), then make sure to eliminate this target from the
"targets" variable near the top of this script.'''
print(errStr)
pdb.set_trace()
else:
# At least one polarization standard was found in all four
# images, so we can proceed to do polarimetry on that source.
# Start by grabbing the standard(s) which appear in these imgs
subGroupStandards = polStandards[np.where(polStandardBool)]
for standard in subGroupStandards:
# Grab the name of this standard
thisStandard = standard['Name']
print('\t\tStandard : {0}'.format(thisStandard))
# Loop through each polAng image, test for saturation,
# measure star width, and perform aperture photometry.
photDict = {}
for polAng, polAngImg in subGroupImgDict.items():
# Update the user on processing status
print('\t\t\tPolaroid Angle : {0}'.format(str(polAng)))
                        # Find the expected star coordinates in this image using
# the WCS in the header
thisWCS = WCS(polAngImg.header)
skyCoord1 = SkyCoord(ra=standard['RA'], dec=standard['Dec'],
unit=(u.hour, u.deg), frame='icrs')
x1, y1 = thisWCS.all_world2pix(skyCoord1.ra, skyCoord1.dec, 0)
# Cut out a small subarray around the predicted position
lf, rt = np.int(np.round(x1 - 20)), np.int(np.round(x1 + 20))
bt, tp = np.int(np.round(y1 - 20)), np.int(np.round(y1 + 20))
tmpArr = polAngImg.arr[bt:tp, lf:rt]
# Test if this star appears to be saturated
if tmpArr.max() > satLimit:
# If it is saturated, then make the "saturatedStar"
# variable "True" so that we will know NOT to use
# this standard star later and break out of the loop.
print('\t\t\tStar is saturated!')
saturatedStar = True
break
# Use a centroid function to get a more precise position
x1, y1 = (centroid_com(tmpArr) + np.array([lf, bt]))
from photutils import data_properties, properties_table
# Measure star width properties
columns = ['id', 'xcentroid', 'ycentroid', 'semimajor_axis_sigma', 'semiminor_axis_sigma', 'orientation']
props = data_properties(tmpArr)
tbl = properties_table(props, columns=columns)
# Compute the axis ratio and test if it's any good
semimajor = tbl['semimajor_axis_sigma'].data[0]
semiminor = tbl['semiminor_axis_sigma'].data[0]
axisRatio = semimajor/semiminor
if axisRatio > 1.3:
print('\t\t\tStar is too oblate!')
print('\t\t\ta/b = {0}'.format(axisRatio))
oblateStar = True
break
# If it is not too oblate, then compute an approximate
# width using a geometric mean
starWidth = np.sqrt(semimajor*semiminor)
# Build a circular aperture and a sky annulus to
# measure the star photometry
starAperture = CircularAperture((x1, y1), r=2*starWidth)
skyAperture = CircularAnnulus((x1, y1),
r_in=3.0*starWidth, r_out=3.0*starWidth + 4)
# Measure the photometry (flux not magnitudes) using the
# polAngImg.sigma array as the source of uncertainties
phot_table = aperture_photometry(polAngImg.arr,
[starAperture, skyAperture],
error=polAngImg.sigma)
# Compute a mean background count rate within annulus
skyArea = skyAperture.area()
bkg_mean = phot_table['aperture_sum_1'].data[0] / skyArea
sig_bkg_mean = phot_table['aperture_sum_err_1'].data[0] / skyArea
# Compute the background contribution to the stellar flux
starArea = starAperture.area()
bkg_sum = bkg_mean * starArea
sig_bkg_sum = sig_bkg_mean * starArea
# Compute a final stellar flux
final_flux = phot_table['aperture_sum_0'].data[0] - bkg_sum
                        sig_final_flux = np.sqrt(phot_table['aperture_sum_err_0'].data[0]**2 +
                                                 sig_bkg_sum**2)
# Store the star photometry (and uncertainty) in the
# photDict under its polAng
photDict[polAng] = {
'flux': final_flux,
's_flux': sig_final_flux}
else:
# If the whole loop executed without any problems, then
# it is safe to assume that photometry can be trusted.
                        # Indicate this with the "saturatedStar" boolean flag.
saturatedStar = False
oblateStar = False
# Now that the photometry for this star has been
# successfully measured, let's double check that the star
# was not saturated or oblate.
if saturatedStar:
print('\t\tAt least one photometry measurement was saturated!')
print('\t\tDo not compute the observed polarization.')
continue
if oblateStar:
print('\t\tAt least one star was too oblate.')
print('\t\tDo not compute the observed polarization.')
continue
# If the all of the photometry measurements can be trusted,
# then continue to estimate the polarization of this source.
# ********** STOKES U **********
A = (photDict[0]['flux'] - photDict[400]['flux'])
B = (photDict[0]['flux'] + photDict[400]['flux'])
U = A/B
# Compute the uncertainty in that Stokes U quantity
s_AB = np.sqrt(photDict[0]['s_flux']**2 +
photDict[400]['s_flux']**2)
s_U = np.abs(s_AB/B)*np.sqrt(1.0 + U**2)
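                    # Note (added check, not from the original source): for a ratio
                    # U = (F1 - F2)/(F1 + F2), exact first-order propagation gives
                    # s_U = 2*sqrt((F2*s_F1)**2 + (F1*s_F2)**2)/(F1 + F2)**2; the shorthand
                    # |s_AB/B|*sqrt(1 + U**2) used above coincides with it when s_F1 ~= s_F2.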
# ********** STOKES Q **********
A = (photDict[200]['flux'] - photDict[600]['flux'])
B = (photDict[200]['flux'] + photDict[600]['flux'])
Q = A/B
                    # Compute the uncertainty in that Stokes Q quantity
s_AB = np.sqrt(photDict[200]['s_flux']**2 +
photDict[600]['s_flux']**2)
s_Q = np.abs(s_AB/B)*np.sqrt(1.0 + Q**2)
# ********** POLARIZATION PERCENTAGE **********
P = np.sqrt(U**2 + Q**2)
s_P = np.sqrt((U*s_U)**2 + (Q*s_Q)**2)/P
# ...and de-bias the polarization measurements
if P/s_P <= 1:
P = 0
else:
P = np.sqrt(P**2 - s_P**2)
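                    # This quadrature de-bias (P -> sqrt(P**2 - s_P**2), or 0 when P/s_P <= 1)
                    # is the estimator commonly attributed to Wardle & Kronberg (1974).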
# ********** POLARIZATION POSITION ANGLE **********
PA = np.rad2deg(0.5*np.arctan2(U, Q))
# lazy way (assumes sigQ ~= sigU)
# sigPA = 0.5*rad2deg*(sigP/P)
# Real way (uses actual sigQ and sigU)
s_PA = 0.5*rad2deg*np.sqrt((U*s_Q)**2 + (Q*s_U)**2)/P**2
# TODO Double check that this matches the formula in PEGS_pol
# I think that PEGS pol is actually MISSING a factor of P
# in the denominator.
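                    # For reference (standard first-order propagation, not from the original
                    # source): with PA = 0.5*arctan2(U, Q), dPA/dU = 0.5*Q/P**2 and
                    # dPA/dQ = -0.5*U/P**2, so s_PA = 0.5*sqrt((Q*s_U)**2 + (U*s_Q)**2)/P**2
                    # in radians, which matches the expression above (P**2 in the denominator).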
# Scale up polarization values to percentages
P *= 100.0
s_P *= 100.0
# Check that the polarization is reasonable
# (e.g. R-band, 20150119, HD38563A is problematic)
if P > 10:
                        print('\tThe polarization of star {0} seems too high'.format(thisStandard))
print('\tskipping to next star')
continue
pdb.set_trace()
# Now add an entry to the temporary polarization table
                    # # Generate the proper keys for storing this data in table columns
# subGroupStr = str(iGroup+1) + str(iSubGroup+1)
# P_key = '_'.join(['P', thisWaveband, subGroupStr])
# sP_key = '_'.join(['sP', thisWaveband, subGroupStr])
# PA_key = '_'.join(['PA', thisWaveband, subGroupStr])
# sPA_key = '_'.join(['sPA', thisWaveband, subGroupStr])
#
# # Add the columns required for the polStandardTables
# if thisWaveband == 'V':
# polStandardTable_V.add_columns([
# Column(name = P_key, data = np.zeros(len(polStandardTable_V))),
# Column(name = sP_key, data = np.zeros(len(polStandardTable_V))),
# Column(name = PA_key, data = np.zeros(len(polStandardTable_V))),
# Column(name = sPA_key, data = np.zeros(len(polStandardTable_V)))
# ])
# elif thisWaveband == 'R':
# polStandardTable_R.add_columns([
# Column(name = P_key, data = np.zeros(len(polStandardTable_R))),
# Column(name = sP_key, data = np.zeros(len(polStandardTable_R))),
# Column(name = PA_key, data = np.zeros(len(polStandardTable_R))),
# Column(name = sPA_key, data = np.zeros(len(polStandardTable_R)))
# ])
#
#
# pdb.set_trace()
#
#
#
        # Initialize a boolean list for testing which stars lie within the image
# footprint.
polStandardBool = [True]*len(polStandards)
pdb.set_trace()
        # Initialize an empty dictionary for storing polAng AstroImage objects
polAngImgs = {}
indexTargetWavePolAng = indexTargetWave.group_by(['Pol Ang'])
for polAng in indexTargetWavePolAng.groups:
# Loop through each of the polAng images,
# and check which polarization standards are common to them all
thisPolAng = str(np.unique(polAng['Pol Ang'].data)[0])
# Read in the current polAng image
inFile = os.path.join(polAngDir,
'_'.join([thisTarget, thisWaveband, thisPolAng]) + '.fits')
# Read the file and store it in the dictionary
thisImg = AstroImage(inFile)
polAngImgs[int(thisPolAng)] = thisImg
# Determine which standards appear in this image
polStandardBool = np.logical_and(polStandardBool,
thisImg.in_image(polStanCoords, edge=50))
# A bit of cleanup to prevent confusion down the road
del thisPolAng
del thisImg
        # Select the standards for this targetWave group, and grab their
# coordinates
thisStandard = polStandards[np.where(polStandardBool)]
thisCoords = polStanCoords[
|
np.where(polStandardBool)
|
numpy.where
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from multiprocessing import Process, Manager
import numpy as np
import unittest
import tempfile
import shutil
import logging
from hypothesis import given
import hypothesis.strategies as st
log = logging.getLogger("parallelize_bmuf_distributed_test")
log.setLevel(logging.INFO)
def bmuf_process(filestore_dir, process_id, shared_results,
cpu_device=False, nesterov=False):
# We need to import caffe2 in every process to initialize CUDA independently.
from caffe2.python import core, cnn, data_parallel_model, dyndep, workspace
from caffe2.proto import caffe2_pb2
dyndep.InitOpsLibrary("@/caffe2/caffe2/distributed:file_store_handler_ops")
if not cpu_device:
if not workspace.has_gpu_support and not workspace.has_hip_support:
log.info('No GPU support test is Ignored.')
return
if workspace.NumGpuDevices() < 4:
log.info('Not enough GPU support, test IGNORED')
return
model = cnn.CNNModelHelper(
order="NHWC",
name="test"
)
if not cpu_device:
device_type = workspace.GpuDeviceType
device_prefix = "gpu"
else:
device_type = caffe2_pb2.CPU
device_prefix = "cpu"
devices = [0, 1] if process_id == 0 else [2, 3]
def _model_build_fun(model, loss_scale):
fc = model.FC(
"data", "fc", 16, 1, ("ConstantFill", {}), ("ConstantFill", {})
)
fc_fl = model.FlattenToVec(fc, "fc_fl")
sigm = model.Sigmoid(fc_fl, "sigm")
sq = model.SquaredL2Distance([sigm, "label"], "sq")
loss = model.AveragedLoss(sq, "loss")
loss = model.Scale(loss, scale=loss_scale)
# For testing explicit sync
model.param_init_net.UniformFill([], ["sync_num"], shape=[1])
return [loss]
def _input_builder_fun(model):
return None
def _param_update_fun(model):
ITER = model.Iter("ITER")
LR = model.net.LearningRate(
[ITER],
"LR",
base_lr=(-0.1),
policy="fixed",
)
ONE = model.param_init_net.ConstantFill(
[], "ONE", shape=[1], value=1.0,
)
for param in model.GetParams():
grad = model.param_to_grad[param]
model.WeightedSum([param, ONE, grad, LR], param)
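            # Note: base_lr is negative (-0.1) and the "fixed" policy keeps LR constant,
            # so WeightedSum computes param = param*1 + grad*LR, i.e. effectively plain
            # SGD with step size 0.1 (a common caffe2 idiom for hand-rolled updates).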
def _generate_data(devices, process_id, device_type, device_prefix):
np.random.seed(26 + process_id * 10)
# Each run has same input, independent of number of gpus
batch_size = 64
for _ in range(0, 10):
full_data =
|
np.random.rand(batch_size, 16)
|
numpy.random.rand
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Utility functions required for the download helpers.
:copyright:
<NAME> (<EMAIL>), 2014
:license:
GNU Lesser General Public License, Version 3
(http://www.gnu.org/copyleft/lesser.html)
"""
from urllib2 import HTTPError, URLError
import collections
import copy
import fnmatch
import itertools
import os
from lxml import etree
import numpy as np
from scipy.spatial import cKDTree
from socket import timeout as SocketTimeout
import time
import obspy
import warnings
from obspy.core.util.base import NamedTemporaryFile
from obspy.fdsn.client import FDSNException
from obspy.mseed.util import getRecordInformation
# Different types of errors that can happen when downloading data via the
# FDSN clients.
ERRORS = (FDSNException, HTTPError, URLError, SocketTimeout)
# Some application.wadls are wrong...
OVERWRITE_CAPABILITIES = {
"resif": None
}
# mean earth radius in meter as defined by the International Union of
# Geodesy and Geophysics.
EARTH_RADIUS = 6371009
ChannelAvailability = collections.namedtuple(
"ChannelAvailability",
["network", "station", "location", "channel", "starttime", "endtime",
"filename"])
class Station(object):
__slots__ = ["network", "station", "latitude", "longitude",
"elevation_in_m", "channels", "stationxml_filename"]
def __init__(self, network, station, latitude, longitude,
elevation_in_m, channels=None, stationxml_filename=None):
self.network = network
self.station = station
self.latitude = latitude
self.longitude = longitude
self.elevation_in_m = elevation_in_m
self.channels = channels if channels else []
self.stationxml_filename = stationxml_filename
def __repr__(self):
return "Station(%s, %s, %s, %s, %s, %s, %s)" % (
self.network.__repr__(),
self.station.__repr__(),
self.latitude.__repr__(),
self.longitude.__repr__(),
self.elevation_in_m.__repr__(),
self.channels.__repr__(),
self.stationxml_filename.__repr__())
def __eq__(self, other):
try:
for key in self.__slots__:
if getattr(self, key) != getattr(other, key):
return False
except AttributeError as e:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(tuple(str(getattr(self, i)) for i in self.__slots__))
class Channel(object):
__slots__ = ["location", "channel", "mseed_filename"]
def __init__(self, location, channel, mseed_filename=None):
self.location = location
self.channel = channel
self.mseed_filename = mseed_filename
def __repr__(self):
return "Channel(%s, %s, %s)" % (
self.location.__repr__(),
self.channel.__repr__(),
self.mseed_filename.__repr__())
def __eq__(self, other):
try:
for key in self.__slots__:
if getattr(self, key) != getattr(other, key):
return False
except AttributeError:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(tuple(getattr(self, i) for i in self.__slots__))
def format_report(report):
"""
Pretty print the report returned from the download() function of the
download helper.
"""
print("\nAttempted to acquire data from %i clients." % len(report))
for info in report:
stationxmls = []
mseeds = []
for station in info["data"]:
stationxmls.append(station.stationxml_filename)
mseeds.extend([i.mseed_filename for i in station.channels])
filesize = sum(os.path.getsize(i) for i in (mseeds + stationxmls))
filesize /= (1024.0 * 1024.0)
print("\tClient %10s - %4i StationXML files | %5i MiniSEED files "
"| Total Size: %.2f MB" %
('%s' % info["client"], len(stationxmls), len(mseeds),
filesize))
def is_in_list_of_channel_availability(network, station, location, channel,
starttime, endtime, availabilities):
"""
Helper function checking if a given channel is in a list of
ChannelAvailability tuples.
:param network: The network code.
:param station: The station code.
:param location: The location code.
:param channel: The channel code.
:param starttime: The starttime of the data.
:param endtime: The endtime of the data
:param availabilities: List of ChannelAvailability objects.
"""
for avail in availabilities:
if (avail.network == network) and \
(avail.station == station) and \
(avail.location == location) and \
(avail.channel == channel) and \
(avail.starttime <= starttime) and \
(avail.endtime >= endtime):
return True
return False
def attach_stationxml_filenames(stations, restrictions, stationxml_path,
logger):
# Attach filenames to the StationXML files and get a list of
# already existing StationXML files.
stations_to_download = []
existing_stationxml_channels = []
for stat in copy.deepcopy(stations):
filename = get_stationxml_filename(
stationxml_path, stat.network, stat.station, stat.channels)
if not filename:
continue
# If the StationXML file already exists, make sure it
# contains all the necessary information. Otherwise
# delete it and it will be downloaded again in the
# following.
if os.path.exists(filename):
contents = get_stationxml_contents(filename)
all_channels_good = True
for chan in stat.channels:
if is_in_list_of_channel_availability(
stat.network, stat.station,
chan.location, chan.channel,
restrictions.starttime,
restrictions.endtime, contents):
continue
all_channels_good = False
break
if all_channels_good is False:
logger.warning(
"StationXML file '%s' already exists but it "
"does not contain matching data for all "
"MiniSEED data available for this stations. "
"It will be deleted and redownloaded." %
filename)
safe_delete(filename)
else:
existing_stationxml_channels.extend(contents)
continue
dirname = os.path.dirname(filename)
if not os.path.exists(dirname):
os.makedirs(dirname)
stat.stationxml_filename = filename
stations_to_download.append(stat)
return {
"stations_to_download": stations_to_download,
"existing_stationxml_contents": existing_stationxml_channels
}
def attach_miniseed_filenames(stations, mseed_path):
"""
Attach filenames to the channels in the stations list splitting the
dataset into already existing channels and new channels.
"""
stations_to_download = []
existing_miniseed_filenames = []
stations = copy.deepcopy(stations)
for station in stations:
channels = []
for channel in station.channels:
filename = get_mseed_filename(
mseed_path, station.network, station.station,
channel.location, channel.channel)
if not filename:
continue
if os.path.exists(filename):
existing_miniseed_filenames.append(filename)
continue
dirname = os.path.dirname(filename)
if not os.path.exists(dirname):
os.makedirs(dirname)
channel.mseed_filename = filename
channels.append(channel)
if not channels:
continue
station.channels = channels
stations_to_download.append(station)
return {
"stations_to_download": stations_to_download,
"existing_miniseed_filenames": existing_miniseed_filenames
}
def filter_stations_based_on_duplicate_id(existing_stations,
discarded_station_ids, new_stations):
"""
    :param existing_stations: A set of :class:`~.Station` objects detailing
already existing stations.
:param discarded_station_ids: A set of tuples denoting discarded
station ids, e.g. station ids that have been discarded due to some
reason.
:param new_stations: A set or list of new :class:`~.Station` objects
that will be filtered.
:return: A set of filtered :class:`~.Station` objects.
"""
existing_stations = [(_i.network, _i.station) for _i in existing_stations]
invalid_station_ids = set(existing_stations).union(discarded_station_ids)
return list(itertools.ifilterfalse(
lambda x: (x.network, x.station) in invalid_station_ids,
new_stations))
def filter_stations_with_channel_list(stations, channels):
station_channels = {}
get_station = lambda x: "%s.%s" % (x.network, x.station)
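    # itertools.groupby only merges *consecutive* items, hence the sort by the
    # station-id key before grouping.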
for s, c in itertools.groupby(sorted(channels, key=get_station),
get_station):
station_channels[s] = [(_i.location, _i.channel) for _i in c]
final_stations = []
for station in stations:
station_id = "%s.%s" % (station.network, station.station)
if station_id not in station_channels:
continue
station_chan = station_channels[station_id]
good_channels = []
for channel in station.channels:
if (channel.location, channel.channel) not in station_chan:
continue
good_channels.append(channel)
if good_channels:
station = copy.deepcopy(station)
station.channels = good_channels
final_stations.append(station)
return final_stations
def download_stationxml(client, client_name, starttime, endtime, station,
logger):
bulk = [(station.network, station.station, _i.location, _i.channel,
starttime, endtime) for _i in station.channels]
try:
client.get_stations(starttime = starttime, endtime=endtime,
network=station.network, station=station.station,
location=station.channels[0].location,
channel=station.channels[0].channel[:2] + "?",
level="response",
filename=station.stationxml_filename)
except Exception as e:
logger.info("Failed to downloaded StationXML from %s for station "
"%s.%s." %
(client_name, station.network, station.station))
return None
logger.info("Client '%s' - Successfully downloaded '%s'." %
(client_name, station.stationxml_filename))
return station.stationxml_filename
def download_and_split_mseed_bulk(client, client_name, starttime, endtime,
stations, logger):
"""
    Downloads the channels of a list of stations in bulk, saves the result in a
    temporary file and splits it at the record level to obtain the final
    MiniSEED files.
:param client:
:param client_name:
:param starttime:
:param endtime:
:param stations:
    :param logger:
:return:
"""
bulk = []
filenames = {}
for station in stations:
for channel in station.channels:
net, sta, loc, chan = station.network, station.station, \
channel.location, channel.channel
filenames["%s.%s.%s.%s" % (net, sta, loc, chan)] = \
channel.mseed_filename
bulk.append((net, sta, loc, chan, starttime, endtime))
temp_filename = NamedTemporaryFile().name
try:
client.get_waveforms_bulk(bulk, filename=temp_filename)
open_files = {}
# If that succeeds, split the old file into multiple new ones.
file_size = os.path.getsize(temp_filename)
with open(temp_filename, "rb") as fh:
try:
while True:
if fh.tell() >= (file_size - 256):
break
info = getRecordInformation(fh)
position = fh.tell()
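                    # The 12 bytes at offset 8 of the fixed MiniSEED record header hold,
                    # per the SEED fixed-header layout, the station (5), location (2),
                    # channel (3) and network (2) codes, which is why we seek 8 bytes in
                    # and read 12 before rewinding.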
fh.seek(position + 8, 0)
data = fh.read(12)
info["station"] = data[:5].strip().decode()
info["location"] = data[5:7].strip().decode()
info["channel"] = data[7:10].strip().decode()
info["network"] = data[10:12].strip().decode()
fh.seek(position, 0)
channel_id = "%s.%s.%s.%s" % (
info["network"], info["station"], info["location"],
info["channel"])
                    # Sometimes the services return something no one wants.
if channel_id not in filenames:
fh.read(info["record_length"])
continue
filename = filenames[channel_id]
if filename not in open_files:
open_files[filename] = open(filename, "wb")
open_files[filename].write(fh.read(info["record_length"]))
finally:
            for f in open_files.values():
try:
f.close()
except:
pass
finally:
try:
os.remove(temp_filename)
except:
pass
logger.info("Client '%s' - Successfully downloaded %i channels (of %i)" % (
client_name, len(open_files), len(bulk)))
return open_files.keys()
def get_availability_from_client(client, client_name, restrictions, domain,
logger):
"""
Returns availability information from an initialized FDSN client.
:type client: :class:`obspy.fdsn.client.Client`
:param client: An initialized FDSN client.
:type client_name: str
:param client_name: The name of the client. Only used for logging.
:type restrictions: :class:`obspy.fdsn.download_helpers.Restrictions`
:param restrictions: The non-domain related restrictions for the query.
:type domain: :class:`obspy.fdsn.download_helpers.Domain` subclass
:param domain: The domain definition.
:rtype: dict
Return a dictionary akin to the following containing information about
all available channels according to the webservice.
.. code-block:: python
{("NET", "STA1"): Station(network="NET", station="STA1",
latitude=1.0, longitude=2.0, elevation_in_m=3.0,
channels=(Channel(location="", channel="EHE"),
Channel(...), ...),
client="IRIS"),
("NET", "STA2"): Station(network="NET", station="STA2",
latitude=1.0, longitude=2.0, elevation_in_m=3.0,
channels=(Channel(location="", channel="EHE"),
Channel(...), ...),
client="IRIS"),
...
}
"""
# Check if stations needs to be filtered after downloading or if the
# restrictions one can impose with the FDSN webservices queries are enough.
# This depends on the domain definition.
try:
domain.is_in_domain(0, 0)
needs_filtering = True
except NotImplementedError:
needs_filtering = False
arguments = {
"network": restrictions.network,
"station": restrictions.station,
"location": restrictions.location,
"channel": restrictions.channel,
"starttime": restrictions.starttime,
"endtime": restrictions.endtime,
# Request at the channel level.
"level": "channel"
}
# Add the domain specific query parameters.
arguments.update(domain.get_query_parameters())
# Check the capabilities of the service and see what is the most
# appropriate way of acquiring availability information.
if client_name.lower() in OVERWRITE_CAPABILITIES:
cap = OVERWRITE_CAPABILITIES[client_name.lower()]
if cap is None:
reliable = False
elif cap == "matchtimeseries":
reliable = True
arguments["matchtimeseries"] = True
elif cap == "includeavailability":
reliable = True
arguments["includeavailability"] = True
else:
raise NotImplementedError
elif "matchtimeseries" in client.services["station"]:
arguments["matchtimeseries"] = True
reliable = True
elif "includeavailability" in client.services["station"]:
reliable = True
arguments["includeavailability"] = True
else:
reliable = False
if reliable:
logger.info("Client '%s' - Requesting reliable availability." %
client_name)
else:
logger.info("Client '%s' - Requesting unreliable availability." %
client_name)
try:
start = time.time()
inv = client.get_stations(**arguments)
end = time.time()
except ERRORS as e:
if "no data available" in str(e).lower():
logger.info("Client '%s' - No data available for request." %
client_name)
return {"reliable": reliable, "availability": None}
logger.error(
"Client '{0}' - Failed getting availability: %s".format(
client_name), str(e))
return {"reliable": reliable, "availability": None}
logger.info("Client '%s' - Successfully requested availability "
"(%.2f seconds)" % (client_name, end - start))
availability = {}
for network in inv:
for station in network:
# Skip the station if it is not in the desired domain.
if needs_filtering is True and \
not domain.is_in_domain(station.latitude,
station.longitude):
continue
channels = []
for channel in station.channels:
# Remove channels that somehow slipped past the temporal
# constraints due to weird behaviour from the data center.
if (channel.start_date > restrictions.starttime) or \
(channel.end_date < restrictions.endtime):
continue
# Use availability information if possible. In the other
# cases it should already work.
if "includeavailability" in arguments and \
arguments["includeavailability"]:
da = channel.data_availability
if da is None:
logger.warning(
"Client '%s' supports the 'includeavailability'"
"parameter but returns channels without "
"availability information. The final "
"availability might not be complete" % client_name)
continue
if (da.start > restrictions.starttime) or \
(da.end < restrictions.endtime):
continue
channels.append(Channel(location=channel.location_code,
channel=channel.code))
# Group by locations and apply the channel priority filter to
# each.
filtered_channels = []
get_loc = lambda x: x.location
for location, _channels in itertools.groupby(
sorted(channels, key=get_loc), get_loc):
filtered_channels.extend(filter_channel_priority(
list(_channels), key="channel",
priorities=restrictions.channel_priorities))
channels = filtered_channels
# Filter to remove unwanted locations according to the priority
# list.
channels = filter_channel_priority(
channels, key="location",
priorities=restrictions.location_priorities)
if not channels:
continue
availability[(network.code, station.code)] = Station(
network=network.code,
station=station.code,
latitude=station.latitude,
longitude=station.longitude,
elevation_in_m=station.elevation,
channels=channels)
logger.info("Client '%s' - Found %i station (%i channels)." % (
client_name, len(availability),
sum([len(_i.channels) for _i in availability.values()])))
return {"reliable": reliable, "availability": availability}
class SphericalNearestNeighbour(object):
"""
Spherical nearest neighbour queries using scipy's fast kd-tree
implementation.
"""
def __init__(self, data):
cart_data = self.spherical2cartesian(data)
self.data = data
self.kd_tree = cKDTree(data=cart_data, leafsize=10)
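        # Note: kd-tree distances are 3-D chord lengths between points on (or near)
        # the sphere; chord length grows monotonically with great-circle separation,
        # so nearest-neighbour ordering is preserved.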
def query(self, points, k=10):
points = self.spherical2cartesian(points)
d, i = self.kd_tree.query(points, k=k)
return d, i
def query_pairs(self, maximum_distance):
return self.kd_tree.query_pairs(maximum_distance)
@staticmethod
def spherical2cartesian(data):
"""
Converts a list of :class:`~obspy.fdsn.download_helpers.Station`
objects to an array of shape(len(list), 3) containing x/y/z in meters.
"""
# Create three arrays containing lat/lng/elevation.
shape = len(data)
lat = np.array([_i.latitude for _i in data], dtype=np.float64)
lon = np.array([_i.longitude for _i in data], dtype=np.float64)
r = np.array([EARTH_RADIUS + _i.elevation_in_m for _i in data],
dtype=np.float64)
# Convert data from lat/lng to x/y/z.
colat = 90.0 - lat
cart_data = np.empty((shape, 3), dtype=np.float64)
cart_data[:, 0] = r * np.sin(np.deg2rad(colat)) * \
np.cos(np.deg2rad(lon))
cart_data[:, 1] = r * np.sin(np.deg2rad(colat)) * \
np.sin(
|
np.deg2rad(lon)
|
numpy.deg2rad
|
"""Description of Crystal class."""
__author__ = 'ikibalin'
__version__ = "2020_08_19"
import math
from matplotlib.pyplot import flag
import numpy
from typing import NoReturn
from cryspy.A_functions_base.charge_form_factor import calc_jl_for_ion, calc_scattering_amplitude_tabulated, get_atom_name_ion_charge_shell, D_DISPERSION
from cryspy.A_functions_base.debye_waller_factor import calc_param_iso_aniso_by_b_iso_beta, calc_u_ij_by_beta
from cryspy.A_functions_base.matrix_operations import calc_m1_m2_inv_m1, calc_m_v
from cryspy.A_functions_base.magnetic_form_factor import get_j0_j2_parameters
from cryspy.A_functions_base.unit_cell import calc_eq_ccs_by_unit_cell_parameters, calc_m_m_by_unit_cell_parameters, calc_reciprocal_by_unit_cell_parameters
from cryspy.A_functions_base.structure_factor import calc_f_nucl_by_dictionary, calc_sft_ccs_by_dictionary, calc_f_m_perp_by_sft
from cryspy.A_functions_base.symmetry_elements import calc_full_mag_elems, calc_symm_flags, define_bravais_type_by_symm_elems
from cryspy.A_functions_base.symmetry_constraints import calc_sc_beta, calc_sc_fract_sc_b, calc_sc_chi
from cryspy.B_parent_classes.cl_3_data import DataN
from cryspy.B_parent_classes.preocedures import take_items_by_class
from cryspy.C_item_loop_classes.cl_1_atom_site import AtomSite, AtomSiteL
from cryspy.C_item_loop_classes.cl_1_atom_type import AtomTypeL
from cryspy.C_item_loop_classes.cl_1_atom_site_aniso import \
AtomSiteAnisoL
from cryspy.C_item_loop_classes.cl_1_atom_site_susceptibility import \
AtomSiteSusceptibilityL
from cryspy.C_item_loop_classes.cl_1_atom_type_scat import \
AtomTypeScatL
from cryspy.C_item_loop_classes.cl_2_atom_site_scat import \
AtomSiteScatL
from cryspy.C_item_loop_classes.cl_1_atom_site_moment import AtomSiteMoment, AtomSiteMomentL
from cryspy.C_item_loop_classes.cl_1_refln_susceptibility import \
ReflnSusceptibilityL
from cryspy.C_item_loop_classes.cl_1_atom_local_axes import \
AtomLocalAxesL
from cryspy.C_item_loop_classes.cl_1_atom_electron_configuration \
import AtomElectronConfigurationL
from cryspy.C_item_loop_classes.cl_1_cell import Cell
from cryspy.C_item_loop_classes.cl_1_refln import ReflnL
from cryspy.C_item_loop_classes.cl_1_space_group_symop_magn_centering import \
SpaceGroupSymopMagnCenteringL
from cryspy.C_item_loop_classes.cl_2_atom_rho_orbital_radial_slater \
import AtomRhoOrbitalRadialSlaterL
from cryspy.C_item_loop_classes.cl_2_space_group_symop_magn_operation import \
SpaceGroupSymopMagnOperationL
from cryspy.C_item_loop_classes.cl_2_space_group import SpaceGroup
from cryspy.D_functions_item_loop.function_1_report_magnetization_ellipsoid \
import magnetization_ellipsoid_by_u_ij, report_main_axes_of_magnetization_ellipsoids
na = numpy.newaxis
class Crystal(DataN):
"""
Crystal structure description.
Data items in the CRYSTAL category record details about
crystal structure.
Methods
-------
- calc_b_iso_beta
- calc_f_nucl
- calc_susceptibility_moment_tensor
- calc_magnetic_moments_with_field_loc
- report_main_axes_of_magnetization_ellipsoids
- calc_main_axes_of_magnetization_ellipsoids
- calc_magnetization_ellipsoid
- calc_hkl_in_range
- calc_hkl
- calc_refln_susceptibility
- calc_refln
Attributes
----------
- space_group, cell, atom_site (mandatory)
- atom_type, atom_site_aniso, atom_site_susceptibility,
atom_site_scat, atom_type_scat, atom_local_axes,
      atom_electron_configuration (optional)
"""
CLASSES_MANDATORY = (Cell, AtomSiteL,)
CLASSES_OPTIONAL = (SpaceGroup,
AtomTypeL, AtomSiteAnisoL, AtomSiteSusceptibilityL, AtomSiteScatL,
AtomTypeScatL, AtomLocalAxesL, AtomElectronConfigurationL,
AtomSiteMomentL, SpaceGroupSymopMagnOperationL, SpaceGroupSymopMagnCenteringL)
# CLASSES_INTERNAL = ()
CLASSES = CLASSES_MANDATORY + CLASSES_OPTIONAL
PREFIX = "crystal"
# default values for the parameters
D_DEFAULT = {}
def __init__(self, data_name=None, **kwargs) -> NoReturn:
super(Crystal, self).__init__()
self.__dict__["items"] = []
self.__dict__["data_name"] = data_name
for key, attr in self.D_DEFAULT.items():
setattr(self, key, attr)
for key, attr in kwargs.items():
setattr(self, key, attr)
def form_object(self) -> NoReturn:
"""Redefined method of DataN."""
self.apply_constraints()
def apply_constraints(self) -> NoReturn:
"""
Symmetry constraints on parameters.
Returns
-------
NoReturn
DESCRIPTION.
"""
if self.is_attribute("space_group"):
space_group = self.space_group
space_group_wyckoff = space_group.space_group_wyckoff
cell = self.cell
cell.type_cell = space_group.bravais_type
cell.it_coordinate_system_code = space_group.it_coordinate_system_code
cell.apply_constraints()
atom_site = self.atom_site
atom_site.apply_constraints(space_group_wyckoff)
if self.is_attribute("atom_site_aniso"):
atom_site_aniso = self.atom_site_aniso
atom_site_aniso.apply_space_group_constraint(
atom_site, space_group)
if self.is_attribute("atom_site_susceptibility"):
atom_site_susceptibility = self.atom_site_susceptibility
atom_site_susceptibility.apply_chi_iso_constraint(cell)
atom_site_susceptibility.apply_space_group_constraint(
atom_site, space_group, cell)
def calc_b_iso_beta(self):
"""
        Calculate b_iso and beta_ij based on atom_site and atom_site_aniso.
For each atom defined in atom_site.
"""
a_s = self.atom_site
try:
a_s_a = self.atom_site_aniso
except AttributeError:
a_s_a = None
l_b_iso, l_beta = [], []
coeff = float(8.*numpy.pi**2)
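        # Standard crystallographic conversion between isotropic displacement
        # parameters: B_iso = 8*pi**2 * U_iso (this is what `coeff` implements).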
cell = self.cell
for item_a_s in a_s.items:
label_atom = item_a_s.label
try:
adp_type = item_a_s.adp_type
except AttributeError:
adp_type = None
b_iso = 0.
beta = (0., 0., 0., 0., 0., 0.)
if adp_type == "Uiso":
u_iso = float(item_a_s.u_iso_or_equiv)
b_iso = float(8.*numpy.pi**2*u_iso)
elif adp_type == "Biso":
b_iso = float(item_a_s.b_iso_or_equiv)
elif adp_type == "Uovl":
# FIXME: correct it
u_iso = float(item_a_s.u_iso_or_equiv)
b_iso = coeff*u_iso
elif adp_type == "Umpe":
# FIXME: correct it
u_iso = float(item_a_s.u_iso_or_equiv)
b_iso = float(8.*numpy.pi**2*u_iso)
elif adp_type == "Uani":
item_a_s_a = a_s_a[label_atom]
beta = item_a_s_a.calc_beta(cell)
elif adp_type == "Bovl":
# FIXME: correct it
b_iso = float(item_a_s.b_iso_or_equiv)
elif adp_type == "Bani":
item_a_s_a = a_s_a[label_atom]
beta = (float(item_a_s_a.b_11), float(item_a_s_a.b_22),
float(item_a_s_a.b_33), float(item_a_s_a.b_12),
float(item_a_s_a.b_13), float(item_a_s_a.b_23))
l_b_iso.append(b_iso)
l_beta.append(beta)
np_b_iso = numpy.array(l_b_iso, dtype=float)
np_beta = numpy.array(l_beta, dtype=float)
return np_b_iso, np_beta
def calc_f_nucl(self, index_hkl):
dict_crystal = self.get_dictionary()
dict_in_out = {"index_hkl": index_hkl}
f_nucl, dder = calc_f_nucl_by_dictionary(dict_crystal, dict_in_out, flag_use_precalculated_data=False)
return f_nucl
def calc_refln(self, index_hkl,
flag_internal: bool = True):
"""
Calculate Refln cryspy object where nuclear structure factor is stored.
Keyword Arguments:
-----------------
h, k, l: 1D numpy array of Miller indexes
flag_internal: a flag to calculate or to use internal objects.
        It should be True if the user calls the function.
It's True by default.
Output:
-------
refln: object cryspy.Refln
Example:
-------
>>> import numpy as np
>>> h, k, l = np.array([1,2],dtype=int), np.array([1,0],dtype=int),
np.array([1,0],dtype=int)
>>> refln = crystal.calc_refln(h, k, l)
>>> print(refln.to_cif())
"""
# if isinstance(index_h, (float, int)):
# index_h = numpy.array([index_h], dtype=float)
# index_k = numpy.array([index_k], dtype=float)
# index_l = numpy.array([index_l], dtype=float)
# elif isinstance(index_h, list):
# index_h = numpy.array(index_h, dtype=float)
# index_k = numpy.array(index_k, dtype=float)
# index_l = numpy.array(index_l, dtype=float)
# index_hkl = numpy.stack([index_h, index_k, index_l], axis=0)
f_nucl = self.calc_f_nucl(index_hkl)
res = ReflnL(loop_name=self.data_name)
res.numpy_index_h = index_hkl[0]
res.numpy_index_k = index_hkl[1]
res.numpy_index_l = index_hkl[2]
res.numpy_f_calc = f_nucl
if flag_internal:
res.numpy_to_items()
return res
def calc_structure_factor_tensor_ccs(
self, index_hkl, flag_only_orbital: bool = False, dict_in_out: dict = None):
"""Calculate structure factor tensor in CCS coordinate (X||a*, Z||c)."""
dict_crystal = self.get_dictionary()
if dict_in_out is None:
dict_in_out = {"index_hkl": index_hkl, "flag_only_orbital": flag_only_orbital}
else:
dict_in_out["index_hkl"] = index_hkl
dict_in_out["flag_only_orbital"] = flag_only_orbital
sft_ccs, dder = calc_sft_ccs_by_dictionary(dict_crystal, dict_in_out, flag_use_precalculated_data=False)
return sft_ccs, dder
def calc_refln_susceptibility(
self, index_hkl, flag_internal: bool = True,
flag_only_orbital: bool = False):
"""
Calculate susceptibility tensor and moment tensor.
They are given in Cartesian orthogonal system (x||a*, z||c).
Keyword Arguments:
-----------------
h, k, l: 1D numpy array of Miller indexes
Output:
-------
ReflnSusceptibilityL object of cryspy
Example:
-------
>>> import numpy as np
>>> h, k, l = np.array([1,2],dtype=int), np.array([1,0],dtype=int),
np.array([1,0],dtype=int)
>>> refln_suscept = crystal.calc_refln_susceptibility(h, k, l)
"""
sft_ccs, dder = self.calc_structure_factor_tensor_ccs(
index_hkl, flag_only_orbital=flag_only_orbital)
res = ReflnSusceptibilityL(loop_name=self.data_name)
res.numpy_index_h = index_hkl[0,:]
res.numpy_index_k = index_hkl[1,:]
res.numpy_index_l = index_hkl[2,:]
res.numpy_chi_11_calc = sft_ccs[0, :]
res.numpy_chi_12_calc = sft_ccs[1, :]
res.numpy_chi_13_calc = sft_ccs[2, :]
res.numpy_chi_21_calc = sft_ccs[3, :]
res.numpy_chi_22_calc = sft_ccs[4, :]
res.numpy_chi_23_calc = sft_ccs[5, :]
res.numpy_chi_31_calc = sft_ccs[6, :]
res.numpy_chi_32_calc = sft_ccs[7, :]
res.numpy_chi_33_calc = sft_ccs[8, :]
if flag_internal:
res.numpy_to_items()
return res
def calc_f_m_perp(self, index_hkl, magnetic_field, dict_in_out: dict = None):
"""Calculate F_M_perpendicular
"""
sft_ccs, dder_sft_ccs = self.calc_structure_factor_tensor_ccs(index_hkl, dict_in_out=dict_in_out)
unit_cell_parameters = self.cell.get_unit_cell_parameters()
eq_ccs, dder_eq_ccs = calc_eq_ccs_by_unit_cell_parameters(index_hkl, unit_cell_parameters)
f_m_perp, dder_f_m_perp = calc_f_m_perp_by_sft(
sft_ccs, magnetic_field, eq_ccs,
flag_sft_ccs=False,
flag_magnetic_field=False,
flag_eq_ccs=False)
return f_m_perp
def calc_hkl(self, sthol_min: float = 0., sthovl_max: float = 1.):
"""
Calculate hkl and multiplicity taking into account the symmetry
constraints.
Parameters
----------
sthol_min : float
minimal sin(theta)/wavelength in inversed angstrems.
sthovl_max : float
maximal sin(theta)/wavelength in inversed angstrems.
Returns
-------
h, k, l, mult : numpy.array[float]
The Miller indeces: h, k, l, and its multiplicity: mult.
"""
cell = self.cell
space_group = self.space_group
res = cell.calc_hkl(space_group, sthol_min, sthovl_max)
return res
def calc_hkl_in_range(self, sthol_min: float = 0., sthovl_max: float = 1.):
"""
Give hkl without taking symmetry into account.
Parameters
----------
sthol_min : float
DESCRIPTION.
sthovl_max : float
DESCRIPTION.
Returns
-------
res : TYPE
DESCRIPTION.
"""
cell = self.cell
res = cell.calc_hkl_in_range(sthol_min, sthovl_max)
return res
def calc_magnetization_ellipsoid(self):
"""
Magnetization ellipsoids.
The same coordinate system as U_ij (anisotropic Debye-Waller factor)
        Negative eigenvalues of the ellipsoid are replaced by positive ones.
"""
try:
cell = self.cell
a_s_m_a = self.atom_site_susceptibility
except AttributeError:
            return []
l_res = []
for item in a_s_m_a.items:
chi_as_u_loc = magnetization_ellipsoid_by_u_ij(cell, item)
l_res.append(chi_as_u_loc)
return l_res
def calc_main_axes_of_magnetization_ellipsoids(self):
"""Susceptibility along the main axes of magnetization ellipsoid.
Output
------
- l_moments is main axes of ellipsoid in mu_B/T for each atom
- l_moments_sigma is sigmas for main axes of ellipsoid for each
atom
- l_rot_matrix is directions for moments
for moments[0] direction is rot_matrix[:, 0]
for moments[1] direction is rot_matrix[:, 1]
for moments[2] direction is rot_matrix[:, 2]
        The main axes are given in the Cartesian coordinate system (x||a*, z||c).
"""
ll_moments = []
ll_directions = []
try:
cell = self.cell
a_s_s = self.atom_site_susceptibility
except AttributeError:
return ll_moments, ll_directions
l_moments, l_moments_sigma, l_rot_matrix = \
a_s_s.calc_main_axes_of_magnetization_ellipsoid(cell)
return l_moments, l_moments_sigma, l_rot_matrix
def report_main_axes_of_magnetization_ellipsoids(self):
"""
Report about main axes of magnetization ellipsoids.
Make a report about magnetization ellipsoids in string format.
Calculations are performed by
get_main_axes_of_magnetization_ellipsoids method
and calc_magnetization_ellipsoid method of the crystal object.
"""
# crystal is defined object of cryspy library;
# type(crystal) is Crystal
if self.is_attribute("atom_site_susceptibility"):
a_s_m_a = self.atom_site_susceptibility
else:
return ""
cell = self.cell
s_out = report_main_axes_of_magnetization_ellipsoids(
cell, a_s_m_a)
return s_out
def calc_magnetic_moments_with_field_loc(self, field_abc):
"""Orientation of magetic moment for atoms at applied magnetic field.
The input magnetic field should be given in normalized unit cell
(a/|a|, b/|b|, c/|c|)
        The output magnetic moments are given in the normalized unit cell
(a/|a|, b/|b|, c/|c|)
Output
------
l_lab_out - label of atoms
l_xyz_out - position of atoms
l_moment_out - moment of atoms
"""
np_field = numpy.array(field_abc, dtype=float)
norm_field = numpy.sqrt(numpy.square(np_field).sum())
if norm_field == 0.:
norm_field = 1.
np_field = np_field/norm_field
l_lab_out, l_xyz_out, l_moment_out = [], [], []
try:
spgr = self.space_group
cell = self.cell
a_s = self.atom_site
a_s_m_a = self.atom_site_susceptibility
except AttributeError:
return None
m_m_norm = cell.m_m_norm
m_mt_norm_m_norm_field = numpy.matmul(
numpy.matmul(m_m_norm.transpose(), m_m_norm), np_field)
as_p1 = []
asm_p1 = []
for _l, _11, _22, _33, _12, _13, _23 in zip(
a_s_m_a.label, a_s_m_a.chi_11, a_s_m_a.chi_22, a_s_m_a.chi_33,
a_s_m_a.chi_12, a_s_m_a.chi_13, a_s_m_a.chi_23):
m_chi = numpy.array([[_11, _12, _13],
[_12, _22, _23],
[_13, _23, _33]], dtype=float)
x, y, z = a_s[_l].fract_x, a_s[_l].fract_y, a_s[_l].fract_z
l_out = spgr.calc_rotated_matrix_for_position(m_chi, x, y, z)
for _i_out, _out in enumerate(l_out):
_xyz = _out[0]
_chi = _out[1]
_moment = numpy.matmul(_chi, m_mt_norm_m_norm_field)
label_p1 = f"{_l:}_{_i_out+1:}"
l_lab_out.append(label_p1)
l_xyz_out.append(_xyz)
l_moment_out.append(_moment)
as_p1.append(AtomSite(label=label_p1, fract_x=_xyz[0], fract_y=_xyz[1], fract_z=_xyz[2]))
asm_p1.append(AtomSiteMoment(
label=label_p1,
crystalaxis_x=numpy.round(_moment[0], 5),
crystalaxis_y=numpy.round(_moment[1], 5),
crystalaxis_z=numpy.round(_moment[2], 5)))
spgr_p1 = SpaceGroup(it_number=1)
spgr_p1.form_object()
atom_site_p1 = AtomSiteL()
atom_site_p1.items = as_p1
atom_site_moment_p1 = AtomSiteMomentL()
atom_site_moment_p1.items = asm_p1
cryspy_p1 = Crystal(space_group = spgr_p1, cell=cell, atom_site=atom_site_p1, atom_site_moment=atom_site_moment_p1)
return cryspy_p1
def report(self):
return self.report_main_axes_of_magnetization_ellipsoids()
def plots(self):
return []
def get_flags_atom_beta(self):
atom_site = self.atom_site
if self.is_attribute("atom_site_aniso"):
atom_site_aniso = self.atom_site_aniso
l_f = []
for item_as in atom_site.items:
if item_as.adp_type == "Bani":
item_asa = atom_site_aniso[item_as.label]
l_f.append(
numpy.array([
item_asa.b_11_refinement, item_asa.b_22_refinement, item_asa.b_33_refinement,
item_asa.b_12_refinement, item_asa.b_13_refinement, item_asa.b_23_refinement],
dtype=bool))
elif item_as.adp_type == "Uani":
item_asa = atom_site_aniso[item_as.label]
l_f.append(
numpy.array([
item_asa.u_11_refinement, item_asa.u_22_refinement, item_asa.u_33_refinement,
item_asa.u_12_refinement, item_asa.u_13_refinement, item_asa.u_23_refinement],
dtype=bool))
else:
l_f.append(numpy.zeros((6,), dtype=bool))
res = numpy.stack(l_f, axis=0).transpose()
else:
res = numpy.zeros((6, len(atom_site.items)), dtype=bool)
return res
def get_dictionary(self):
"""Form dictionary. See documentation moduel CrysPy using Jupyter notebook.
"""
self.form_object()
ddict = {}
space_group, cell, atom_site = None, None, None
atom_site_susceptibility, atom_electron_configuration = None, None
atom_type_scat = None
atom_type = None
atom_site_aniso = None
atom_site_scat = None
atom_site_moment = None
space_group_symop_magn_centering = None
space_group_symop_magn_operation = None
l_obj = take_items_by_class(self, (SpaceGroup,))
if len(l_obj) > 0:
space_group = l_obj[0]
l_obj = take_items_by_class(self, (Cell,))
if len(l_obj) > 0:
cell = l_obj[0]
l_obj = take_items_by_class(self, (AtomSiteL,))
if len(l_obj) > 0:
atom_site = l_obj[0]
l_obj = take_items_by_class(self, (AtomSiteAnisoL,))
if len(l_obj) > 0:
atom_site_aniso = l_obj[0]
l_obj = take_items_by_class(self, (AtomSiteScatL,))
if len(l_obj) > 0:
atom_site_scat = l_obj[0]
l_obj = take_items_by_class(self, (AtomTypeL,))
if len(l_obj) > 0:
atom_type = l_obj[0]
l_obj = take_items_by_class(self, (AtomTypeScatL,))
if len(l_obj) > 0:
atom_type_scat = l_obj[0]
l_obj = take_items_by_class(self, (AtomSiteSusceptibilityL,))
if len(l_obj) > 0:
atom_site_susceptibility = l_obj[0]
l_obj = take_items_by_class(self, (AtomElectronConfigurationL,))
if len(l_obj) > 0:
atom_electron_configuration = l_obj[0]
l_obj = take_items_by_class(self, (AtomSiteMomentL,))
if len(l_obj) > 0:
atom_site_moment = l_obj[0]
l_obj = take_items_by_class(self, (SpaceGroupSymopMagnCenteringL,))
if len(l_obj) > 0:
space_group_symop_magn_centering = l_obj[0]
l_obj = take_items_by_class(self, (SpaceGroupSymopMagnOperationL,))
if len(l_obj) > 0:
space_group_symop_magn_operation = l_obj[0]
ddict["name"] = self.data_name
ddict["type_name"] = self.get_name()
ind_mag = None
rad = numpy.pi/180.
if space_group is not None:
full_symm_elems = space_group.full_space_group_symop.get_symm_elems()
ddict["full_symm_elems"] = full_symm_elems
reduced_symm_elems = space_group.reduced_space_group_symop.get_symm_elems()
ddict["reduced_symm_elems"] = reduced_symm_elems
ddict["centrosymmetry"] = space_group.centrosymmetry
if ddict["centrosymmetry"]:
p_centr = space_group.pcentr
lcm = numpy.lcm.reduce([fr.denominator for fr in p_centr])
vals = [int(fr.numerator * lcm / fr.denominator) for fr in p_centr]
vals.append(lcm)
ddict["centrosymmetry_position"] = numpy.array(vals, dtype=int)
shift = space_group.shift
l_vals = []
for ind in range(len(shift)):
lcm = numpy.lcm.reduce([fr.denominator for fr in shift[ind]])
vals = [int(fr.numerator * lcm / fr.denominator) for fr in shift[ind]]
vals.append(lcm)
l_vals.append(vals)
ddict["translation_elems"] = numpy.array(l_vals, dtype=int).transpose()
if space_group_symop_magn_operation is not None:
if space_group_symop_magn_centering is not None:
se_sgsmo = self.space_group_symop_magn_operation.get_sym_elems()
se_sgsmc = self.space_group_symop_magn_centering.get_sym_elems()
full_mcif_elems = calc_full_mag_elems(se_sgsmo, se_sgsmc)
ddict["full_mcif_elems"] = full_mcif_elems
if cell is not None:
if cell.is_attribute("type_cell"):
type_cell, it_coordinate_system_code = cell.type_cell, cell.it_coordinate_system_code
elif "full_mcif_elems" in ddict.keys():
type_cell, it_coordinate_system_code = define_bravais_type_by_symm_elems(full_mcif_elems)
cell.type_cell = type_cell
cell.it_coordinate_system_code = it_coordinate_system_code
cell.apply_constraints()
unit_cell_parameters = cell.get_unit_cell_parameters()
sc_uc, v_uc = calc_sc_v_unit_cell_parameters(type_cell, it_coordinate_system_code)
ddict["sc_uc"] = sc_uc
ddict["v_uc"] = v_uc
ddict["unit_cell_parameters"] = numpy.dot(sc_uc, unit_cell_parameters)+v_uc
ddict["flags_unit_cell_parameters"] = cell.get_flags_unit_cell_parameters()
if atom_site is not None:
atom_label = numpy.array(atom_site.label, dtype=str)
atom_occupancy = numpy.array(atom_site.occupancy, dtype=float)
if atom_site.is_attribute("multiplicity"):
atom_multiplicity = numpy.array(atom_site.multiplicity, dtype=int)
ddict["atom_multiplicity"] = atom_multiplicity
atom_fract_xyz = numpy.array([atom_site.fract_x,
atom_site.fract_y,
atom_site.fract_z], dtype=float)
atom_fract_xyz = numpy.mod(atom_fract_xyz, 1.)
atom_type_symbol = numpy.array(atom_site.type_symbol, dtype=str)
            table_sthovl = numpy.linspace(0, 2, 501) # from 0 to 2 Å**-1
table_wavelength = D_DISPERSION["table_wavelength"]
l_table_atom_scattering_amplitude = []
l_table_atom_dispersion = []
flag_atom_scattering_amplitude = True
for type_symbol in atom_type_symbol:
try:
jl = calc_jl_for_ion(table_sthovl, type_symbol)
scattering_amplitude = jl[:,0]
except UserWarning:
try:
scattering_amplitude = calc_scattering_amplitude_tabulated(type_symbol, table_sthovl)[0]
except KeyError:
flag_atom_scattering_amplitude = False
break
l_table_atom_scattering_amplitude.append(scattering_amplitude)
type_atom = get_atom_name_ion_charge_shell(type_symbol)[0]
if type_atom in D_DISPERSION.keys():
l_table_atom_dispersion.append(D_DISPERSION[type_atom])
else:
l_table_atom_dispersion.append(numpy.zeros_like(table_wavelength))
if flag_atom_scattering_amplitude:
ddict["table_sthovl"] = table_sthovl
ddict["table_atom_scattering_amplitude"] = numpy.stack(l_table_atom_scattering_amplitude, axis=0)
ddict["table_wavelength"] = table_wavelength
ddict["table_atom_dispersion"] = numpy.stack(l_table_atom_dispersion, axis=0)
atom_b_scat = numpy.array(atom_site.scat_length_neutron, dtype=complex)
b_d = 3*10**6*
|
numpy.ones(atom_fract_xyz.shape[1:], dtype=int)
|
numpy.ones
|
from shared.finesse.helper import *
import numpy as np
import math, time
__author__ = '<NAME>'
__copyright__ = 'Copyright (c) 2015-2016 Eclipse Technologies'
_AXES = {
'x': np.array([1, 0, 0]),
'y': np.array([0, 1, 0]),
'z': np.array([0, 0, 1])
}
def ik2d(l1, l2, x, y):
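    # Planar 2-link IK, as a sketch: the law of cosines on the triangle formed by
    # the two links and the target gives x**2 + y**2 = l1**2 + l2**2 + 2*l1*l2*cos(theta2),
    # solved for theta2 below; theta1 then follows from rotating the target by the
    # elbow offset (the xdot/ydot terms), which is equivalent to
    # atan2(y, x) - atan2(l2*sin(theta2), l1 + l2*cos(theta2)).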
# Compute theta2. There are two solutions.
t2 = math.acos((x**2 + y**2 - l1**2 - l2**2) / (2 * l1 * l2))
theta2 = np.array((t2, -t2))
# Compute theta1.
xdot = x * (l1 + l2 * np.cos(theta2)) + y * (l2 * np.sin(theta2))
ydot = y * (l1 + l2 * np.cos(theta2)) - x * (l2 * np.sin(theta2))
theta1 = np.arctan2(ydot, xdot)
return theta1, theta2
def fullik(angles, lengths, target, constraints=None, predilection=None):
target = np.asarray(target, dtype=np.float64)
dist = np.linalg.norm(target)
if dist > sum(lengths):
# Target is unreachable.
return
# Rotate target to the xz plane.
roll = -math.atan2(target[2], target[1]) - math.pi/2
q2 = quaternion(_AXES['x'], -roll)
target = np.dot(target, q2)
theta1, theta2 = ik2d(lengths[0], lengths[1], target[0], target[2])
theta1 += np.pi/2
# Apply constraints.
if constraints is not None:
c1, c2 = constraints
c1_index = np.where((theta1 >= c1[0]) & (theta1 <= c1[1]))
c2_index = np.where((theta2 >= c2[0]) & (theta2 <= c2[1]))
if len(c1_index) == 1 and len(c2_index) == 1:
theta1 = theta1[c1_index]
theta2 = theta2[c2_index]
# Apply predilection.
if predilection is not None and len(theta1) > 1 and len(theta2) > 1:
if predilection == 1:
c2_index = np.where(theta2 > 0)
theta1 = theta1[c2_index]
theta2 = theta2[c2_index]
else:
c2_index = np.where(theta2 < 0)
theta1 = theta1[c2_index]
theta2 = theta2[c2_index]
return theta1[0], roll, theta2[0]
def forward(lengths, angles):
l1 = lengths[0]
l2 = lengths[1]
theta1 = angles[0]
theta2 = angles[1]
x1 = l1 * math.cos(theta1)
y1 = l1 * math.sin(theta1)
x2 = x1 + l2 * math.cos(theta1 + theta2)
y2 = y1 + l2 * math.sin(theta1 + theta2)
return np.array((x1, y1)), np.array((x2, y2))
def forward_kinematics(lengths, angles):
# Set initial points.
points = [
np.array([0, 0, 0]),
|
np.array([0, 0, -lengths[0]])
|
numpy.array
|
''' Wigner symbols.
<NAME> 2016
Algorithm based on the sympy implementation of sympy.physics.wigner,
which was based on:
[Rasch03] <NAME> and <NAME>, 'Efficient Storage Scheme for
Pre-calculated Wigner 3j, 6j and Gaunt Coefficients', SIAM
J. Sci. Comput. Volume 25, Issue 4, pp. 1416-1428 (2003)
Additional routines have been added for special configurations of Wigner 3-j symbols.
The code has not been written in a way to handle large input values; beware of precision issues
associated with large values acquired from factorials.
'''
import numpy as np
try:
from scipy.special import factorial
except ImportError:
print('You are using an older version of scipy. Importing factorial from old location')
from scipy.misc import factorial
import sys
# store factorials in an array
def factorial_list(N):
x=np.arange(N+1)
return factorial(x)
# old way for the factorials, no longer used.
# def factorial_list(N):
#
# FL=[1]
# # N = highest factorial needed for computation
# # note_to_self : maybe it is faster to use the scipy factorial
# # function and then append to Factorial_list
#
# #if N > len(FL):
# if N > 1:
# for i in range(len(FL), int(N+1)):
# FL.append(FL[i-1]*i)
# return FL[:int(N) + 1]
# else:
# return FL
def three_j(j,m):
j_1,j_2,j_3=j
m_1,m_2,m_3=m
# symmetry conditions
if int(j_1 * 2) != j_1 * 2 or int(j_2 * 2) != j_2 * 2 or \
int(j_3 * 2) != j_3 * 2:
        raise ValueError("j values must be integer or half integer (error in three_j)")
if int(m_1 * 2) != m_1 * 2 or int(m_2 * 2) != m_2 * 2 or \
int(m_3 * 2) != m_3 * 2:
        raise ValueError("m values must be integer or half integer (error in three_j)")
if m_1 + m_2 + m_3 != 0:
return 0
    PF = int((-1) ** int(j_1 - j_2 - m_3))  # overall phase factor
    M = -m_3
a1 = j_1 + j_2 - j_3
if a1 < 0:
return 0
a2 = j_1 - j_2 + j_3
if a2 < 0:
return 0
a3 = -j_1 + j_2 + j_3
if a3 < 0:
return 0
if (abs(m_1) > j_1) or (abs(m_2) > j_2) or (abs(m_3) > j_3):
return 0
# special case identities, taken from the Mathematica website for 3-j symbols
if ( (j_1==j_2) & (j_3==0) & (m_1==-m_2) & (m_3==0) ):
return (-1)**(j_1-m_1)/np.sqrt(2*j_1+1)
#if ( (m_1==0) & (m_2==0) & (m_3==0)
max_factor=max(j_1 + j_2 + j_3 + 1, j_1 + abs(m_1), j_2 + abs(m_2),
j_3 + abs(m_3))
FL=factorial_list(max_factor)
Sqrt_Arg=(FL[int(j_1+j_2-j_3)] \
*FL[int(j_1-j_2+j_3)] \
*FL[int(-j_1+j_2+j_3)] \
*FL[int(j_1-m_1)] \
*FL[int(j_1+m_1)] \
*FL[int(j_2-m_2)] \
*FL[int(j_2+m_2)] \
*FL[int(j_3-m_3)] \
*FL[int(j_3+m_3)] )/FL[int(j_1+j_2+j_3+1)]
Sqrt_part=np.sqrt(Sqrt_Arg)
# need to fix this
#if Sqrt_part.is_complex:
# Sqrt_part=Sqrt_part.as_real_imag()[0]
i_min = max(-j_3 + j_1 + m_2, -j_3 + j_2 - m_1, 0)
i_max = min(j_2 + m_2, j_1 - m_1, j_1 + j_2 - j_3)
Sum=0
for i in range(int(i_min),int(i_max) + 1):
denom=FL[i] \
*FL[int(i+j_3-j_1-m_2)] \
*FL[int(j_2 + m_2- i)] \
*FL[int(j_1-i-m_1)] \
*FL[int(i + j_3-j_2 + m_1)] \
*FL[int(j_1 +j_2 -j_3 -i)]
        Sum = Sum + int((-1) ** i) / float(denom)
# might have to reset FL
return Sum*Sqrt_part*PF
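# Illustrative sketch (not part of the original module): a quick sanity check of
# three_j() against the known value quoted in the test cases at the bottom of
# this file, (1 2 3; 0 0 0) = -sqrt(3/35).
def _example_three_j():
    value = three_j(np.array([1, 2, 3]), np.array([0, 0, 0]))
    return np.isclose(value, -np.sqrt(3 / 35.))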
def Delta_coef(a,b,c,prec=None):
if int(a + b- c) != (a + b - c):
raise ValueError("j values must be integer or half integer and fulfill the triangle relation")
if int(a + c - b) != (a + c - b):
raise ValueError("j values must be integer or half integer and fulfill the triangle relation")
if int(b + c - a) != (b + c - a):
raise ValueError("j values must be integer or half integer and fulfill the triangle relation")
if (a+ b - c) < 0:
return 0
if (a + c - b) < 0:
return 0
if (b + c - a) < 0:
return 0
max_factor=max(a + b -c, a+c-b, b+c -a, a+b +c + 1)
FL=factorial_list(max_factor)
Sqrt_Arg=float(FL[int(a + b -c)] \
*FL[int(a + c - b)] \
*FL[int(b + c - a)])/float(FL[int(a + b + c + 1)])
Sqrt_part=np.sqrt(Sqrt_Arg)
#if prec:
# Sqrt_part=Sqrt_part.evalf(
return Sqrt_part
def Racah(a,b,c,d,e,f, prec=None):
# the Racah symbol
PF=Delta_coef(a,b,e,prec) \
*Delta_coef(c,d,e,prec) \
*Delta_coef(a,c,f,prec) \
*Delta_coef(b,d,f, prec)
if PF==0:
return 0
i_min = max(a + b + e, c + d + e, a + c + f, b + d + f)
i_max = min(a + b+ c + d, a + d + e + f, b + c + e + f)
max_factor=max(i_max + 1, a + b + c + d, a + d + e + f,b + c + e + f)
FL=factorial_list(max_factor)
Sum=0
for i in range(int(i_min), int(i_max)+ 1):
denom=FL[int(i-a-b-e)]\
*FL[int(i-c-d-e)]\
*FL[int(i-a-c-f)]\
*FL[int(i-b-d-f)]\
*FL[int(a + b + c + d - i)]\
*FL[int(a + d + e + f - i)]\
*FL[int(b + c + e + f - i)]
Sum=Sum+((-1)**i*FL[i+1])/float(denom)
return PF*Sum*(-1)**int(a+b+c+d)
def six_j(j):
j_1,j_2,j_3,j_4,j_5,j_6=j
return (-1)**int(j_1+j_2+j_4 +j_5)*Racah(j_1, j_2, j_5, j_4, j_3, j_6,)
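# Illustrative sketch (not part of the original module): the 6-j symbol with all
# six arguments equal to 3 should be -1/14, the value quoted in the commented
# test below.
def _example_six_j():
    value = six_j(np.array([3, 3, 3, 3, 3, 3]))
    return np.isclose(value, -1 / 14.)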
if __name__=="__main__":
# j=np.array([2,6,4])
# m=np.array([0,0,0])
# j_1,j_2,j_3=j
# m_1,m_2,m_3=m
# W=wigner.wigner_3j(j_1,j_2,j_3, m_1,m_2,m_3)
#
# print(W
#
# print(three_j(j,m)
# j=np.array([3,3,3,3,3,3])
# print('my six j',six_j(j)
# W=wigner.wigner_6j(3,3,3,3,3,3)
# print('6 j',W, -1/14.
#
# print(six_j(np.array([5,5,5,5,5,5])), 1/52.
print('some test cases for the 3-j symbols')
print('test 1')
print('----------------------------------------')
print('j=1,2,3 & m=0,0,0 => ', -np.sqrt(3/35.))
j=np.array([1,2,3]); m=
|
np.array([0,0,0])
|
numpy.array
|
# --------------------------------------------------------
# Fast/er R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME>
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import pickle
import numpy as np
from math import radians
def parse_rec(df, filename):
""" Parse PASCAL 3D annotation file """
objects = []
objs = df[df.im_path == filename]
for ix in range(len(objs)):
obj_struct = {}
obj_struct['class'] = objs.iloc[ix]['cat']
x1 = max(int(objs.iloc[ix]['left']), 0)
y1 = max(int(objs.iloc[ix]['upper']), 0)
x2 = min(int(objs.iloc[ix]['right']), int(objs.iloc[ix]['height'] - 1))
y2 = min(int(objs.iloc[ix]['lower']), int(objs.iloc[ix]['width'] - 1))
obj_struct['bbox'] = [x1, y1, x2, y2]
obj_struct['difficult'] = objs.iloc[ix]['difficult']
obj_struct['truncated'] = objs.iloc[ix]['truncated']
obj_struct['occluded'] = objs.iloc[ix]['occluded']
objects.append(obj_struct)
return objects
def voc_ap(rec, prec):
"""
Compute VOC AP given precision and recall.
"""
# correct AP calculation
# first append sentinel values at the end
mrec = np.concatenate(([0.], rec, [1.]))
mpre = np.concatenate(([0.], prec, [0.]))
# compute the precision envelope
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] =
|
np.maximum(mpre[i - 1], mpre[i])
|
numpy.maximum
|
# ---------------------------------------------------------- #
# ------------------ OzDES_Calculation.py ------------------ #
# --------- https://github.com/jhoormann/RMCodeDump -------- #
# ---------------------------------------------------------- #
# This is a dump of all the functions I have collated for #
# the OzDES RM program. This includes funtions defined in #
# OzDES_calibSpec/getPhoto/makeLC plus some others. #
# Unless otherwise noted this code was written by #
# <NAME>. #
# ---------------------------------------------------------- #
from astropy.io import fits
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
from scipy.integrate import fixed_quad
import OzDES_Plotting as ozplot
from scipy.interpolate import InterpolatedUnivariateSpline
from sklearn.gaussian_process import GaussianProcessRegressor, kernels
import sys
# -------------------------------------------------- #
# Modified from a function originally provided by #
# <NAME> #
# -------------------------------------------------- #
# ----------------- SpectrumCoadd ------------------ #
# -------------------------------------------------- #
# Read in calibrated spectral data assuming data is #
# in the format provided by OzDES_calibSpec after #
# coadding. #
# -------------------------------------------------- #
class SpectrumCoadd(object):
# Spectrum class for latest version of the OzDES pipeline
def __init__(self, filepath=None):
assert filepath != None, "No file name is specified."
self.filepath = filepath
try:
self.data = fits.open(filepath)
except IOError:
print("Error: file {0} could not be found".format(filepath))
exit()
data = fits.open(filepath)
self.combined = data[0]
self.combinedVariance = data[1]
self._wavelength = None
self._flux = None
self._variance = None
self._fluxCoadd = None
self._varianceCoadd = None
self._dates = None
self._runs = None
self.numEpochs = int((np.size(data) - 3) / 3)
self.redshift = self.combined.header['z']
self.RA = self.combined.header['RA']
self.DEC = self.combined.header['DEC']
self.field = self.combined.header['FIELD']
@property
def wavelength(self):
"""Define wavelength solution."""
if getattr(self, '_wavelength', None) is None:
crpix = self.combined.header[
'crpix1'] - 1.0 # Central pixel value. The -1.0 is needed as Python is ZERO indexed
crval = self.combined.header['crval1'] # central wavelength value
self.cdelt = self.combined.header['cdelt1'] # Wavelength interval between subsequent pixels
n_pix = self.combined.header["NAXIS1"]
wave = ((np.arange(n_pix) - crpix) * self.cdelt) + crval
self._wavelength = wave
return self._wavelength
@property
def flux(self):
if getattr(self, '_flux', None) is None:
self._flux = np.zeros((5000, self.numEpochs), dtype=float)
for i in range(self.numEpochs):
self._flux[:, i] = self.data[i * 3 + 3].data
return self._flux
@property
def variance(self):
if getattr(self, '_variance', None) is None:
self._variance = np.zeros((5000, self.numEpochs), dtype=float)
for i in range(self.numEpochs):
self._variance[:, i] = self.data[i * 3 + 4].data
return self._variance
@property
def fluxCoadd(self):
if getattr(self, '_fluxCoadd', None) is None:
self._fluxCoadd = np.zeros(5000, dtype=float)
self._fluxCoadd[:] = self.data[0].data
return self._fluxCoadd
@property
def varianceCoadd(self):
if getattr(self, '_varianceCoadd', None) is None:
self._varianceCoadd = np.zeros(5000, dtype=float)
self._varianceCoadd[:] = self.data[1].data
return self._varianceCoadd
@property
def dates(self):
if getattr(self, '_dates', None) is None:
self._dates = np.zeros(self.numEpochs, dtype=float)
for i in range(self.numEpochs):
self._dates[i] = self.data[i * 3 + 3].header[
                    'AVGDATE']  # this gives the average Modified Julian Date (UTC) at which the observation was taken
return self._dates
@property
def runs(self):
if getattr(self, '_runs', None) is None:
self._runs = np.zeros(self.numEpochs, dtype=float)
for i in range(self.numEpochs):
                self._runs[i] = self.data[i * 3 + 3].header['RUN']  # this gives the run number of the observation
return self._runs
# -------------------------------------------------- #
# ------------------- magToFlux -------------------- #
# -------------------------------------------------- #
# Reads in magnitude, error, and pivot wavelength #
# and converts to f_lambda in units of ergs/s/cm^2/A #
# -------------------------------------------------- #
def magToFlux(mag, err, pivot):
flux = (3*pow(10,18)/pow(pivot,2))*pow(10, -(2./5.)*(mag + 48.6))
flux_err = abs(flux*(-2./5.)*2.30259*err)
return flux, flux_err
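# Illustrative sketch (not part of the original pipeline): magToFlux() applied to
# an AB magnitude of 20 +/- 0.05 at a hypothetical pivot wavelength of 4720 A
# should give a flux density of roughly 5e-17 erg/s/cm^2/A.
def _example_magToFlux():
    flux, flux_err = magToFlux(20.0, 0.05, 4720.0)
    return flux, flux_err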
# -------------------------------------------------- #
# ------------------- outputLC --------------------- #
# -------------------------------------------------- #
# Creates an output file with date, flux, error #
# columns as is expected by lag recovery tools #
# Javelin and PyCCF. #
# -------------------------------------------------- #
def outputLC(date, flux, error, name, loc, obj_name):
length = len(date)
outname = loc + obj_name + "_" + name + ".txt"
output = open(outname, 'w')
for i in range(length):
if np.isnan(flux[i]) == False:
output.write("%s %s %s \n" % (date[i], flux[i], error[i]))
else:
            # Sometimes the flux ends up as NaN; this is generally because the SNR is so bad or the emission line is
            # so small that the continuum subtraction makes the line negative. These epochs are not saved in the data
            # file, but a warning is printed so you can have a look at what the problem is.
print("-------\n Houston, we have a problem! " + obj_name + " Night " + str(i) + "\n-------\n ")
output.close()
return
# -------------------------------------------------- #
# ---------------- convertPhotoLC -------------------#
# -------------------------------------------------- #
# Converts photometric light curves from magnitudes #
# to flux and saves the light curves separately for #
# each band. #
# -------------------------------------------------- #
def convertPhotoLC(photoName, source, bandName, bandPivot, scale, makeFig, outLoc):
# Read in the photometric data
photo = pd.read_table(photoName, delim_whitespace=True)
if makeFig == True:
# Define figure and axis for light curves of all bands
fig_photo, ax_photo = ozplot.plot_share_x(len(bandName), source, "Date (MJD)", bandName)
# Make a light curve for each band
for b in range(len(bandName)):
# Create an array for observations of a specified band and sort observations by date
band_data = photo[photo['BAND'] == bandName[b]].sort_values('MJD')
# Find date, mag, and magerr array for the specified band
ph_date = np.array(band_data['MJD'])
ph_mag = np.array(band_data['MAG'])
ph_magerr = np.array(band_data['MAGERR'])
# Loop over each epoch and convert magnitude to flux
ph_flux = np.zeros(len(ph_date))
ph_fluxerr = np.zeros(len(ph_date))
for e in range(len(ph_date)):
ph_flux[e], ph_fluxerr[e] = magToFlux(ph_mag[e], ph_magerr[e], bandPivot[b])
        # Scale the fluxes before they are saved; if you are concerned about remembering the scale factor, perhaps
        # include it in the output file name.
ph_flux = ph_flux / scale
ph_fluxerr = ph_fluxerr / scale
# Save the data as a light curve with filename outLoc + source + _ + bandName[b] + .txt
outputLC(ph_date, ph_flux, ph_fluxerr, bandName[b], outLoc, source)
if makeFig == True:
# plot the light curve on the subplot defined above.
ax_photo[b].errorbar(ph_date, ph_flux, yerr=ph_fluxerr, fmt='o', color='black')
# Once all the light curves are plotted save the figure as outLoc + source + "_photo.png"
if makeFig == True:
fig_photo.savefig(outLoc + source + "_photo.png")
return
# -------------------------------------------------- #
# ------------------ findLines ----------------------#
# -------------------------------------------------- #
# Determines which emission lines are present in the #
# spectrum. Returns an array of booleans where True #
# means the emission line is present. #
# -------------------------------------------------- #
def findLines(wavelength, z, lineName, contWinBSMin, contWinBSMax):
# decide which emission lines are available in the spectrum
availLines = np.zeros(len(lineName)).astype(bool)
for l in range(len(lineName)):
# for a line to be in the spectrum you need to include the continuum subtraction windows as well. This can
# be limiting but as we need continuum subtracted spectra it is necessary.
minWave = min(contWinBSMin[lineName[l]])
maxWave = max(contWinBSMax[lineName[l]])
if minWave * (1 + z) > wavelength[0] and maxWave * (1 + z) < wavelength[-1]:
availLines[l] = True
return availLines
# -------------------------------------------------- #
# -------------------- findBin ----------------------#
# -------------------------------------------------- #
# Finds the bin of the given vector (wavelength) #
# where the specified quantity (line) is located. #
# -------------------------------------------------- #
def findBin(line, wavelength):
bin = 0
for i in range(len(wavelength)-1):
if line >= wavelength[i] and line <= wavelength[i+1]:
bin = i
i = len(wavelength)
if line > wavelength[-1]:
bin = len(wavelength)-1
i = len(wavelength)
return bin
# -------------------------------------------------- #
# ---------------- interpolateVals ------------------#
# -------------------------------------------------- #
# Interpolates a linear line between two points and #
# propagates the uncertainty. #
# -------------------------------------------------- #
def interpolateVals(x, y, s, val):
# uncertainty is variance
interp = y[0] + (val - x[0]) * (y[1] - y[0]) / (x[1] - x[0])
interp_var = s[0] + (s[0] + s[1]) * ((val - x[0]) / (x[1] - x[0])) ** 2.
return interp, interp_var
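# Illustrative sketch (not part of the original pipeline): interpolating between
# (x, y) = (0, 1) and (10, 3) with variances (0.04, 0.09) at val = 5 gives
# interp = 2.0 and interp_var = 0.04 + (0.04 + 0.09) * 0.5**2 = 0.0725.
def _example_interpolateVals():
    return interpolateVals([0., 10.], [1., 3.], [0.04, 0.09], 5.)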
# -------------------------------------------------- #
# ------------------ meanUncert ---------------------#
# -------------------------------------------------- #
# Finds the uncertainty corresponding to the mean #
# of a set of numbers. #
# -------------------------------------------------- #
def meanUncert(variance):
length = len(variance)
var = 0
num = 0
for i in range(length):
if np.isnan(variance[i]) == False:
var = var + variance[i]
num += 1
sigma2 = (var / (num ** 2))
return sigma2
# -------------------------------------------------- #
# ---------------- cont_fit_reject ------------------#
# -------------------------------------------------- #
# Interpolate a linear line through the mean of the #
# continuum subtraction windows to represent the #
# continuum and subtract this line. Modifies the #
# given flux/variance vectors. #
# -------------------------------------------------- #
def cont_fit_reject(wavelength, fluxes, variances, minWin, maxWin):
# Define the wavelength range for the continuum model, between the mean of both windows
wave = np.array([np.nanmean(minWin), np.nanmean(maxWin)])
nBins = len(wavelength)
# Determine how many epochs there are to continuum subtract
number = int(fluxes.size / nBins)
for epoch in range(number):
if number == 1:
flux = fluxes
variance = variances
else:
flux = fluxes[:, epoch]
variance = variances[:, epoch]
# Calculate the average flux at each extreme of the wave vector (ie mean of the continuum subtraction window)
fvals = np.array([np.nanmean(flux[findBin(minWin[0], wavelength):findBin(minWin[1], wavelength)]),
np.nanmean(flux[findBin(maxWin[0], wavelength):findBin(maxWin[1], wavelength)])])
# Calculate the average uncertainty at each extreme of the wave vector
svals = np.array([meanUncert(variance[findBin(minWin[0], wavelength):findBin(minWin[1], wavelength)]),
meanUncert(variance[findBin(maxWin[0], wavelength):findBin(maxWin[1], wavelength)])])
cont = np.zeros(nBins)
contVar = np.zeros(nBins)
# Find the interpolated linear continuum model
for i in range(nBins):
cont[i], contVar[i] = interpolateVals(wave, fvals, svals, wavelength[i])
# Subtract the continuum from the flux and add the error of the model in quadrature with the spectral error
flux -= cont
variance += contVar
return
# -------------------------------------------------- #
# The next three functions are modified from code #
# provided by <NAME> #
# -------------------------------------------------- #
# ------------------ filterCurve ------------------- #
# -------------------------------------------------- #
# creates a class to hold the transmission function #
# for each band. #
# -------------------------------------------------- #
class filterCurve:
"""A filter"""
def __init__(self):
self.wave = np.array([], 'float')
self.trans = np.array([], 'float')
return
def read(self, file):
# DES filter curves express the wavelengths in nms
if 'DES' in file:
factor = 10.
else:
factor = 1.
file = open(file, 'r')
for line in file.readlines():
if line[0] != '#':
entries = line.split()
self.wave = np.append(self.wave, float(entries[0]))
self.trans = np.append(self.trans, float(entries[1]))
file.close()
# We use Angstroms for the wavelength in the filter transmission file
self.wave = self.wave * factor
return
# -------------------------------------------------- #
# ---------------- readFilterCurve ----------------- #
# -------------------------------------------------- #
# Reads in the filter curves and stores it as the #
# filter curve class. #
# -------------------------------------------------- #
def readFilterCurves(bands, filters):
filterCurves = {}
for f in bands:
filterCurves[f] = filterCurve()
filterCurves[f].read(filters[f])
return filterCurves
# -------------------------------------------------- #
# ----------------- computeABmag ------------------- #
# -------------------------------------------------- #
# computes the AB magnitude for given transmission #
# functions and spectrum (f_lambda). Returns the #
# magnitude and variance. #
# -------------------------------------------------- #
def computeABmag(trans_flux, trans_wave, tmp_wave, tmp_flux, tmp_var):
# Takes and returns variance
# trans_ : transmission function data
# tmp_ : spectral data
# trans/tmp not necessarily defined over the same wavelength range
# first determine the wavelength range over which both are defined
minV = min(trans_wave)
if minV < min(tmp_wave):
minV = min(tmp_wave)
maxV = max(trans_wave)
    if maxV > max(tmp_wave):
        maxV = max(tmp_wave)
interp_wave = []
tmp_flux2 = []
tmp_var2 = []
# Make new vectors for the flux just using that range (assuming spectral binning)
for i in range(len(tmp_wave)):
if minV < tmp_wave[i] < maxV:
interp_wave.append(tmp_wave[i])
tmp_flux2.append(tmp_flux[i])
tmp_var2.append(tmp_var[i])
# interpolate the transmission function onto this range
# the transmission function is interpolated as it is generally much smoother than the spectral data
trans_flux2 = interp1d(trans_wave, trans_flux)(interp_wave)
# And now calculate the magnitude and uncertainty
    c = 2.99792458e18  # speed of light in Angstrom/s
Num = np.nansum(tmp_flux2 * trans_flux2 * interp_wave)
Num_var = np.nansum(tmp_var2 * (trans_flux2 * interp_wave) ** 2)
Den = np.nansum(trans_flux2 / interp_wave)
with np.errstate(divide='raise'):
try:
magAB = -2.5 * np.log10(Num / Den / c) - 48.60
magABvar = 1.17882 * Num_var / (Num ** 2)
except FloatingPointError:
magAB = 99.
magABvar = 99.
return magAB, magABvar
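# Illustrative sketch (not part of the original pipeline): synthetic photometry
# of a spectrum that is flat in f_lambda (1e-17 erg/s/cm^2/A) through a boxcar
# filter spanning 5000-6000 A. The returned AB magnitude should come out near
# 21.4 mag; the variance spectrum used here is arbitrary.
def _example_computeABmag():
    wave = np.arange(4000., 7000., 1.)
    flux = np.full(len(wave), 1e-17)
    var = np.full(len(wave), 1e-38)
    trans_wave = np.arange(5000., 6000., 1.)
    trans_flux = np.ones(len(trans_wave))
    return computeABmag(trans_flux, trans_wave, wave, flux, var)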
# --------------------------------------------------- #
# --------------- uncertainty_cont ------------------ #
# --------------------------------------------------- #
# This function finds the uncertainty in line flux #
# and width measurements. For line flux you can #
# input a range of potential continuum windows and #
# it will randomly pick regions to use for continuum #
# subtraction. You can also input a region over which #
# to randomly choose the integration window. These #
# all also include flux randomization in order to #
# consider the effect of the variance spectrum. #
# You can also look at the effect flux randomization #
# has on the line width measurements FWHM and #
# velocity dispersion. You can also specify to look #
# at the RMS spectrum (flag='rms') for the line width #
# measurements, the default is to look at the provided#
# spectrum as is. The error is calculated through #
# bootstrap resampling using strapNum iterations. #
# The standard deviation of the calculated quantity #
# is then the associated error. #
# --------------------------------------------------- #
def uncertainty_cont(wavelength, flux, variance, strapNum, z, line, pivotLC, winLimMin, winLimMax, winsize, scale,
calc='cont', flag='mean', res=0):
# calc = cont -> continuum subtraction
# calc = win -> integration window
# calc = fwhm -> FWHM line width: can specify flag=rms
# calc = sigma -> line velocity dispersion: can specify flag=rms
# Performs bootstrap resampling in the range of potentially clean continuum to determine
# uncertainties on the flux measurement
# Continuum window in Angstroms - will be scaled according to redshift
# Winsize means the continuum subtraction windows are all the same size, just the locations shift
winsize = winsize/(1+z)
lineMin = line[0]
lineMax = line[1]
    # Regions of potentially clean continuum are passed in and used for the bootstrap
# Calculate the width of the bootstrapping region on each side of the line
lowW = (winLimMin[1]-winLimMin[0])/(1+z)
highW = (winLimMax[1]-winLimMax[0])/(1+z)
    # Check edge conditions: if the bootstrapping region goes outside the spectrograph coverage use the spectrograph
# bounds as the edges
if winLimMin[0] < wavelength[0]:
winLimMin[0] = wavelength[0]
winLimMin[1] = (winLimMin[0] / (1 + z) + lowW) * (1 + z)
if winLimMin[1] > wavelength[line[0]]:
winLimMin[1] = wavelength[line[0]]
if winLimMax[1] > wavelength[-1]:
winLimMax[1] = wavelength[-1]
winLimMax[0] = (winLimMax[1] / (1 + z) - highW) * (1 + z)
if winLimMax[0] < wavelength[line[1]]:
winLimMax[0] = wavelength[line[1]]
# Wavelengths to choose in each window in steps of 0.5A
winMinVect = np.arange(winLimMin[0], winLimMin[1] - (winsize - 0.5) * (1 + z), 0.5 * (1 + z))
winMaxVect = np.arange(winLimMax[0], winLimMax[1] - (winsize - 0.5) * (1 + z), 0.5 * (1 + z))
# Array of random continuum window starting points
randVectMin = len(winMinVect) * np.random.rand(strapNum)
randVectMin = randVectMin.astype(int)
randVectMax = len(winMaxVect) * np.random.rand(strapNum)
randVectMax = randVectMax.astype(int)
# An array of values obtained through bootstrapping to determine uncertainties
vals = np.zeros(strapNum)
for i in range(strapNum):
if calc == 'win':
# subtracts from standard continuum but changes integration window, in this case feed in potential
# integration windows instead of bootstrapping regions
lineMinNew = findBin(winMinVect[randVectMin[i]], wavelength)
lineMaxNew = findBin(winMaxVect[randVectMax[i]], wavelength)
# Performs flux resampling to account for variance spectrum. Flux values shifted by Gaussian scaled by
# variance
varC = np.copy(variance)
fluxC = flux + np.random.normal(size=flux.shape) * (variance ** 0.5)
# Continuum Subtract this new vector
cont_fit_reject(wavelength, fluxC, varC, winLimMin, winLimMax)
# Calculate the flux
lc_mag, lc_mag_err = computeABmag(np.ones(len(wavelength[lineMinNew:lineMaxNew])),
wavelength[lineMinNew:lineMaxNew], wavelength[lineMinNew:lineMaxNew],
fluxC[lineMinNew:lineMaxNew]*scale, varC[lineMinNew:lineMaxNew]*
pow(scale,2))
vals[i], lc_mag_err = magToFlux(lc_mag, lc_mag_err**0.5, pivotLC)
if calc == "cont":
# changes cont region
minWin = [winMinVect[randVectMin[i]], winMinVect[randVectMin[i]] + winsize * (1 + z)]
maxWin = [winMaxVect[randVectMax[i]], winMaxVect[randVectMax[i]] + winsize * (1 + z)]
# Performs flux resampling to account for variance spectrum. Flux values shifted by Gaussian scaled by
# variance
varC = np.copy(variance)
fluxC = flux + np.random.normal(size=flux.shape) * (variance ** 0.5)
# Continuum Subtract this new vector
cont_fit_reject(wavelength, fluxC, varC, minWin, maxWin)
# Calculate the flux
lc_mag, lc_mag_err = computeABmag(np.ones(len(wavelength[lineMin:lineMax])),wavelength[lineMin:lineMax],
wavelength[lineMin:lineMax], fluxC[lineMin:lineMax]*scale,
varC[lineMin:lineMax]*pow(scale, 2))
vals[i], lc_mag_err = magToFlux(lc_mag, lc_mag_err**0.5, pivotLC)
if calc == "fwhm":
# Determine uncertainty in FWHM line measurement
# do flux randomization and continuum subtraction
varC = np.copy(variance)
fluxC = flux + np.random.normal(size=flux.shape) * (variance ** 0.5)
cont_fit_reject(wavelength, fluxC, varC, winLimMin, winLimMax)
if flag == 'rms':
# first calculate the RMS spectrum if requested
fluxC, varC = rmsSpec(fluxC, varC)
vals[i] = fwhm(wavelength[lineMin:lineMax], fluxC[lineMin:lineMax], res)
if calc == "sigma":
# Determine uncertainty in velocity dispersion measurement
# do flux randomization and continuum subtraction
varC = np.copy(variance)
fluxC = flux + np.random.normal(size=flux.shape) * (variance ** 0.5)
cont_fit_reject(wavelength, fluxC, varC, winLimMin, winLimMax)
if flag == 'rms':
# first calculate the RMS spectrum if requested
fluxC, varC = rmsSpec(fluxC, varC)
vals[i] = lineVar(wavelength[lineMin:lineMax], fluxC[lineMin:lineMax], res)
stddev_bs = np.nanstd(vals)
return stddev_bs
# --------------------------------------------------- #
# ----------------------- fwhm ---------------------- #
# --------------------------------------------------- #
# Takes an input spectrum and calculate the FWHM of #
# the provided emission line. It will search over #
# the entire provided wavelength window so just #
# include the relevant region of the spectrum. #
# --------------------------------------------------- #
def fwhm(wave, flux, res):
# First I am smoothing the spectrum
exponential_smooth(flux)
# Find the half maximum
peak = max(flux)
valley = min(flux)
peakLoc = wave[np.where(flux == peak)[0][0]]
peakLocB = findBin(peakLoc, wave)
hm = (peak-valley) / 2 + valley
leftUp = wave[0]
leftDown = wave[peakLocB]
rightUp = wave[-1]
rightDown = wave[peakLocB]
# First search for the half max to the left of the line
for i in range(peakLocB):
# First search going up the line
if flux[i] < hm < flux[i+1]:
leftUp = (wave[i] + wave[i+1])/2
# Then going down the line
if flux[peakLocB-i-1] < hm < flux[peakLocB-i]:
leftDown = (wave[peakLocB-i-1] + wave[peakLocB-i])/2
# Then take the average which will account for any double peaks/noise in the spectrum
left = (leftUp + leftDown)/2
# And now to the right
maxSize = len(wave) - 1
for i in range(maxSize - peakLocB):
# Go up
if flux[peakLocB + i + 1] < hm < flux[peakLocB + i]:
rightDown = (wave[peakLocB + i] + wave[peakLocB + i + 1])/2
# Go down
if flux[maxSize-i] < hm < flux[maxSize-i-1]:
rightUp = (wave[maxSize-i] + wave[maxSize-i-1])/2
right = (rightUp + rightDown)/2
# Now calculate the velocity
# km/s
c = 299792.458
widthObs = (right-left)
widthT = pow(widthObs**2 - res**2,0.5)/2
zLeft = -widthT/peakLoc
zRight = widthT/peakLoc
zComb = (1+zRight)/(1+zLeft)-1
vel = c*((1+zComb)**2-1)/((1+zComb)**2+1)
return vel
# --------------------------------------------------- #
# ---------------------- lineVar -------------------- #
# --------------------------------------------------- #
# Takes an input spectrum and calculate the velocity #
# dispersion of the emission line. It will search #
# over the entire provided wavelength window so just #
# include the relevant region of the spectrum. #
# --------------------------------------------------- #
def lineVar(wave, flux, res):
length = len(wave)
peak = max(flux)
peakLoc = wave[np.where(flux == peak)[0][0]]
    # Calculate the velocity dispersion following the equation in Peterson 2004; the three accumulators below
    # correspond to the main terms in that equation.
Pdl = 0
lPdl = 0
l2Pdl = 0
for i in range(length):
Pdl += flux[i]
lPdl += flux[i] * wave[i]
l2Pdl += flux[i] * pow(wave[i], 2)
lambda0 = lPdl / Pdl
lambda2 = l2Pdl / Pdl
lambda02 = pow(lambda0, 2)
linevar = lambda2 - lambda02
sigma = linevar ** 0.5
c = 299792.458
sigmaT = pow(sigma**2 - res**2, 0.5)
left = peakLoc - sigmaT / 2
right = peakLoc + sigmaT / 2
zLeft = (left - peakLoc) / peakLoc
zRight = (right - peakLoc) / peakLoc
#redshift from lambda_l to lambda_r
zComb = (1 + zRight) / (1 + zLeft) - 1
vel = c * ((1 + zComb) ** 2 - 1) / ((1 + zComb) ** 2 + 1)
return vel
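# Illustrative sketch (not part of the original pipeline): applying fwhm() and
# lineVar() to a synthetic Gaussian emission line centred at 5000 A with
# sigma = 20 A and res = 0. The returned velocities should land close to
# c*FWHM/lambda (roughly 2800-2900 km/s) and c*sigma/lambda (roughly 1200 km/s)
# respectively, with the FWHM value broadened slightly by the exponential smoothing.
def _example_line_widths():
    wave = np.arange(4500., 5500., 1.)
    gauss = np.exp(-0.5 * ((wave - 5000.) / 20.) ** 2)
    vel_fwhm = fwhm(wave, np.copy(gauss), 0.)
    vel_sigma = lineVar(wave, np.copy(gauss), 0.)
    return vel_fwhm, vel_sigma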
# --------------------------------------------------- #
# --------------- exponential_smooth ---------------- #
# --------------------------------------------------- #
# Function to apply an exponential smoothing kernel #
# to the data. Written by <NAME>. #
# --------------------------------------------------- #
def exponential_smooth(fluxes):
number = int(fluxes.size/fluxes.shape[0])
search_pixels = 5
decay = 0.9
window = np.arange(-search_pixels, search_pixels + 1)
weights = decay ** np.abs(window)
weights /= np.sum(weights)
if (number == 1):
flux = fluxes[:]
flux[:] = np.convolve(flux, weights, mode='same')
else:
for epoch in range(fluxes.shape[1]):
flux = fluxes[:, epoch]
flux[:] = np.convolve(flux, weights, mode='same')
# --------------------------------------------------- #
# -------------------- meanSpec --------------------- #
# --------------------------------------------------- #
# Calculates the mean of multiple spectra as well as #
# the corresponding variance spectrum. #
# --------------------------------------------------- #
def meanSpec(flux, variance):
length = len(flux[:,0])
meanFlux = np.zeros(length)
meanVar = np.zeros(length)
for i in range(length):
meanFlux[i] = np.nanmean(flux[i,:])
meanVar[i] = np.nanmean(variance[i,:])
return meanFlux, meanVar
# --------------------------------------------------- #
# -------------------- rmsSpec ---------------------- #
# --------------------------------------------------- #
# Calculates the RMS of the inputted spectra. Will #
# expect fluxes in [wavelength, epoch] format. An #
# exponential smoothing function is applied to the #
# data as a first and last step to mitigate some of #
# the noise. #
# --------------------------------------------------- #
def rmsSpec(flux, variance):
# smooth the input spectra
exponential_smooth(flux)
length = len(flux[:, 0])
epochs = len(flux[0, :])
# Calculate the RMS spectrum, variance propagated through but not used later
mean, meanVar = meanSpec(flux, variance)
rms = np.zeros(length)
rmsVar = np.zeros(length)
rmsVar2 = np.zeros(length)
for b in range(length):
for e in range(epochs):
rms[b] += (flux[b, e] - mean[b]) ** 2
rmsVar2[b] += 4 * rms[b] * (variance[b, e] + meanVar[b])
rms[b] = (rms[b] / (epochs - 1)) ** 0.5
rmsVar2[b] = rmsVar2[b] / ((epochs - 1) ** 2)
rmsVar[b] = rmsVar2[b] * (0.5 / rms[b]) ** 2
# smooth the final RMS spectrum
exponential_smooth(rms)
return rms, rmsVar
# -------------------------------------------------- #
# -------------------- lineLC ---------------------- #
# -------------------------------------------------- #
# Create emission line light curves by integrating #
# the emission lines after local continuum #
# subtraction. The uncertainties due to the variance #
# of the spectrum and the continuum subtraction are #
# estimated through bootstrap resampling. This is #
# done for every emission line from the provided list#
# that is present in the spectrum. #
# -------------------------------------------------- #
def lineLC(dates, lineName, availLines, lineInt, contWinMin, contWinMax, contWinBSMin, contWinBSMax, wavelength,
origFluxes, origVariances, fluxCoadd, numEpochs, scale, z, strapNum, outLoc, source, makeFig, makeFigEpoch):
if makeFig == True:
# Define figure and axis for light curves of all available emission lines
lineAxis = [lineName[i] for i in range(len(lineName)) if availLines[i] == True]
fig_spec, ax_spec = ozplot.plot_share_x(len(lineAxis), source, "Date (MJD)", lineAxis)
for l in range(len(lineName)):
if availLines[l] == True:
line = lineName[l]
            # Copy the flux/variance vectors so you have a non-continuum-subtracted version to use for other lines
fluxes = np.copy(origFluxes)
variances = np.copy(origVariances)
# define some variables for line/continuum windows in observed frame
contMin = np.array(contWinMin[line]) * (1 + z)
contMax = np.array(contWinMax[line]) * (1 + z)
contMinBS = np.array(contWinBSMin[line]) * (1 + z)
contMaxBS = np.array(contWinBSMax[line]) * (1 + z)
# similar for the line integration window but I want the wavelength bin number, not just the wavelength
lineMin = findBin(lineInt[line][0] * (1 + z), wavelength)
lineMax = findBin(lineInt[line][1] * (1 + z), wavelength)
# Perform the continuum subtraction
cont_fit_reject(wavelength, fluxes, variances, contMin, contMax)
lc_mag = np.zeros(numEpochs)
lc_mag_sigma = np.zeros(numEpochs)
lc_flux = np.zeros(numEpochs)
lc_flux_sigma = np.zeros(numEpochs)
total_error = np.zeros(numEpochs)
# Calculate the pivot wavelength associated with each line window
pivotLC = pow(np.nansum(wavelength[lineMin:lineMax]) / np.nansum(1 / wavelength[lineMin:lineMax]), 0.5)
# Calculate magnitudes and fluxes for each line
for epoch in range(numEpochs):
# first calculate magnitudes, save these if you want to compare this instead of fluxes
# Here the transmission function is 1 for all wavelengths within the integration window.
lc_mag[epoch], lc_mag_sigma[epoch] = computeABmag(np.ones(len(wavelength[lineMin:lineMax])),
wavelength[lineMin:lineMax],
wavelength[lineMin:lineMax],
fluxes[lineMin:lineMax, epoch] * scale,
variances[lineMin:lineMax, epoch] * pow(scale, 2))
                # Now convert to flux; this is what is saved. Note: all fluxes here are actually flux densities.
                # This uncertainty only considers the variance spectrum; everything else is accounted for in the next step.
lc_flux[epoch], lc_flux_sigma[epoch] = magToFlux(lc_mag[epoch], lc_mag_sigma[epoch] ** 0.5, pivotLC)
total_error[epoch] = uncertainty_cont(wavelength, origFluxes[:, epoch], origVariances[:, epoch],
strapNum, z, [lineMin, lineMax], pivotLC, contMinBS,
contMaxBS, contMin[1] - contMin[0], scale)
if makeFigEpoch == True:
# Save figures showing spectrum before/after continuum subtraction for each epoch and line
fig_epoch, ax_epoch = ozplot.plot_share_x(2, source + " epoch " + str(epoch), "Wavelength ($\AA$)",
["Before", " After"], [wavelength[0], wavelength[-1]])
for p in range(2):
ax_epoch[p].axvspan(contMinBS[0], contMinBS[1], color='mediumblue', alpha=0.3)
ax_epoch[p].axvspan(contMaxBS[0], contMaxBS[1], color='mediumblue', alpha=0.3)
ax_epoch[p].axvspan(contMin[0], contMin[1], color='mediumblue', alpha=0.5)
ax_epoch[p].axvspan(contMax[0], contMax[1], color='mediumblue', alpha=0.5)
ax_epoch[p].axvspan(wavelength[lineMin], wavelength[lineMax], color='forestgreen', alpha=0.3)
ax_epoch[0].plot(wavelength, origFluxes[:, epoch], color='black')
ax_epoch[1].plot(wavelength, fluxes[:, epoch], color='black')
fig_epoch.savefig(outLoc + source + "_" + lineName[l] + "_epoch_" + str(epoch) + ".png")
plt.close(fig_epoch)
# Scale the line fluxes as with the photometry
lc_flux = lc_flux / scale
total_error = total_error / scale
# Save the data as a light curve with filename outLoc + source + _ + line + .txt
outputLC(dates, lc_flux, total_error, line, outLoc, source)
if makeFig == True:
# plot the light curve on the subplot defined above. First get the index for the axis associated with
# the line being analyzed.
lbin = lineAxis.index(line)
ax_spec[lbin].errorbar(dates, lc_flux, yerr=total_error, fmt='o', color='black')
# make a plot to show the continuum subtraction regions on the coadded spectrum
fig_coadd, ax_coadd = ozplot.plot_share_x(1, source, "Wavelength ($\AA$)", ["Total Coadded Flux (" +
str(scale) +
" erg/s/cm$^2$/$\AA$)"],
[wavelength[0], wavelength[-1]])
ax_coadd[0].axvspan(contMinBS[0], contMinBS[1], color='mediumblue', alpha=0.3)
ax_coadd[0].axvspan(contMaxBS[0], contMaxBS[1], color='mediumblue', alpha=0.3)
ax_coadd[0].axvspan(contMin[0], contMin[1], color='mediumblue', alpha=0.5)
ax_coadd[0].axvspan(contMax[0], contMax[1], color='mediumblue', alpha=0.5)
ax_coadd[0].axvspan(wavelength[lineMin], wavelength[lineMax], color='forestgreen', alpha=0.3)
ax_coadd[0].plot(wavelength, fluxCoadd, color='black')
fig_coadd.savefig(outLoc + source + "_" + lineName[l] + "_coadd.png")
plt.close(fig_coadd)
# Once all the light curves are plotted save the figure as outLoc + source + "_spec.png"
if makeFig == True:
fig_spec.savefig(outLoc + source + "_spec.png")
return
# -------------------------------------------------- #
# ------------------ makePhotoLC --------------------#
# -------------------------------------------------- #
# Makes light curves by applying photometric filters #
# to a series of spectral data. The data is saved #
# as fluxes. #
# -------------------------------------------------- #
def makePhotoLC(dates, bandName, bandPivot, filters, wavelength, origFluxes, origVariances, numEpochs, scale, outLoc,
source, makeFig):
filterCurves = readFilterCurves(bandName, filters)
if makeFig == True:
# Define figure and axis for light curves of all available emission lines
fig_phot, ax_phot = ozplot.plot_share_x(len(bandName), source, "Date (MJD)", bandName)
for b in range(len(bandName)):
mags = np.zeros(numEpochs)
mags_var = np.zeros(numEpochs)
flux = np.zeros(numEpochs)
flux_err = np.zeros(numEpochs)
for e in range(numEpochs):
            # Calculate the magnitude given the transmission function provided
mags[e], mags_var[e] = computeABmag(filterCurves[bandName[b]].trans, filterCurves[bandName[b]].wave,
wavelength, origFluxes[:, e] * scale,
origVariances[:, e] * pow(scale, 2))
# Then convert to fluxes
flux[e], flux_err[e] = magToFlux(mags[e], mags_var[e] ** 0.5, bandPivot[b])
# Scale the fluxes
flux = flux / scale
flux_err = flux_err / scale
# Save the data as a light curve with filename outLoc + source + _ + calc_bandName + .txt
outputLC(dates, flux, flux_err, 'calc_' + bandName[b], outLoc, source)
if makeFig == True:
# plot the light curve on the subplot defined above.
ax_phot[b].errorbar(dates, flux, yerr=flux_err, fmt='o', color='black')
# Once all the light curves are plotted save the figure as outLoc + source + "_makePhot.png"
if makeFig == True:
fig_phot.savefig(outLoc + source + "_makePhot.png")
return
# -------------------------------------------------- #
# ------------------- calcWidth ---------------------#
# -------------------------------------------------- #
# Calculates emission line width (FWHM and velocity #
# dispersion) using the mean and RMS spectra. If #
# possible calculates the BH mass using the R-L #
# relationship. The data is saved to a text file. #
# -------------------------------------------------- #
def calcWidth(wavelength, lineName, lineLoc, availLines, lineInt, lumLoc, contWinMin, contWinMax, contWinBSMin,
contWinBSMax, origFluxes, origVariances, origFluxCoadd, origVarCoadd, z, strapNum, scale, outLoc, source,
makeFig, calcBH):
# open a file to save the data to - outLoc + source + _vel.txt
out = open(outLoc + source + "_vel_and_mass.txt", 'w')
# Type (Mean/RMS), Measure (FWHM, Vel Disp)
    out.write("Line Type Measure Vel Vel_Err Lag Lag_Err_Min Lag_Err_Max Mass Mass_Err_Min Mass_Err_Max\n")
# Convert wavelength vector to rest frame
wave = wavelength/(1+z)
for l in range(len(lineName)):
if availLines[l] == True:
line = lineName[l]
# If calcBH == True estimate BH mass from the R-L relationship. Here I will calculate the lag. If you want
# to use the measured lag feed that in here. If the luminosity needed isn't in the spectroscopic window
# I will just give nan for the black hole mass. The luminosity is determined from the coadded flux
if calcBH == True:
lum, lumerr = luminosity(wavelength, origFluxCoadd, origVarCoadd, z, lumLoc[l]*(1+z), strapNum, scale)
if np.isnan(lum) == True:
lag = np.nan
lag_err_min = np.nan
lag_err_max = np.nan
elif line == 'CIV':
lag, lag_err_max, lag_err_min = RL_CIV(lum, lumerr)
elif line == 'MgII':
lag, lag_err_max, lag_err_min = RL_MgII(lum, lumerr)
elif line == 'Hbeta':
lag, lag_err_max, lag_err_min = RL_Hbeta(lum, lumerr)
else:
lag = np.nan
lag_err_min = np.nan
lag_err_max = np.nan
# Calculate the resolution of the spectrograph at the specified wavelength
res = findRes(lineLoc[l], z)
# define some variables for line/continuum windows in rest frame
contMin = np.array(contWinMin[line])
contMax = np.array(contWinMax[line])
contMinBS = np.array(contWinBSMin[line])
contMaxBS = np.array(contWinBSMax[line])
# similar for the line integration window but I want the wavelength bin number, not just the wavelength
lineMin = findBin(lineInt[line][0], wave)
lineMax = findBin(lineInt[line][1], wave)
fluxes = np.copy(origFluxes)
variances = np.copy(origVariances)
fluxCoadd = np.copy(origFluxCoadd)
varCoadd = np.copy(origVarCoadd)
# Perform the continuum subtraction on epochs and coadd
cont_fit_reject(wave, fluxes, variances, contMin, contMax)
cont_fit_reject(wave, fluxCoadd, varCoadd, contMin, contMax)
# First look at the mean spectrum, let's smooth it
# FWHM
vel_mean_fwhm = fwhm(wave[lineMin:lineMax], fluxCoadd[lineMin:lineMax], res)
err_mean_fwhm = uncertainty_cont(wave, origFluxCoadd, origVarCoadd, strapNum, 0, [lineMin, lineMax], 0,
contMinBS, contMaxBS, contMin[1] - contMin[0], scale, calc='fwhm',
flag='mean', res=res)
# Sigma
vel_mean_sigma = lineVar(wave[lineMin:lineMax], fluxCoadd[lineMin:lineMax], res)
err_mean_sigma = uncertainty_cont(wave, origFluxCoadd, origVarCoadd, strapNum, 0, [lineMin, lineMax], 0,
contMinBS, contMaxBS, contMin[1] - contMin[0], scale, calc='sigma',
flag='mean', res=res)
# Now look at the RMS spectrum
rms, rms_var = rmsSpec(fluxes, variances)
vel_rms_fwhm = fwhm(wave[lineMin:lineMax], rms[lineMin:lineMax], res)
err_rms_fwhm = uncertainty_cont(wave, origFluxes, origVariances, strapNum, 0, [lineMin, lineMax], 0,
contMinBS, contMaxBS, contMin[1] - contMin[0], scale, calc='fwhm',
flag='rms', res=res)
# Sigma
            vel_rms_sigma = lineVar(wave[lineMin:lineMax], rms[lineMin:lineMax], res)
err_rms_sigma = uncertainty_cont(wave, origFluxes, origVariances, strapNum, 0, [lineMin, lineMax], 0,
contMinBS, contMaxBS, contMin[1] - contMin[0], scale, calc='sigma',
flag='rms', res=res)
if calcBH == True and np.isnan(lag) == False:
# Calculate BH mass for all 4 line measurements
mass_mean_fwhm, mass_min_mean_fwhm, mass_max_mean_fwhm = \
blackHoleMass(lag, lag_err_min, lag_err_max, vel_mean_fwhm, err_mean_fwhm)
mass_mean_sigma, mass_min_mean_sigma, mass_max_mean_sigma = \
blackHoleMass(lag, lag_err_min, lag_err_max, vel_mean_sigma, err_mean_sigma)
mass_rms_fwhm, mass_min_rms_fwhm, mass_max_rms_fwhm = \
blackHoleMass(lag, lag_err_min, lag_err_max, vel_rms_fwhm, err_rms_fwhm)
mass_rms_sigma, mass_min_rms_sigma, mass_max_rms_sigma = \
blackHoleMass(lag, lag_err_min, lag_err_max, vel_rms_sigma, err_rms_sigma)
else:
mass_mean_fwhm, mass_min_mean_fwhm, mass_max_mean_fwhm = np.nan, np.nan, np.nan
mass_mean_sigma, mass_min_mean_sigma, mass_max_mean_sigma = np.nan, np.nan, np.nan
mass_rms_fwhm, mass_min_rms_fwhm, mass_max_rms_fwhm = np.nan, np.nan, np.nan
mass_rms_sigma, mass_min_rms_sigma, mass_max_rms_sigma = np.nan, np.nan, np.nan
out.write(line + " MEAN FWHM %d %d %d %d %d %2.2f %2.2f %2.2f \n" %(vel_mean_fwhm, err_mean_fwhm, lag,
lag_err_min, lag_err_max,
mass_mean_fwhm, mass_min_mean_fwhm,
mass_max_mean_fwhm))
out.write(line + " MEAN Sigma %d %d %d %d %d %2.2f %2.2f %2.2f \n" %(vel_mean_sigma, err_mean_sigma, lag,
lag_err_min, lag_err_max,
mass_mean_sigma, mass_min_mean_sigma,
mass_max_mean_sigma))
out.write(line + " RMS FWHM %d %d %d %d %d %2.2f %2.2f %2.2f \n" %(vel_rms_fwhm, err_rms_fwhm, lag,
lag_err_min, lag_err_max, mass_rms_fwhm,
mass_min_rms_fwhm, mass_max_rms_fwhm))
out.write(line + " RMS Sigma %d %d %d %d %d %2.2f %2.2f %2.2f \n" %(vel_rms_sigma, err_rms_sigma,
lag,lag_err_min, lag_err_max,
mass_rms_sigma, mass_min_rms_sigma,
mass_max_rms_sigma))
if makeFig == True:
# Define figure and axis for mean and rms spectrum
fig_width, ax_width = ozplot.plot_share_x(2, source, "Wavelength ($\AA$)", ["Mean Flux", "RMS Flux"],
[contMin[1], contMax[0]])
ax_width[0].plot(wave, fluxCoadd, color='black')
ax_width[0].axvline(wave[lineMin], color='forestgreen')
ax_width[0].axvline(wave[lineMax], color='forestgreen')
ax_width[1].plot(wave, rms, color='black')
ax_width[1].axvline(wave[lineMin], color='forestgreen')
ax_width[1].axvline(wave[lineMax], color='forestgreen')
fig_width.savefig(outLoc + source + "_" + line + "_width.png")
plt.close(fig_width)
out.close()
return
# -------------------------------------------------- #
# -------------------- findRes ----------------------#
# -------------------------------------------------- #
# The line width measurements are dependent on the #
# resolution of the spectrograph. The OzDES spectra #
# are made up of two arms of AAOmega with different #
# resolutions. This function will find the #
# resolution at the emission line in question. You #
# will need to modify this if you are using a #
# different spectrograph. The rest frame emission #
# line wavelength given as input is converted to #
# the observed frame internally. #
# -------------------------------------------------- #
def findRes(line, z):
#Use OzDES data - splice 5700 and resolution for red/blue arms
splice = 5700
resO = [1600, 1490] #blue/red arm of spectrograph resolution
obsLine = line*(1+z)
if obsLine < splice:
dL = obsLine/resO[0]
else:
dL = obsLine/resO[1]
return dL
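# Illustrative sketch (not part of the original pipeline): for H-beta
# (4861 A rest frame) at z = 0.2 the observed wavelength is ~5833 A, which
# lands on the red arm, so findRes() returns 5833/1490, roughly 3.9 A.
def _example_findRes():
    return findRes(4861., 0.2)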
# --------------------------------------------------- #
# ---------------- comoving_distance ---------------- #
# --------------------------------------------------- #
# Function to calculate the comoving distance at a #
# given redshift. Written by <NAME>. #
# --------------------------------------------------- #
def comoving_distance(z):
# returns the comoving distance in Mpc
# c in km/s
c = 299792.458
# H0 in km/s/Mpc
H0 = 70.0
f_E = lambda x: 1.0 / np.sqrt(0.3 * (1 + x) ** 3 + 0.7)
d_C = c / H0 * fixed_quad(f_E, 0.0, z, n=500)[0]
return d_C
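# Illustrative sketch (not part of the original pipeline): with the cosmology
# hard-coded above (flat, H0 = 70 km/s/Mpc, Omega_m = 0.3) the comoving
# distance at z = 1 comes out at roughly 3300 Mpc.
def _example_comoving_distance():
    return comoving_distance(1.0)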
# --------------------------------------------------- #
# ------------------- luminosity -------------------- #
# --------------------------------------------------- #
# Calculates the lambda L_lambda luminosity for the #
# specified wavelength and gives uncertainty via #
# bootstrapping. If the luminosity is not present in #
# the spectrum return nan. #
# --------------------------------------------------- #
def luminosity(wavelength, flux, variance, z, lum, strapNum, scale):
# Check if the luminosity windows used (lum +/- 10 A in observed frame) are present in the spectrum. If not return
# nan for the luminosity
if wavelength[0] < lum - 10 and lum + 10 < wavelength[-1]:
lumBin = findBin(lum, wavelength)
# calculate the mean flux around the specified luminosity
fluxV = np.nanmean(flux[lumBin-2:lumBin+2]) * scale
# calculate the range of fluxes based on bootstrapping
flux_std = Lum_uncertainty(wavelength, flux, variance, lum, strapNum, scale)
# scale by luminosity - we want lambda L_lambda
fluxV = fluxV*lum
flux_std = flux_std*lum
# flux should be in erg/s/cm^2 the above statement gets rid of the angstroms
d_C = comoving_distance(z)
d_L = (1.0 + z) * d_C
# convert d_L from Mpc to cm
d_L *= 3.0857E24
        # scale factor used for uncertainty propagation
scalefact = 4. * np.pi * d_L ** 2
L = fluxV * scalefact
L_std = flux_std * scalefact
# calculate log Luminosity and error
lgL = np.log10(L)
err = lgL- np.log10(L-L_std)
else:
lgL = np.nan
err = np.nan
return lgL, err
# --------------------------------------------------- #
# ---------------- Lum_uncertainty ------------------ #
# --------------------------------------------------- #
# Calculates the uncertainty due to flux resampling #
# and shifting luminosity window. #
# --------------------------------------------------- #
def Lum_uncertainty(wavelength, flux, variance, lum, strapNum, scale):
    # Performs bootstrap resampling within a window of 10 Angstroms on either side of the luminosity wavelength to
    # determine the uncertainty on the flux
nBins = len(wavelength)
winLim = [findBin(lum-10, wavelength), findBin(lum+10, wavelength)]
# vector of wavelengths within winLim spaced by 1 Angstrom
winVect = np.arange(winLim[0], winLim[1]+1, 1)
# Array of random continuum window starting points
randVect = len(winVect)*np.random.rand(strapNum)
randVect = randVect.astype(int)
fluxes = np.zeros(strapNum)
# For each iteration do flux resampling and calculate the line flux and shift window slightly
for i in range(strapNum):
varC = np.copy(variance)
fluxC = np.zeros(nBins)
for w in range(nBins):
err = varC[w] ** 0.5
fluxC[w] = np.random.normal(flux[w], err)
fluxes[i] = np.nanmean(fluxC[winVect[randVect[i]] - 2:winVect[randVect[i]] + 2]) * scale
return np.nanstd(fluxes)
# --------------------------------------------------- #
# -------------------- RL_CIV ----------------------- #
# --------------------------------------------------- #
# Radius Luminosity using CIV line and L1350 from #
# Hoormann et al 2019. L and L_std are log_10. #
# --------------------------------------------------- #
def RL_CIV(L, L_std):
# From Hoormann et al 2019 using L1350
lag = pow(10, 0.81 + 0.47 * (L - 44))
lag_err_p = abs(pow(10, (0.81 + 0.09) + (0.47 + 0.03) * ((L + L_std) - 44)) - lag)
lag_err_m = abs(pow(10, (0.81 - 0.09) + (0.47 - 0.03) * ((L - L_std) - 44)) - lag)
return lag, lag_err_p, lag_err_m
# --------------------------------------------------- #
# -------------------- RL_MgII ---------------------- #
# --------------------------------------------------- #
# Radius Luminosity using MgII line and L3000 from #
# Trakhenbrot & Netzer 2012 best fit BCES method. #
# L and L_std are log_10. #
# --------------------------------------------------- #
def RL_MgII(L, L_std):
lag = pow(10, 1.34 + 0.615 * (L - 44))
lag_err_p = abs(pow(10, (1.34 + 0.019) + (0.615 + 0.014) * ((L + L_std) - 44)) - lag)
lag_err_m = abs(pow(10, (1.34 - 0.019) + (0.615 - 0.014) * ((L - L_std) - 44)) - lag)
return lag, lag_err_p, lag_err_m
# --------------------------------------------------- #
# -------------------- RL_Hbeta --------------------- #
# --------------------------------------------------- #
# Radius Luminosity using Hbeta line and L5100 from #
# Bentz et al 2013. L and L_std are log_10. #
# --------------------------------------------------- #
def RL_Hbeta(L, L_std):
lag = pow(10, 1.527 + 0.533 * (L - 44))
lag_err_p = abs(pow(10, (1.527 + 0.031) + (0.533 + 0.035) * ((L + L_std) - 44)) - lag)
lag_err_m = abs(pow(10, (1.527 - 0.031) + (0.533 - 0.033) * ((L - L_std) - 44)) - lag)
return lag, lag_err_p, lag_err_m
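# Illustrative sketch (not part of the original pipeline): at
# log10(lambda L_5100) = 44 the relation above gives an H-beta lag of
# 10**1.527, roughly 34 days; the 0.1 dex uncertainty used here is arbitrary.
def _example_RL_Hbeta():
    return RL_Hbeta(44.0, 0.1)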
# --------------------------------------------------- #
# ------------------ blackHoleMass ------------------ #
# --------------------------------------------------- #
# Given a lag and velocity calculate the black hole #
# mass. Given in units of 10^9 Solar Masses. #
# --------------------------------------------------- #
def blackHoleMass(lag, lErrMin, lErrMax, velocity, vErr):
    # convert everything to SI units (lag to seconds, velocity to m/s)
G = 6.67*10**-11
c = 2.998*10**8
Msun = 1.989*10**30
lag = lag*86400
lErrMin = lErrMin*86400
lErrMax = lErrMax*86400
velocity = velocity*1000
vErr = vErr*1000
# Define f factor
f = 4.47
ferr = 1.25 #Woo et al 2014
# Calculate Mass
mass = f*(pow(velocity, 2)*c*lag/G)/Msun/10**9
sigmaMin = mass*pow((ferr/f)**2 + (2*vErr/velocity)**2 + (lErrMin/lag)**2 ,0.5)
sigmaMax = mass*pow((ferr/f)**2 + (2*vErr/velocity)**2 + (lErrMax/lag)**2 ,0.5)
return mass, sigmaMin, sigmaMax
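# Illustrative sketch (not part of the original pipeline): a 100 day lag with a
# 5000 km/s line width (10% uncertainties on both) gives a virial mass of
# roughly 2, i.e. ~2e9 solar masses, for the f = 4.47 factor used above.
def _example_blackHoleMass():
    return blackHoleMass(100., 10., 10., 5000., 500.)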
# -------------------------------------------------- #
# Modified from a function originally provided by #
# <NAME> #
# -------------------------------------------------- #
# ------------------ Spectrumv18 ------------------- #
# -------------------------------------------------- #
# Read in spectral data assuming the format from v18 #
# of the OzDES reduction pipeline. Modify if your #
# input data is stored differently #
# -------------------------------------------------- #
class Spectrumv18(object):
def __init__(self, filepath=None):
assert filepath is not None
self.filepath = filepath
try:
self.data = fits.open(filepath)
except IOError:
print("Error: file {0} could not be found".format(filepath))
exit()
data = fits.open(filepath)
self.combinedFlux = data[0]
self.combinedVariance = data[1]
self.combinedPixels = data[2]
self.numEpochs = int((np.size(data) - 3) / 3)
self.field = self.data[3].header['SOURCEF'][19:21]
self.cdelt1 = self.combinedFlux.header['cdelt1'] # Wavelength interval between subsequent pixels
self.crpix1 = self.combinedFlux.header['crpix1']
self.crval1 = self.combinedFlux.header['crval1']
self.n_pix = self.combinedFlux.header['NAXIS1']
self.RA = self.combinedFlux.header['RA']
self.DEC = self.combinedFlux.header['DEC']
self.fluxCoadd = self.combinedFlux.data
self.varianceCoadd = self.combinedVariance.data
self.badpixCoadd = self.combinedPixels.data
self._wavelength = None
self._flux = None
self._variance = None
self._badpix = None
self._dates = None
self._run = None
self._ext = None
self._qc = None
self._exposed = None
@property
def wavelength(self):
"""Define wavelength solution."""
if getattr(self, '_wavelength', None) is None:
wave = ((np.arange(self.n_pix) - self.crpix1) * self.cdelt1) + self.crval1
self._wavelength = wave
return self._wavelength
@property
def flux(self):
if getattr(self, '_flux', None) is None:
self._flux = np.zeros((5000, self.numEpochs), dtype=float)
for i in range(self.numEpochs):
self._flux[:, i] = self.data[i * 3 + 3].data
return self._flux
@property
def variance(self):
if getattr(self, '_variance', None) is None:
self._variance = np.zeros((5000, self.numEpochs), dtype=float)
for i in range(self.numEpochs):
self._variance[:, i] = self.data[i * 3 + 4].data
return self._variance
@property
def badpix(self):
if getattr(self, '_badpix', None) is None:
self._badpix = np.zeros((5000, self.numEpochs), dtype=float)
for i in range(self.numEpochs):
self._badpix[:, i] = self.data[i * 3 + 5].data
return self._badpix
@property
def dates(self):
if getattr(self, '_dates', None) is None:
self._dates = np.zeros(self.numEpochs, dtype=float)
for i in range(self.numEpochs):
self._dates[i] = round(self.data[i * 3 + 3].header['UTMJD'],3)
                # this gives the Modified Julian Date (UTC) at which the observation was taken
return self._dates
@property
def ext(self):
if getattr(self, '_ext', None) is None:
self._ext = []
for i in range(self.numEpochs):
self._ext.append(i * 3 + 3) # gives the extension in original fits file
return self._ext
@property
def run(self):
if getattr(self, '_run', None) is None:
self._run = []
for i in range(self.numEpochs):
source = self.data[i * 3 + 3].header['SOURCEF']
self._run.append(int(source[3:6])) # this gives the run number of the observation
return self._run
@property
def qc(self):
if getattr(self, '_qc', None) is None:
self._qc = []
for i in range(self.numEpochs):
self._qc.append(self.data[i * 3 + 3].header['QC'])
                # this tells you if there were any problems with the spectra that need to be masked out
return self._qc
@property
def exposed(self):
if getattr(self, '_exposed', None) is None:
self._exposed = []
for i in range(self.numEpochs):
self._exposed.append(self.data[i * 3 + 3].header['EXPOSED'])
# this will give you the exposure time of each observation
return self._exposed
# -------------------------------------------------- #
# ------------------- calibSpec -------------------- #
# -------------------------------------------------- #
# This function does the bulk of the work. It will #
# 1) determine extensions which can be calibrated #
# 2) calculate the scale factors #
# 3) calculate the warping function #
# 4) output new fits file with scaled spectra #
# -------------------------------------------------- #
def calibSpec(obj_name, spectra, photo, spectraName, photoName, outBase, bands, filters, centers, plotFlag, coaddFlag,
redshift):
# Assumes scaling given is of the form
# gScale = scaling[0,:] gError = scaling[3,:]
# rScale = scaling[1,:] rError = scaling[4,:]
# iScale = scaling[2,:] iError = scaling[5,:]
# inCoaddWeather = scaling[6,:]
# inCoaddPhoto = scaling[7,:]
# gMag = scaling[8,:] gMagErr = scaling[9,:]
# rMag = scaling[10,:] rMagErr = scaling[11,:]
# iMag = scaling[12,:] iMagErr = scaling[13,:]
# First we decide which extensions are worth scaling
extensions, noPhotometry, badQC = prevent_Excess(spectra, photo, bands)
# Then we calculate the scale factors
nevermind, scaling = scaling_Matrix(spectra, extensions, badQC, noPhotometry, photo, bands, filters)
# Remove last minute trouble makers
extensions = [e for e in extensions if e not in nevermind]
badQC = badQC + nevermind
# And finally warp the data
for s in extensions:
# scale the spectra
if plotFlag != False:
plotName = plotFlag + obj_name + "_" + str(s)
else:
plotName = False
spectra.flux[:, s], spectra.variance[:, s] = warp_spectra(scaling[0:3, s], scaling[3:6, s], spectra.flux[:, s],
spectra.variance[:, s], spectra.wavelength, centers,
plotName)
if coaddFlag == False:
create_output_single(obj_name, extensions, scaling, spectra, noPhotometry, badQC, spectraName, photoName,
outBase, redshift)
elif coaddFlag in ['Run', 'Date']:
coadd_output(obj_name, extensions, scaling, spectra, noPhotometry, badQC, spectraName, photoName, outBase,
plotFlag, coaddFlag, redshift)
else:
print("What do you want me to do with this data? Please specify output type.")
return
# -------------------------------------------------- #
# ---------------- prevent_Excess ------------------ #
# -------------------------------------------------- #
# This function removes extensions from the list to #
# calibrate because of insufficient photometric data #
# or bad quality flags #
# -------------------------------------------------- #
def prevent_Excess(spectra, photo, bands):
    # First, find the latest and earliest dates for which we have photometry in every band: for each band take
    # its latest (earliest) photometric date, then take the minimum (maximum) of those values across the bands.
    # Spectra outside this window cannot be calibrated because we linearly interpolate between photometric data
    # points to estimate the magnitudes at the time of each spectroscopic observation.
maxPhot = np.zeros(3)
for e in range(len(photo['Date'][:])):
if photo['Band'][e] == bands[0]:
if photo['Date'][e] > maxPhot[0]:
maxPhot[0] = photo['Date'][e]
if photo['Band'][e] == bands[1]:
if photo['Date'][e] > maxPhot[1]:
maxPhot[1] = photo['Date'][e]
if photo['Band'][e] == bands[2]:
if photo['Date'][e] > maxPhot[2]:
maxPhot[2] = photo['Date'][e]
photLim = min(maxPhot)
minPhot = np.array([100000, 100000, 100000])
for e in range(len(photo['Date'][:])):
if photo['Band'][e] == bands[0]:
if photo['Date'][e] < minPhot[0]:
minPhot[0] = photo['Date'][e]
if photo['Band'][e] == bands[1]:
if photo['Date'][e] < minPhot[1]:
minPhot[1] = photo['Date'][e]
if photo['Band'][e] == bands[2]:
if photo['Date'][e] < minPhot[2]:
minPhot[2] = photo['Date'][e]
photLimMin = max(minPhot)
noPhotometry = []
badQC = []
allowedQC = ['ok', 'backup']
for s in range(spectra.numEpochs):
# Remove data with insufficient photometry
if spectra.dates[s] > photLim:
noPhotometry.append(s)
if spectra.dates[s] < photLimMin:
noPhotometry.append(s)
# Only allow spectra with quality flags 'ok' and 'backup'
if spectra.qc[s] not in allowedQC:
badQC.append(s)
extensions = []
# Make a list of extensions which need to be analyzed
for s in range(spectra.numEpochs):
if s not in noPhotometry and s not in badQC:
extensions.append(s)
return extensions, noPhotometry, badQC
# -------------------------------------------------- #
# ---------------- scaling_Matrix ------------------ #
# -------------------------------------------------- #
# finds the nearest photometry and interpolates mags #
# to find values at the time of the spectroscopic #
# observations. Calculates the mag that would be #
# observed from the spectra and calculates the scale #
# factor to bring them into agreement. Saves the #
# data in the scaling matrix. #
# -------------------------------------------------- #
def scaling_Matrix(spectra, extensions, badQC, noPhotometry, photo, bands, filters):
# scale factors for each extension saved in the following form
# gScale = scaling[0,:] gError = scaling[3,:]
# rScale = scaling[1,:] rError = scaling[4,:]
# iScale = scaling[2,:] iError = scaling[5,:]
# inCoaddWeather = scaling[6,:]
# inCoaddPhoto = scaling[7,:]
# gMag = scaling[8,:] gMagError = scaling[9,:] (interpolated from neighbouring observations)
# rMag = scaling[10,:] rMagError = scaling[11,:]
# iMag = scaling[12,:] iMagError = scaling[13,:]
scaling = np.zeros((14, spectra.numEpochs))
# Judge goodness of spectra
for e in range(spectra.numEpochs):
if e in badQC:
scaling[6, e] = False
else:
scaling[6, e] = True
if e in noPhotometry:
scaling[7, e] = False
else:
scaling[7, e] = True
ozdesPhoto = np.zeros((3, spectra.numEpochs))
desPhoto = np.zeros((3, spectra.numEpochs))
ozdesPhotoU = np.zeros((3, spectra.numEpochs))
desPhotoU = np.zeros((3, spectra.numEpochs))
filterCurves = readFilterCurves(bands, filters)
nevermind = []
for e in extensions:
# Find OzDES photometry
ozdesPhoto[0, e], ozdesPhotoU[0, e] = computeABmag(filterCurves[bands[0]].trans, filterCurves[bands[0]].wave,
spectra.wavelength, spectra.flux[:, e],
spectra.variance[:, e])
ozdesPhoto[1, e], ozdesPhotoU[1, e] = computeABmag(filterCurves[bands[1]].trans, filterCurves[bands[1]].wave,
spectra.wavelength, spectra.flux[:, e],
spectra.variance[:, e])
ozdesPhoto[2, e], ozdesPhotoU[2, e] = computeABmag(filterCurves[bands[2]].trans, filterCurves[bands[2]].wave,
spectra.wavelength, spectra.flux[:, e],
spectra.variance[:, e])
        # Sometimes the total flux in a band is zero, which causes problems further down the line and is most
        # noticeable when the calculated magnitude is nan. This can happen when the data are very noisy, or because
        # of the occasional negative spectrum, a known artifact that is more common in early OzDES runs. If such an
        # observation is not already cut by its quality flag it starts being ignored here, and the extensions
        # ignored in this way are eventually saved with the badQC extensions.
if np.isnan(ozdesPhoto[:, e]).any() == True:
nevermind.append(e)
# Find DES photometry
desPhoto[:, e], desPhotoU[:, e] = des_photo(photo, spectra.dates[e], bands)
scaling[8, e] = desPhoto[0, e]
scaling[10, e] = desPhoto[1, e]
scaling[12, e] = desPhoto[2, e]
scaling[9, e] = desPhotoU[0, e]
scaling[11, e] = desPhotoU[1, e]
scaling[13, e] = desPhotoU[2, e]
# Find Scale Factor
scaling[0, e], scaling[3, e] = scale_factors(desPhoto[0, e] - ozdesPhoto[0, e],
desPhotoU[0, e] + ozdesPhotoU[0, e])
scaling[1, e], scaling[4, e] = scale_factors(desPhoto[1, e] - ozdesPhoto[1, e],
desPhotoU[1, e] + ozdesPhotoU[1, e])
scaling[2, e], scaling[5, e] = scale_factors(desPhoto[2, e] - ozdesPhoto[2, e],
desPhotoU[2, e] + ozdesPhotoU[2, e])
return nevermind, scaling
# -------------------------------------------------- #
# --------------- interpolatePhot ----------------- #
# -------------------------------------------------- #
# Performs linear interpolation and propagates the #
# uncertainty to return you a variance. #
# -------------------------------------------------- #
def interpolatePhot(x, y, s, val):
# takes sigma returns variance
# x - x data points (list)
# y - y data points (list)
# s - sigma on y data points (list)
# val - x value to interpolate to (number)
mag = y[0] + (val - x[0]) * (y[1] - y[0]) / (x[1] - x[0])
err = s[0] ** 2 + (s[0] ** 2 + s[1] ** 2) * ((val - x[0]) / (x[1] - x[0])) ** 2
return mag, err
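# -------------------------------------------------- #
# Illustrative example (values are made up, not from #
# the pipeline): interpolate halfway between epochs. #
# -------------------------------------------------- #
# mag, var = interpolatePhot(x=[57000.0, 57010.0], y=[19.8, 20.0],
#                            s=[0.02, 0.03], val=57005.0)
# # mag -> 19.9; var is the propagated variance (sigma squared)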
# -------------------------------------------------- #
# ------------------ des_photo -------------------- #
# -------------------------------------------------- #
# Finds nearest photometry on both sides of spectral #
# observations and interpolates to find value at the #
# time of the spectral observation #
# -------------------------------------------------- #
def des_photo(photo, spectral_mjd, bands):
"""Takes in an mjd from the spectra, looks through a light curve file to find the nearest photometric epochs and
performs linear interpolation to get estimate at date, return the photo mags."""
# Assumes dates are in chronological order!!!
for l in range(len(photo['Date']) - 1):
if photo['Band'][l] == bands[0] and photo['Date'][l] < spectral_mjd < photo['Date'][l + 1]:
g_date_v = np.array([photo['Date'][l], photo['Date'][l + 1]])
g_mag_v = np.array([photo['Mag'][l], photo['Mag'][l + 1]])
g_err_v = np.array([photo['Mag_err'][l], photo['Mag_err'][l + 1]])
if photo['Band'][l] == bands[1] and photo['Date'][l] < spectral_mjd < photo['Date'][l + 1]:
r_date_v = np.array([photo['Date'][l], photo['Date'][l + 1]])
r_mag_v = np.array([photo['Mag'][l], photo['Mag'][l + 1]])
r_err_v = np.array([photo['Mag_err'][l], photo['Mag_err'][l + 1]])
if photo['Band'][l] == bands[2] and photo['Date'][l] < spectral_mjd < photo['Date'][l + 1]:
i_date_v = np.array([photo['Date'][l], photo['Date'][l + 1]])
i_mag_v = np.array([photo['Mag'][l], photo['Mag'][l + 1]])
i_err_v = np.array([photo['Mag_err'][l], photo['Mag_err'][l + 1]])
g_mag, g_mag_err = interpolatePhot(g_date_v, g_mag_v, g_err_v, spectral_mjd)
r_mag, r_mag_err = interpolatePhot(r_date_v, r_mag_v, r_err_v, spectral_mjd)
i_mag, i_mag_err = interpolatePhot(i_date_v, i_mag_v, i_err_v, spectral_mjd)
return [g_mag, r_mag, i_mag], [g_mag_err, r_mag_err, i_mag_err]
# -------------------------------------------------- #
# ---------------- scale_factors ------------------ #
# -------------------------------------------------- #
# Calculates the scale factor and variance needed to #
# change spectroscopically derived magnitude to the #
# observed photometry. #
# -------------------------------------------------- #
def scale_factors(mag_diff, mag_diff_var):
# takes and returns variance
flux_ratio = np.power(10., 0.4 * mag_diff) # f_synthetic/f_photometry
scale_factor = (1. / flux_ratio)
scale_factor_sigma = mag_diff_var * (scale_factor * 0.4 * 2.3) ** 2 # ln(10) ~ 2.3
return scale_factor, scale_factor_sigma
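# -------------------------------------------------- #
# Quick sanity check (illustrative numbers only): a  #
# 0.5 mag offset gives a flux ratio of 10**(0.4*0.5) #
# ~ 1.585, i.e. a scale factor of ~0.631.            #
# -------------------------------------------------- #
# s, s_var = scale_factors(0.5, 0.01)
# # s ~ 0.631, s_var = 0.01 * (0.631 * 0.4 * 2.3)**2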
# -------------------------------------------------- #
# ----------------- warp_spectra ------------------ #
# -------------------------------------------------- #
# Fits polynomial to scale factors and estimates #
# associated uncertainties with gaussian processes. #
# If the plotFlag variable is not False it will save #
# some diagnostic plots. #
# -------------------------------------------------- #
def warp_spectra(scaling, scaleErr, flux, variance, wavelength, centers, plotFlag):
# associate scale factors with centers of bands and fit 2D polynomial to form scale function.
scale = InterpolatedUnivariateSpline(centers, scaling, k=2)
fluxScale = flux * scale(wavelength)
# add in Gaussian process to estimate uncertainties, /10**-17 because it gets a bit panicky if you use small numbers
stddev = (scaleErr ** 0.5) / 10 ** -17
scale_v = scaling / 10 ** -17
kernel = kernels.RBF(length_scale=300, length_scale_bounds=(.01, 2000.0))
gp = GaussianProcessRegressor(kernel=kernel, alpha=stddev**2)
xprime = np.atleast_2d(centers).T
yprime = np.atleast_2d(scale_v).T
gp.fit(xprime, yprime)
xplot_prime = np.atleast_2d(wavelength).T
y_pred, sigma = gp.predict(xplot_prime, return_std=True)
y_pred = y_pred[:,0]
sigModel = (sigma/y_pred)*scale(wavelength)
# now scale the original variance and combine with scale factor uncertainty
varScale = variance * pow(scale(wavelength), 2) + sigModel ** 2
if plotFlag != False:
figa, ax1a, ax2a = ozplot.makeFigDouble(plotFlag, "Wavelength ($\AA$)", "f$_\lambda$ (arbitrary units)",
"f$_\lambda$ (10$^{-17}$ erg/s/cm$^2$/$\AA$)", [wavelength[0], wavelength[-1]])
ax1a.plot(wavelength, flux, color='black', label="Before Calibration")
ax1a.legend(loc=1, frameon=False, prop={'size': 20})
ax2a.plot(wavelength, fluxScale / 10 ** -17, color='black', label="After Calibration")
ax2a.legend(loc=1, frameon=False, prop={'size': 20})
plt.savefig(plotFlag + "_beforeAfter.png")
plt.close(figa)
figb, ax1b, ax2b = ozplot.makeFigDouble(plotFlag, "Wavelength ($\AA$)", "f$_\lambda$ (10$^{-17}$ erg/s/cm$^2$/$\AA$)",
"% Uncertainty", [wavelength[0], wavelength[-1]])
ax1b.plot(wavelength, fluxScale / 10 ** -17, color='black')
ax2b.plot(wavelength, 100*abs(pow(varScale, 0.5)/fluxScale), color='black', linestyle='-', label='Total')
ax2b.plot(wavelength, 100*abs(sigModel/fluxScale), color='blue', linestyle='-.', label='Warping')
ax2b.legend(loc=1, frameon=False, prop={'size': 20})
ax2b.set_ylim([0, 50])
plt.savefig(plotFlag + "_uncertainty.png")
plt.close(figb)
figc, axc = ozplot.makeFigSingle(plotFlag, "Wavelength ($\AA$)", "Scale Factor (10$^{-17}$ erg/s/cm$^2$/$\AA$/counts)")
axc.plot(wavelength, scale(wavelength)/10**-17, color='black')
axc.errorbar(centers, scaling/10**-17, yerr=stddev, fmt='s', color='mediumblue')
plt.savefig(plotFlag + "_scalefactors.png")
plt.close(figc)
return fluxScale, varScale
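# -------------------------------------------------- #
# Illustrative call (mirrors the call in calibSpec): #
# the scale factors/errors come from scaling_Matrix  #
# and centers holds the g/r/i band centres (A).      #
# -------------------------------------------------- #
# fluxNew, varNew = warp_spectra(scaling[0:3, e], scaling[3:6, e],
#                                spectra.flux[:, e], spectra.variance[:, e],
#                                spectra.wavelength, centers, False)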
# -------------------------------------------------- #
# ------------ create_output_single --------------- #
# -------------------------------------------------- #
# Outputs the warped spectra to a new fits file. #
# -------------------------------------------------- #
def create_output_single(obj_name, extensions, scaling, spectra, noPhotometry, badQC, spectraName, photoName, outBase,
redshift):
outName = outBase + obj_name + "_scaled.fits"
print("Saving Data to " + outName)
hdulist = fits.HDUList(fits.PrimaryHDU())
noPhotometryExt = []
if len(noPhotometry) > 0:
for i in range(len(noPhotometry)):
noPhotometryExt.append(spectra.ext[noPhotometry[i]])
badQCExt = []
if len(badQC) > 0:
for i in range(len(badQC)):
badQCExt.append(spectra.ext[badQC[i]])
index = 0
# Create an HDU for each night
for i in extensions:
header = fits.Header()
header['SOURCE'] = obj_name
header['RA'] = spectra.RA
header['DEC'] = spectra.DEC
header['FIELD'] = spectra.field
header['CRPIX1'] = spectra.crpix1
header['CRVAL1'] = spectra.crval1
header['CDELT1'] = spectra.cdelt1
header['CTYPE1'] = 'wavelength'
header['CUNIT1'] = 'angstrom'
header['EPOCHS'] = len(extensions)
header['z'] = redshift[0]
# save the names of the input data and the extensions ignored
header['SFILE'] = spectraName
header['PFILE'] = photoName
header['NOPHOTO'] = ','.join(map(str, noPhotometryExt))
header['BADQC'] = ','.join(map(str, badQCExt))
# save the original spectrum's extension number and some other details
header["EXT"] = spectra.ext[i]
header["UTMJD"] = spectra.dates[i]
header["EXPOSE"] = spectra.exposed[i]
header["QC"] = spectra.qc[i]
# save scale factors/uncertainties
header["SCALEG"] = scaling[0, i]
header["ERRORG"] = scaling[3, i]
header["SCALER"] = scaling[1, i]
header["ERRORR"] = scaling[4, i]
header["SCALEI"] = scaling[2, i]
header["ERRORI"] = scaling[5, i]
# save photometry/uncertainties used to calculate scale factors
header["MAGG"] = scaling[8, i]
header["MAGUG"] = scaling[9, i]
header["MAGR"] = scaling[10, i]
header["MAGUR"] = scaling[11, i]
header["MAGI"] = scaling[12, i]
header["MAGUI"] = scaling[13, i]
if index == 0:
hdulist[0].header['SOURCE'] = obj_name
hdulist[0].header['RA'] = spectra.RA
hdulist[0].header['DEC'] = spectra.DEC
hdulist[0].header['CRPIX1'] = spectra.crpix1
hdulist[0].header['CRVAL1'] = spectra.crval1
hdulist[0].header['CDELT1'] = spectra.cdelt1
hdulist[0].header['CTYPE1'] = 'wavelength'
hdulist[0].header['CUNIT1'] = 'angstrom'
hdulist[0].header['EPOCHS'] = len(extensions)
# save the names of the input data and the extensions ignored
hdulist[0].header['SFILE'] = spectraName
hdulist[0].header['PFILE'] = photoName
hdulist[0].header['NOPHOTO'] = ','.join(map(str, noPhotometryExt))
hdulist[0].header['BADQC'] = ','.join(map(str, badQCExt))
# save the original spectrum's extension number and some other details
hdulist[0].header["EXT"] = spectra.ext[i]
hdulist[0].header["UTMJD"] = spectra.dates[i]
hdulist[0].header["EXPOSE"] = spectra.exposed[i]
hdulist[0].header["QC"] = spectra.qc[i]
# save scale factors/uncertainties
hdulist[0].header["SCALEG"] = scaling[0, i]
hdulist[0].header["ERRORG"] = scaling[3, i]
hdulist[0].header["SCALER"] = scaling[1, i]
hdulist[0].header["ERRORR"] = scaling[4, i]
hdulist[0].header["SCALEI"] = scaling[2, i]
hdulist[0].header["ERRORI"] = scaling[5, i]
# save photometry/uncertainties used to calculate scale factors
hdulist[0].header["MAGG"] = scaling[8, i]
hdulist[0].header["MAGUG"] = scaling[9, i]
hdulist[0].header["MAGR"] = scaling[10, i]
hdulist[0].header["MAGUR"] = scaling[11, i]
hdulist[0].header["MAGI"] = scaling[12, i]
hdulist[0].header["MAGUI"] = scaling[13, i]
hdulist[0].data = spectra.flux[:, i]
hdulist.append(fits.ImageHDU(data=spectra.variance[:, i], header=header))
hdulist.append(fits.ImageHDU(data=spectra.badpix[:, i], header=header))
index = 2
else:
hdulist.append(fits.ImageHDU(data=spectra.flux[:, i], header=header))
hdulist.append(fits.ImageHDU(data=spectra.variance[:, i], header=header))
hdulist.append(fits.ImageHDU(data=spectra.badpix[:, i], header=header))
hdulist.writeto(outName, overwrite=True)
hdulist.close()
return
# -------------------------------------------------- #
# ------------- create_output_coadd --------------- #
# -------------------------------------------------- #
# Outputs the warped and coadded spectra to a new #
# fits file. #
# -------------------------------------------------- #
def create_output_coadd(obj_name, runList, fluxArray, varianceArray, badpixArray, extensions, scaling, spectra, redshift
,badQC, noPhotometry, spectraName, photoName, outBase, coaddFlag):
outName = outBase + obj_name + "_scaled_" + coaddFlag + ".fits"
hdulist = fits.HDUList(fits.PrimaryHDU())
noPhotometryExt = []
if len(noPhotometry) > 0:
for i in range(len(noPhotometry)):
noPhotometryExt.append(spectra.ext[noPhotometry[i]])
badQCExt = []
if len(badQC) > 0:
for i in range(len(badQC)):
badQCExt.append(spectra.ext[badQC[i]])
#print("Output Filename: %s \n" % (outName))
# First save the total coadded spectrum for the source to the primary extension
hdulist[0].data = fluxArray[:, 0]
hdulist[0].header['CRPIX1'] = spectra.crpix1
hdulist[0].header['CRVAL1'] = spectra.crval1
hdulist[0].header['CDELT1'] = spectra.cdelt1
hdulist[0].header['CTYPE1'] = 'wavelength'
hdulist[0].header['CUNIT1'] = 'angstrom'
hdulist[0].header['SOURCE'] = obj_name
hdulist[0].header['RA'] = spectra.RA
hdulist[0].header['DEC'] = spectra.DEC
hdulist[0].header['FIELD'] = spectra.field
hdulist[0].header['OBSNUM'] = len(runList)
hdulist[0].header['z'] = redshift[0]
hdulist[0].header['SFILE'] = spectraName
hdulist[0].header['PFILE'] = photoName
hdulist[0].header['METHOD'] = coaddFlag
hdulist[0].header['NOPHOTO'] = ','.join(map(str, noPhotometryExt))
hdulist[0].header['BADQC'] = ','.join(map(str, badQCExt))
# First extension is the total coadded variance
header = fits.Header()
header['EXTNAME'] = 'VARIANCE'
header['CRPIX1'] = spectra.crpix1
header['CRVAL1'] = spectra.crval1
header['CDELT1'] = spectra.cdelt1
header['CTYPE1'] = 'wavelength'
header['CUNIT1'] = 'angstrom'
hdulist.append(fits.ImageHDU(data=varianceArray[:, 0], header=header))
# Second Extension is the total bad pixel map
header = fits.Header()
header['EXTNAME'] = 'BadPix'
header['CRPIX1'] = spectra.crpix1
header['CRVAL1'] = spectra.crval1
header['CDELT1'] = spectra.cdelt1
header['CTYPE1'] = 'wavelength'
header['CUNIT1'] = 'angstrom'
hdulist.append(fits.ImageHDU(data=badpixArray[:, 0], header=header))
# Create an HDU for each night
index1 = 1
for k in runList:
index = 0
date = 0
header = fits.Header()
header['CRPIX1'] = spectra.crpix1
header['CRVAL1'] = spectra.crval1
header['CDELT1'] = spectra.cdelt1
header['CTYPE1'] = 'wavelength'
header['CUNIT1'] = 'angstrom'
header['RUN'] = k
for i in extensions:
here = False
if coaddFlag == 'Run':
if spectra.run[i] == k:
here = True
if coaddFlag == 'Date':
if int(spectra.dates[i]) == k:
here = True
if here == True:
head0 = "EXT" + str(index)
header[head0] = spectra.ext[i]
head1 = "UTMJD" + str(index)
header[head1] = spectra.dates[i]
date += spectra.dates[i]
head2 = "EXPOSE" + str(index)
header[head2] = spectra.exposed[i]
head3 = "QC" + str(index)
header[head3] = spectra.qc[i]
head4 = "SCALEG" + str(index)
header[head4] = scaling[0, i]
head5 = "ERRORG" + str(index)
header[head5] = scaling[3, i]
head6 = "SCALER" + str(index)
header[head6] = scaling[1, i]
head7 = "ERRORR" + str(index)
header[head7] = scaling[4, i]
head8 = "SCALEI" + str(index)
header[head8] = scaling[2, i]
head9 = "ERRORI" + str(index)
header[head9] = scaling[5, i]
head10 = "MAGG" + str(index)
header[head10] = scaling[8, i]
head11 = "MAGUG" + str(index)
header[head11] = scaling[9, i]
head12 = "MAGR" + str(index)
header[head12] = scaling[10, i]
head13 = "MAGUR" + str(index)
header[head13] = scaling[11, i]
head14 = "MAGI" + str(index)
header[head14] = scaling[12, i]
head15 = "MAGUI" + str(index)
header[head15] = scaling[13, i]
index += 1
if date > 0:
header['OBSNUM'] = index
header['AVGDATE'] = date / index
hdu_flux = fits.ImageHDU(data=fluxArray[:, index1], header=header)
hdu_fluxvar = fits.ImageHDU(data=varianceArray[:, index1], header=header)
hdu_badpix = fits.ImageHDU(data=badpixArray[:, index1], header=header)
hdulist.append(hdu_flux)
hdulist.append(hdu_fluxvar)
hdulist.append(hdu_badpix)
index1 += 1
hdulist.writeto(outName, overwrite=True)
hdulist.close()
return
# -------------------------------------------------- #
# ----------------- coadd_output ------------------ #
# -------------------------------------------------- #
# Coadds the observations based on run or night. #
# -------------------------------------------------- #
def coadd_output(obj_name, extensions, scaling, spectra, noPhotometry, badQC, spectraName, photoName, outBase, plotFlag,
coaddFlag, redshift):
# Get a list of items (dates/runs) over which all observations will be coadded
coaddOver = []
for e in extensions:
        # OzDES runs 7 and 8 were close together in time and run 8 had bad weather, so there were only
        # observations of one field - coadd it with run 7 to get better signal to noise
if spectra.run[e] == 8:
spectra.run[e] = 7
if coaddFlag == 'Run':
if spectra.run[e] not in coaddOver:
coaddOver.append(spectra.run[e])
if coaddFlag == 'Date':
if int(spectra.dates[e]) not in coaddOver:
coaddOver.append(int(spectra.dates[e]))
coaddFlux = np.zeros((5000, len(coaddOver) + 1))
coaddVar = np.zeros((5000, len(coaddOver) + 1))
coaddBadPix = np.zeros((5000, len(coaddOver) + 1))
speclistC = [] # For total coadd of observation
index = 1
for c in coaddOver:
speclist = []
for e in extensions:
opt = ''
if coaddFlag == 'Run':
opt = spectra.run[e]
if coaddFlag == 'Date':
opt = int(spectra.dates[e])
if opt == c:
speclist.append(SingleSpec(obj_name, spectra.wavelength, spectra.flux[:,e], spectra.variance[:,e],
spectra.badpix[:,e]))
speclistC.append(SingleSpec(obj_name, spectra.wavelength, spectra.flux[:,e], spectra.variance[:,e],
spectra.badpix[:,e]))
if len(speclist) > 1:
runCoadd = outlier_reject_and_coadd(obj_name, speclist)
coaddFlux[:, index] = runCoadd.flux
            coaddVar[:, index] = runCoadd.fluxvar
coaddBadPix[:,index] = runCoadd.isbad.astype('uint8')
if len(speclist) == 1:
coaddFlux[:, index] = speclist[0].flux
coaddVar[:, index] = speclist[0].fluxvar
coaddBadPix[:, index] = speclist[0].isbad.astype('uint8')
index += 1
if len(speclistC) > 1:
allCoadd = outlier_reject_and_coadd(obj_name, speclistC)
coaddFlux[:, 0] = allCoadd.flux
coaddVar[:, 0] = allCoadd.fluxvar
coaddBadPix[:, 0] = allCoadd.isbad.astype('uint8')
if len(speclistC) == 1:
coaddFlux[:, 0] = speclistC[0].flux
coaddVar[:, 0] = speclistC[0].fluxvar
coaddBadPix[:, 0] = speclistC[0].isbad.astype('uint8')
mark_as_bad(coaddFlux, coaddVar)
create_output_coadd(obj_name, coaddOver, coaddFlux, coaddVar, coaddBadPix, extensions, scaling, spectra, redshift,
badQC, noPhotometry, spectraName, photoName, outBase, coaddFlag)
return
# -------------------------------------------------- #
# Modified from code originally provided by #
# <NAME> #
# -------------------------------------------------- #
# ------------------ mark_as_bad ------------------- #
# -------------------------------------------------- #
# Occasionally you get some big spikes in the data   #
# that you do not want messing with your magnitude   #
# calculations. Remove these by looking for single   #
# bins whose flux or variance is more than 4.5 times #
# the local average and changing those bins to nans. #
# Nans will be interpolated over. The threshold      #
# should be chosen to trade off removing extreme     #
# outliers against clipping ordinary noisy bins.     #
# -------------------------------------------------- #
def mark_as_bad(fluxes, variances):
number = int(fluxes.size/fluxes.shape[0])
for epoch in range(number):
if number == 1:
flux = fluxes[:]
variance = variances[:]
else:
flux = fluxes[:, epoch]
variance = variances[:, epoch]
nBins = len(flux)
# define the local average in flux and variance to compare outliers to
for i in range(nBins):
if i < 50:
avg = np.nanmean(variance[0:99])
avgf = np.nanmean(flux[0:99])
elif i > nBins - 50:
avg = np.nanmean(variance[i-50:nBins-1])
avgf = np.nanmean(flux[i-50:nBins-1])
else:
avg = np.nanmean(variance[i-50:i+50])
avgf = np.nanmean(flux[i-50:i+50])
# find outliers and set that bin and the neighbouring ones to nan.
if np.isnan(variance[i]) == False and variance[i] > 4.5*avg:
flux[i] = np.nan
if i > 2 and i < 4996:
flux[i - 1] = np.nan
flux[i - 2] = np.nan
flux[i - 3] = np.nan
flux[i + 1] = np.nan
flux[i + 2] = np.nan
flux[i + 3] = np.nan
if np.isnan(flux[i]) == False and flux[i] > 4.5 * avgf:
flux[i] = np.nan
if i > 2 and i < 4996:
flux[i-1] = np.nan
flux[i-2] = np.nan
flux[i-3] = np.nan
flux[i+1] = np.nan
flux[i+2] = np.nan
flux[i+3] = np.nan
if np.isnan(flux[i]) == False and flux[i] < -4.5 * avgf:
flux[i] = np.nan
if i > 2 and i < 4996:
flux[i-1] = np.nan
flux[i-2] = np.nan
flux[i-3] = np.nan
flux[i+1] = np.nan
flux[i+2] = np.nan
flux[i+3] = np.nan
# interpolates nans (added here and bad pixels in the data)
filter_bad_pixels(flux, variance)
return
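# -------------------------------------------------- #
# Illustrative usage: operates in place on the flux  #
# and variance arrays, so nothing is returned.       #
# -------------------------------------------------- #
# mark_as_bad(coaddFlux, coaddVar)
# # bins more than 4.5x the local mean (and their neighbours) become
# # nans, which filter_bad_pixels then interpolates over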
# -------------------------------------------------- #
# Modified from code originally provided by #
# <NAME> #
# -------------------------------------------------- #
# --------------- filter_bad_pixels ---------------- #
# -------------------------------------------------- #
# Interpolates over nans in the spectrum. #
# -------------------------------------------------- #
def filter_bad_pixels(fluxes, variances):
number = int(fluxes.size/fluxes.shape[0])
for epoch in range(number):
if (number == 1):
flux = fluxes[:]
variance = variances[:]
else:
flux = fluxes[:, epoch]
variance = variances[:, epoch]
nBins = len(flux)
flux[0] = 0.0
flux[-1] = 0.0
variance[0] = 100*np.nanmean(variance)
variance[-1] = 100*np.nanmean(variance)
bad_pixels = np.logical_or.reduce((np.isnan(flux), np.isnan(variance), variance < 0))
bin = 0
binEnd = 0
while (bin < nBins):
if (bad_pixels[bin] == True):
binStart = bin
binNext = bin + 1
while (binNext < nBins):
if bad_pixels[binNext] == False:
binEnd = binNext - 1
binNext = nBins
binNext = binNext + 1
ya = float(flux[binStart - 1])
xa = float(binStart - 1)
sa = variance[binStart - 1]
yb = flux[binEnd + 1]
xb = binEnd + 1
sb = variance[binEnd + 1]
step = binStart
while (step < binEnd + 1):
flux[step] = ya + (yb - ya) * (step - xa) / (xb - xa)
variance[step] = sa + (sb + sa) * ((step - xa) / (xb - xa)) ** 2
step = step + 1
bin = binEnd
bin = bin + 1
return
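# -------------------------------------------------- #
# Illustrative behaviour (ignoring the edge bins,    #
# which are zeroed and given inflated variance): an  #
# interior run of nans such as [1.0, nan, nan, 4.0]  #
# is bridged linearly to [1.0, 2.0, 3.0, 4.0].       #
# -------------------------------------------------- #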
# -------------------------------------------------- #
# The following 4 functions were written by Chris #
# Lidman, <NAME>, and maybe others for the #
# initial processing of the OzDES spectra. They #
# were taken from the DES_coaddSpectra.py functions. #
# -------------------------------------------------- #
# -------------------- OzExcept -------------------- #
# -------------------------------------------------- #
# -------------------------------------------------- #
# A simple exception class #
# -------------------------------------------------- #
class OzExcept(Exception):
"""
Simple exception class
"""
def __init__(self, msg):
self.msg = msg
def __str__(self):
return "{0}: {1}".format(self.__class__.__name__, msg)
# -------------------------------------------------- #
# ----------------- VerboseMessager ---------------- #
# -------------------------------------------------- #
# -------------------------------------------------- #
# Verbose messaging for routines below. #
# -------------------------------------------------- #
class VerboseMessager(object):
"""
Verbose messaging for routines below
"""
def __init__(self, verbose=False):
self.verbose = verbose
def __call__(self, *args):
if self.verbose:
print("Something strange is happening")
sys.stdout.flush()
# -------------------------------------------------- #
# ------------------- SingleSpec ------------------- #
# -------------------------------------------------- #
# -------------------------------------------------- #
# Class representing a single spectrum for analysis. #
# -------------------------------------------------- #
class SingleSpec(object):
"""
Class representing a single spectrum for analysis
"""
## Added filename to SingleSpec
def __init__(self, obj_name, wl, flux, fluxvar, badpix):
self.name = obj_name
# ---------------------------
# self.pivot = int(fibrow[9])
# self.xplate = int(fibrow[3])
# self.yplate = int(fibrow[4])
# self.ra = np.degrees(fibrow[1])
# self.dec = np.degrees(fibrow[2])
# self.mag=float(fibrow[10])
# self.header=header
self.wl = np.array(wl)
self.flux = np.array(flux)
self.fluxvar = np.array(fluxvar)
# If there is a nan in either the flux, or the variance, mark it as bad
# JKH: this was what was here originally, my version complains about it
# self.fluxvar[fluxvar < 0] = np.nan
for i in range(5000):
if (self.fluxvar[i] < 0):
self.fluxvar[i] = np.nan
# The following doesn't take into account
#self.isbad = np.any([np.isnan(self.flux), np.isnan(self.fluxvar)], axis=0)
self.isbad = badpix.astype(bool)
# -------------------------------------------------- #
# ------------ outlier_reject_and_coadd ------------ #
# -------------------------------------------------- #
# -------------------------------------------------- #
# OzDES coadding function to reject outliers and #
# coadd all of the spectra in the inputted list. #
# -------------------------------------------------- #
def outlier_reject_and_coadd(obj_name, speclist):
"""
Reject outliers on single-object spectra to be coadded.
Assumes input spectra have been resampled to a common wavelength grid,
so this step needs to be done after joining and resampling.
Inputs
speclist: list of SingleSpec instances on a common wavelength grid
show: boolean; show diagnostic plot? (debug only; default=False)
savefig: boolean; save diagnostic plot? (debug only; default=False)
Output
result: SingleSpec instance of coadded spectrum, with bad pixels
set to np.nan (runz requires this)
"""
# Edge cases
if len(speclist) == 0:
print("outlier_reject: empty spectrum list")
return None
elif len(speclist) == 1:
tgname = speclist[0].name
vmsg("Only one spectrum, no coadd needed for {0}".format(tgname))
return speclist[0]
# Have at least two spectra, so let's try to reject outliers
# At this stage, all spectra have been mapped to a common wavelength scale
wl = speclist[0].wl
tgname = speclist[0].name
# Retrieve single-object spectra and variance spectra.
flux_2d = np.array([s.flux for s in speclist])
fluxvar_2d = np.array([s.fluxvar for s in speclist])
badpix_2d = np.array([s.isbad for s in speclist])
# Baseline parameters:
# outsig Significance threshold for outliers (in sigma)
# nbin Bin width for median rebinning
# ncoinc Maximum number of spectra in which an artifact can appear
outsig, nbin, ncoinc = 5, 25, 1
nspec, nwl = flux_2d.shape
# Run a median filter of the spectra to look for n-sigma outliers.
# These incantations are kind of complicated but they seem to work
# i) Compute the median of a wavelength section (nbin) along the observation direction
    # axis 0 = observation (row index), axis 1 = wavelength (column index)
# In moving to numpy v1.10.2, we replaced median with nanmedian
fmed = np.reshape([np.nanmedian(flux_2d[:, j:j + nbin], axis=1)
for j in np.arange(0, nwl, nbin)], (-1, nspec)).T
# Now expand fmed and flag pixels that are more than outsig off
fmed_2d = np.reshape([fmed[:, int(j / nbin)] for j in
|
np.arange(nwl)
|
numpy.arange
|
import os.path as osp
import mmcv
import numpy as np
import pycocotools.mask as maskUtils
import random
import cv2
import copy
import math
from skimage import img_as_ubyte
from mmdet.core import BitmapMasks, PolygonMasks
from ..builder import PIPELINES
random.seed(42)
@PIPELINES.register_module()
class LoadImageFromFile(object):
"""Load an image from file.
Required keys are "img_prefix" and "img_info" (a dict that must contain the
key "filename"). Added or updated keys are "filename", "img", "img_shape",
"ori_shape" (same as `img_shape`), "pad_shape" (same as `img_shape`),
"scale_factor" (1.0) and "img_norm_cfg" (means=0 and stds=1).
Args:
to_float32 (bool): Whether to convert the loaded image to a float32
numpy array. If set to False, the loaded image is an uint8 array.
Defaults to False.
color_type (str): The flag argument for :func:`mmcv.imfrombytes`.
Defaults to 'color'.
file_client_args (dict): Arguments to instantiate a FileClient.
See :class:`mmcv.fileio.FileClient` for details.
Defaults to ``dict(backend='disk')``.
"""
def __init__(self,
to_float32=False,
color_type='color',
file_client_args=dict(backend='disk')):
self.to_float32 = to_float32
self.color_type = color_type
self.file_client_args = file_client_args.copy()
self.file_client = None
def __call__(self, results):
"""Call functions to load image and get image meta information.
Args:
results (dict): Result dict from :obj:`mmdet.CustomDataset`.
Returns:
dict: The dict contains loaded image and meta information.
"""
if self.file_client is None:
self.file_client = mmcv.FileClient(**self.file_client_args)
if results['img_prefix'] is not None:
filename = osp.join(results['img_prefix'],
results['img_info']['filename'])
else:
filename = results['img_info']['filename']
img_bytes = self.file_client.get(filename)
img = mmcv.imfrombytes(img_bytes, flag=self.color_type)
if self.to_float32:
img = img.astype(np.float32)
results['filename'] = filename
results['ori_filename'] = results['img_info']['filename']
results['img'] = img
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
results['img_fields'] = ['img']
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}('
f'to_float32={self.to_float32}, '
f"color_type='{self.color_type}', "
f'file_client_args={self.file_client_args})')
return repr_str
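# Example usage in a dataset pipeline config (illustrative values; the exact
# pipeline depends on the surrounding model config):
# train_pipeline = [
#     dict(type='LoadImageFromFile', to_float32=True, color_type='color'),
#     ...
# ]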
@PIPELINES.register_module()
class LoadMultiChannelImageFromFiles(object):
"""Load multi-channel images from a list of separate channel files.
Required keys are "img_prefix" and "img_info" (a dict that must contain the
key "filename", which is expected to be a list of filenames).
Added or updated keys are "filename", "img", "img_shape",
"ori_shape" (same as `img_shape`), "pad_shape" (same as `img_shape`),
"scale_factor" (1.0) and "img_norm_cfg" (means=0 and stds=1).
Args:
to_float32 (bool): Whether to convert the loaded image to a float32
numpy array. If set to False, the loaded image is an uint8 array.
Defaults to False.
color_type (str): The flag argument for :func:`mmcv.imfrombytes`.
Defaults to 'color'.
file_client_args (dict): Arguments to instantiate a FileClient.
See :class:`mmcv.fileio.FileClient` for details.
Defaults to ``dict(backend='disk')``.
"""
def __init__(self,
to_float32=False,
color_type='unchanged',
file_client_args=dict(backend='disk')):
self.to_float32 = to_float32
self.color_type = color_type
self.file_client_args = file_client_args.copy()
self.file_client = None
def __call__(self, results):
"""Call functions to load multiple images and get images meta
information.
Args:
results (dict): Result dict from :obj:`mmdet.CustomDataset`.
Returns:
dict: The dict contains loaded images and meta information.
"""
if self.file_client is None:
self.file_client = mmcv.FileClient(**self.file_client_args)
if results['img_prefix'] is not None:
filename = [
osp.join(results['img_prefix'], fname)
for fname in results['img_info']['filename']
]
else:
filename = results['img_info']['filename']
img = []
for name in filename:
img_bytes = self.file_client.get(name)
img.append(mmcv.imfrombytes(img_bytes, flag=self.color_type))
img = np.stack(img, axis=-1)
if self.to_float32:
img = img.astype(np.float32)
results['filename'] = filename
results['ori_filename'] = results['img_info']['filename']
results['img'] = img
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
# Set initial values for default meta_keys
results['pad_shape'] = img.shape
results['scale_factor'] = 1.0
num_channels = 1 if len(img.shape) < 3 else img.shape[2]
results['img_norm_cfg'] = dict(
mean=np.zeros(num_channels, dtype=np.float32),
std=np.ones(num_channels, dtype=np.float32),
to_rgb=False)
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}('
f'to_float32={self.to_float32}, '
f"color_type='{self.color_type}', "
f'file_client_args={self.file_client_args})')
return repr_str
@PIPELINES.register_module()
class LoadAnnotations(object):
"""Load mutiple types of annotations.
Args:
with_bbox (bool): Whether to parse and load the bbox annotation.
Default: True.
with_label (bool): Whether to parse and load the label annotation.
Default: True.
with_mask (bool): Whether to parse and load the mask annotation.
Default: False.
with_seg (bool): Whether to parse and load the semantic segmentation
annotation. Default: False.
poly2mask (bool): Whether to convert the instance masks from polygons
to bitmaps. Default: True.
file_client_args (dict): Arguments to instantiate a FileClient.
See :class:`mmcv.fileio.FileClient` for details.
Defaults to ``dict(backend='disk')``.
"""
def __init__(self,
with_bbox=True,
with_label=True,
with_mask=False,
with_seg=False,
poly2mask=True,
file_client_args=dict(backend='disk')):
self.with_bbox = with_bbox
self.with_label = with_label
self.with_mask = with_mask
self.with_seg = with_seg
self.poly2mask = poly2mask
self.file_client_args = file_client_args.copy()
self.file_client = None
def _load_bboxes(self, results):
"""Private function to load bounding box annotations.
Args:
results (dict): Result dict from :obj:`mmdet.CustomDataset`.
Returns:
dict: The dict contains loaded bounding box annotations.
"""
ann_info = results['ann_info']
results['gt_bboxes'] = ann_info['bboxes'].copy()
gt_bboxes_ignore = ann_info.get('bboxes_ignore', None)
if gt_bboxes_ignore is not None:
results['gt_bboxes_ignore'] = gt_bboxes_ignore.copy()
results['bbox_fields'].append('gt_bboxes_ignore')
results['bbox_fields'].append('gt_bboxes')
return results
def _load_labels(self, results):
"""Private function to load label annotations.
Args:
results (dict): Result dict from :obj:`mmdet.CustomDataset`.
Returns:
dict: The dict contains loaded label annotations.
"""
results['gt_labels'] = results['ann_info']['labels'].copy()
return results
def _poly2mask(self, mask_ann, img_h, img_w):
"""Private function to convert masks represented with polygon to
bitmaps.
Args:
mask_ann (list | dict): Polygon mask annotation input.
img_h (int): The height of output mask.
img_w (int): The width of output mask.
Returns:
            numpy.ndarray: The decoded bitmap mask of shape (img_h, img_w).
"""
if isinstance(mask_ann, list):
# polygon -- a single object might consist of multiple parts
# we merge all parts into one mask rle code
rles = maskUtils.frPyObjects(mask_ann, img_h, img_w)
rle = maskUtils.merge(rles)
elif isinstance(mask_ann['counts'], list):
# uncompressed RLE
rle = maskUtils.frPyObjects(mask_ann, img_h, img_w)
else:
# rle
rle = mask_ann
mask = maskUtils.decode(rle)
return mask
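    # Illustrative example (hypothetical values): a single triangular polygon
    # in a 10x10 image decodes to a (10, 10) binary mask.
    # mask = self._poly2mask([[0.0, 0.0, 9.0, 0.0, 0.0, 9.0]], 10, 10)
    # assert mask.shape == (10, 10)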
def process_polygons(self, polygons):
"""Convert polygons to list of ndarray and filter invalid polygons.
Args:
polygons (list[list]): Polygons of one instance.
Returns:
list[numpy.ndarray]: Processed polygons.
"""
polygons = [
|
np.array(p)
|
numpy.array
|
from flask import Flask, request, send_file
from flask_cors import CORS, cross_origin
import cv2
import json
from PIL import Image
import numpy as np
from functions.AddLines import getedges
from functions.BriCon import Brightness_contrast
from functions.EyePatch import AddEyePatch
from functions.monocle import AddMonocle
from functions.moustache import AddMoustache
from functions.outline import edge
from functions.cartoon import cartoon
from functions.brightness import adjust_brightness
app=Flask("predict_car")
CORS(app, support_credentials=True)
@app.route('/',methods=["POST"])
@cross_origin(supports_credentials=True)
def CartoonifyImage():
file=request.files['image']
rec_img=Image.open(file.stream)
img = cv2.cvtColor(
|
np.array(rec_img)
|
numpy.array
|
"""
SO map class for handling healpix and CAR maps.
This is a wrapper around healpix and enlib (pixell).
"""
import os
from copy import deepcopy
import astropy.io.fits as pyfits
import healpy as hp
import matplotlib.pyplot as plt
import numpy as np
from pixell import colorize, curvedsky, enmap, enplot, powspec, reproject
from pspy.pspy_utils import ps_lensed_theory_to_dict
class so_map:
"""Class defining a ``so_map`` object."""
def __init__(self):
self.pixel = None
self.nside = None
self.ncomp = None
self.data = None
self.geometry = None
self.coordinate = None
def copy(self):
"""Create a copy of the ``so_map`` object."""
return deepcopy(self)
def info(self):
"""Print information about the ``so_map`` object."""
print("pixellisation:", self.pixel)
print("number of components:", self.ncomp)
print("number of pixels:", self.data.shape[:] if self.ncomp == 1 else self.data.shape[1:])
print("nside:", self.nside)
print("geometry:", self.geometry)
print("coordinates:", self.coordinate)
def write_map(self, file_name):
"""Write the ``so_map`` to disk.
Parameters
----------
filename : string
the name of the fits file
"""
if self.pixel == "HEALPIX":
hp.fitsfunc.write_map(file_name, self.data, overwrite=True)
if self.pixel == "CAR":
enmap.write_map(file_name, self.data)
def upgrade(self, factor):
"""Upgrade the ``so_map``.
Parameters
----------
factor : integer
factor of increased pixel resolution (should be a factor of 2)
"""
assert factor % 2 == 0, "factor should be a factor of 2"
upgrade = self.copy()
if self.pixel == "HEALPIX":
nside_out = int(self.nside * factor)
upgrade.data = hp.pixelfunc.ud_grade(self.data, nside_out=nside_out)
upgrade.nside = nside_out
if self.pixel == "CAR":
upgrade.data = enmap.upgrade(self.data, factor)
upgrade.geometry = upgrade.data.geometry[1:]
return upgrade
def downgrade(self, factor):
"""Downgrade the ``so_map``.
Parameters
----------
factor : integer
factor of decreased pixel resolution (should be a factor of 2)
"""
assert factor % 2 == 0, "factor should be a factor of 2"
downgrade = self.copy()
if self.pixel == "HEALPIX":
nside_out = int(self.nside / factor)
downgrade.data = hp.pixelfunc.ud_grade(self.data, nside_out=nside_out)
downgrade.nside = nside_out
if self.pixel == "CAR":
downgrade.data = enmap.downgrade(self.data, factor)
downgrade.geometry = downgrade.data.geometry[1:]
return downgrade
def synfast(self, clfile):
"""Fill a ``so_map`` with a cmb gaussian simulation.
Parameters
----------
clfile : CAMB data file
lensed power spectra file from CAMB
"""
synfast = self.copy()
if self.pixel == "HEALPIX":
l, ps = ps_lensed_theory_to_dict(clfile, output_type="Cl", start_at_zero=True)
if self.ncomp == 1:
synfast.data = hp.sphtfunc.synfast(ps["TT"], self.nside, new=True, verbose=False)
else:
synfast.data = hp.sphtfunc.synfast(
(ps["TT"], ps["EE"], ps["BB"], ps["TE"]), self.nside, new=True, verbose=False
)
if self.pixel == "CAR":
ps = powspec.read_spectrum(clfile)[: self.ncomp, : self.ncomp]
synfast.data = curvedsky.rand_map(self.data.shape, self.data.wcs, ps)
return synfast
def get_lmax_limit(self):
"""Return the maximum lmax corresponding to the ``so_map`` pixellisation"""
if self.pixel == "HEALPIX":
l_max_limit = 3 * self.nside - 1
elif self.pixel == "CAR":
cdelt = self.data.wcs.wcs.cdelt[1]
l_max_limit = 360 / cdelt / 4
return l_max_limit
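    # For reference (illustrative numbers): a HEALPIX map with nside=2048 gives
    # l_max = 3*2048 - 1 = 6143, while a CAR map with 1 arcmin pixels
    # (cdelt = 1/60 deg) gives l_max = 360*60/4 = 5400.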
def plot(
self,
color="planck",
color_range=None,
file_name=None,
ticks_spacing_car=1,
title="",
cbar=True,
hp_gnomv=None,
):
"""Plot a ``so_map``.
Parameters
----------
color: cmap
a matplotlib colormap (or 'planck')
color_range: scalar for single component or len(3) list for T,Q,U.
the range of the colorscale
file_name: string
file_name is the name of the png file that will be created, if None the plot
will be displayed.
title: string
the title of the plot.
cbar: boolean
set to True to display the colorbar.
ticks_spacing_CAR: float
for CAR plot, choose the spacing of the ticks.
hp_gnomv: tuple
gnomview projection for HEALPIX plotting, expected (lon_c,lat_c,xsize,reso).
"""
try:
colorize.mpl_setdefault(color)
except KeyError:
if self.pixel == "CAR":
raise KeyError(
"Color name must be a pixell color map name {}!".format(
list(colorize.schemes.keys())
)
)
if self.pixel == "HEALPIX":
cmap = plt.get_cmap(color)
cmap.set_bad("white")
cmap.set_under("white")
if self.ncomp == 1:
min_range = -color_range if color_range is not None else None
max_range = +color_range if color_range is not None else None
if hp_gnomv is not None:
lon, lat, xsize, reso = hp_gnomv
hp.gnomview(
self.data,
min=min_range,
max=max_range,
cmap=cmap,
notext=True,
title=title,
cbar=cbar,
rot=(lon, lat, 0),
xsize=xsize,
reso=reso,
)
else:
hp.mollview(
self.data,
min=min_range,
max=max_range,
cmap=cmap,
notext=True,
title=title,
cbar=cbar,
)
if file_name is not None:
plt.savefig(file_name + ".png", bbox_inches="tight")
plt.clf()
plt.close()
else:
plt.show()
else:
fields = ["T", "Q", "U"]
min_ranges = {field: None for field in fields}
max_ranges = {field: None for field in fields}
if color_range is not None:
for i, field in enumerate(fields):
min_ranges[field] = -color_range[i]
max_ranges[field] = +color_range[i]
for data, field in zip(self.data, fields):
if hp_gnomv is not None:
lon, lat, xsize, reso = hp_gnomv
hp.gnomview(
data,
min=min_ranges[field],
max=max_ranges[field],
cmap=cmap,
notext=True,
title=field + "" + title,
cbar=cbar,
rot=(lon, lat, 0),
xsize=xsize,
reso=reso,
)
else:
hp.mollview(
data,
min=min_ranges[field],
max=max_ranges[field],
cmap=cmap,
notext=True,
title=field + "" + title,
cbar=cbar,
)
if file_name is not None:
plt.savefig(file_name + "_%s" % field + ".png", bbox_inches="tight")
plt.clf()
plt.close()
else:
plt.show()
if self.pixel == "CAR":
if self.ncomp == 1:
if color_range is not None:
max_range = "%s" % (color_range)
else:
max_range = "%s" % (np.max(self.data))
plots = enplot.get_plots(
self.data, color=color, range=max_range, colorbar=1, ticks=ticks_spacing_car
)
for plot in plots:
if file_name is not None:
enplot.write(file_name + ".png", plot)
else:
plot.img.show()
if self.ncomp == 3:
fields = ["T", "Q", "U"]
if color_range is not None:
max_range = "%s:%s:%s" % (color_range[0], color_range[1], color_range[2])
else:
max_range = "%s:%s:%s" % (
np.max(self.data[0]),
|
np.max(self.data[1])
|
numpy.max
|
from numpy import array, shape, zeros, argmax
from numpy.linalg import norm
import matplotlib.pyplot as plt
from lib import parse_CHGCAR, parse_LOCPOT
def find_vacuum_potential(ifile,**args):
if 'dim' in args:
dim=args['dim']
else:
dim=2
x,y,e,lv,coord=calc_plane_averaged_density(ifile,dim=dim)
maxdiff=zeros(shape(e)[dim])
for i in range(shape(e)[dim]):
for j in coord[:,dim]:
for k in range(-1,2):
if maxdiff[i]<abs(x[i]-j+norm(lv[dim])*k):
maxdiff[i]=abs(x[i]-j+norm(lv[dim])*k)
max_index=argmax(maxdiff)
return y[max_index]
def calc_plane_averaged_density(ifile,**args):
if 'dim' in args:
dim=args['dim']
else:
dim=2
if 'filetype' in args:
filetype=args['filetype']
else:
filetype='LOCPOT'
if filetype=='LOCPOT':
e,lv,coord,atomtypes,atomnums=parse_LOCPOT(ifile)
else:
e,lv,coord,atomtypes,atomnums=parse_CHGCAR(ifile)
x=array([i*norm(lv[dim])/(shape(e)[dim]-1) for i in range(shape(e)[dim])])
y=zeros(shape(e)[dim])
for i in range(
|
shape(e)
|
numpy.shape
|
#! /usr/bin/env python
import pandas as pd
import numpy as np
from scipy.stats import linregress
import matplotlib.pyplot as plt
from scipy.signal import savgol_filter
from scipy.fftpack import rfft, irfft, fftfreq
def normalize(df):
result = df.copy()
eps = 0.0000001
for feature_name in df.columns:
max_value = df[feature_name].max()
min_value = df[feature_name].min()
result[feature_name] = (df[feature_name] - min_value) / (max_value - min_value + eps) + eps
return result
def extent_trans_X(x, n, k):
return 1 - np.exp(-1 * np.exp(k) * pow(x, n))
def running_mean(x, n=5):
return np.convolve(x, np.ones((n,))/n)[(n-1):]
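# Quick illustration of the helpers above (made-up numbers): extent_trans_X(2.0, 1.5, -3.0)
# returns 1 - exp(-exp(-3.0) * 2.0**1.5), i.e. a JMAK/Avrami-style transformed fraction at
# time 2.0 for exponent n=1.5 and ln(rate constant) k=-3.0; running_mean(x, n) smooths x
# with an n-point moving average via np.convolve.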
# This part reads in the excel file, and parses it into dataframse
# path_in = 'C:/Users/isle132/Documents/NGDE/Data/From Forrest Test Data'
# exp_data_file = 'FTIR Plate Run.xlsx'
# exp_data_file = 'Copy of FTIR Nucleation Run.xlsx'
path_in = 'C:/Users/isle132/Documents/NGDE/Data/'
exp_data_file = 'FTIR Nucleation Run v2.xlsx'
xl_exp_data = pd.ExcelFile(path_in+'/'+exp_data_file)
df_xl_in = xl_exp_data.parse("High Conc Run Py", index_col=0)
# df_xl_in = xl_exp_data.parse("Medium Conc Run Py", index_col=0)
# df_xl_in = xl_exp_data.parse("Low Conc Run Py", index_col=0)
# df_xl_in = xl_exp_data.parse("Nucleation Data", index_col=0)
# df_xl_in.columns = df_xl_in.columns.str.replace(' minutes', '')
injection_time = 5.0
# high concentration growth, time after injection
growth_onset = 4.0
# medium concentration growth, time after injection
# growth_onset = 10
time = df_xl_in.columns
all_t_signal = df_xl_in.copy()
all_t_smooth_signal = df_xl_in.copy()
# FFT FILTER
sample_freq = all_t_signal.index[1] - all_t_signal.index[0] # units of frequency cm**-1
fft_freqs = fftfreq(len(all_t_signal), d=sample_freq) # bins into cycles / (cm**-1)
for time_column in all_t_signal:
ft_signal_data = rfft(all_t_signal[time_column])
ft_signal_data[fft_freqs < -0.10] = 0 # Cut out all freqs larger than 0.05 cycles / (cm**-1)
ft_signal_smooth = irfft(ft_signal_data)
all_t_smooth_signal[time_column] = ft_signal_smooth
# Subset the frequencies near the peak
freq_sel = (all_t_smooth_signal.index.get_level_values(0) <= 1600) & \
(all_t_smooth_signal.index.get_level_values(0) >= 1590)
test_signal = all_t_smooth_signal.loc[freq_sel].T
test_signal = test_signal[test_signal.index >= injection_time] # injection time at 5 minutes
test_signal.index = test_signal.index - injection_time # injection time at 5 minutes
normalized_data_unproc = normalize(test_signal)
# SAVITZKY GOLAY FILTER
# apply a Savitzky Golay filter to smooth the data, using a 9 point window with order 3 polynomial
normalized_data = normalized_data_unproc.apply(savgol_filter, args=(9, 3,))
# take values for time greater than 1, to prevent singularity in log
avrami_transform = normalized_data.loc[1:].apply(lambda x: np.log(np.log(1/(1-x))))
avrami_transform.index = list(map(lambda x: np.log(x), avrami_transform.index.astype(float)))
impingement_factor = 1
austin_rickett_transform = normalized_data.loc[1:].apply(lambda x: np.log(pow(1-x, -1 * impingement_factor)-1) +
np.log(impingement_factor))
austin_rickett_transform.index = list(map(lambda x: np.log(x), austin_rickett_transform.index.astype(float)))
x_ln_time = avrami_transform.index.values
avrami_slopes = pd.DataFrame(index=x_ln_time[1:]) # make the empty array
# set up the time selectors
nucl_ln_time_sel = (avrami_transform.index.get_level_values(0) <= np.log(growth_onset-0.8)) & \
(avrami_transform.index.get_level_values(0) >= 0.2)
growth_ln_time_sel = (avrami_transform.index.get_level_values(0) >= np.log(growth_onset)) & \
(avrami_transform.index.get_level_values(0) <= np.log(40.0))
x_ln_time_nucl_sub = avrami_transform.loc[nucl_ln_time_sel].index.values
x_ln_time_growth_sub = avrami_transform.loc[growth_ln_time_sel].index.values
# Do Linear Regression Individually for each Frequency
avrami_coef_w = []
avrami_inter_w = []
avrami_r_w = []
avrami_coef_n = []
avrami_inter_n = []
avrami_r_n = []
avrami_coef_n_s = []
avrami_inter_n_s = []
avrami_r_n_s = []
avrami_coef_g = []
avrami_inter_g = []
avrami_r_g = []
for column in avrami_transform:
y_w = avrami_transform[column].values
y_n = avrami_transform.loc[nucl_ln_time_sel, column].values
y_g = avrami_transform.loc[growth_ln_time_sel, column].values
slope_w, intercept_w, r_value_w, p_value_w, std_err_w = linregress(x_ln_time, y_w)
avrami_coef_w.append(slope_w)
avrami_inter_w.append(intercept_w)
avrami_r_w.append(r_value_w)
slope_n, intercept_n, r_value_n, p_value_n, std_err_n = linregress(x_ln_time_nucl_sub, y_n)
avrami_coef_n.append(slope_n)
avrami_inter_n.append(intercept_n)
avrami_r_n.append(r_value_n)
slope_g, intercept_g, r_value_g, p_value_g, std_err_g = linregress(x_ln_time_growth_sub, y_g)
avrami_coef_g.append(slope_g)
avrami_inter_g.append(intercept_g)
avrami_r_g.append(r_value_g)
avrami_slopes[column] = np.diff(y_w)/np.diff(x_ln_time)
# Get the slopes from the avrami plots - time exponent
avrami_slopes = avrami_slopes.fillna(method='backfill')
# apply a Savitzky Golay filter to smooth the data, using a 7 point window with order 3 polynomial
# avrami_slopes_smooth = avrami_slopes.apply(savgol_filter, args=(5, 2,), mode='mirror')
avrami_slopes_smooth = avrami_slopes.copy()
normalized_data['mean'] = normalized_data.mean(axis=1)
avrami_slopes['mean'] = avrami_slopes.mean(axis=1)
avrami_slopes_smooth['mean'] = avrami_slopes_smooth.mean(axis=1)
# Get the n vs X data
transformed_fraction = pd.DataFrame(index=normalized_data.loc[1:, 'mean'].values[1:])
transformed_fraction['mean'] = avrami_slopes_smooth['mean'].values
transformed_fraction = transformed_fraction.sort_index()
# print out the Regression Information
print('For the entire Data set')
print('average is ', np.average(avrami_coef_w), ' x + ', np.average(avrami_inter_w),
'\nSTDev is ', np.std(avrami_coef_w), ' x + ', np.std(avrami_inter_w),
'\nMax is ', np.max(avrami_coef_w), ' x + ', np.max(avrami_inter_w),
'\nMin is ', np.min(avrami_coef_w), ' x + ', np.min(avrami_inter_w),
'\nR**2 is ', np.average(avrami_r_w))
print('For the nucleation subset of the full Data set')
print('average is ', np.average(avrami_coef_n), ' x + ', np.average(avrami_inter_n),
'\nSTDev is ', np.std(avrami_coef_n), ' x + ', np.std(avrami_inter_n),
'\nMax is ', np.max(avrami_coef_n), ' x + ', np.max(avrami_inter_n),
'\nMin is ', np.min(avrami_coef_n), ' x + ', np.min(avrami_inter_n),
'\nR**2 is ', np.average(avrami_r_n))
print('For the growth subset of the full Data set')
print('average is ', np.average(avrami_coef_g), ' x + ', np.average(avrami_inter_g),
'\nSTDev is ', np.std(avrami_coef_g), ' x + ', np.std(avrami_inter_g),
'\nMax is ', np.max(avrami_coef_g), ' x + ', np.max(avrami_inter_g),
'\nMin is ', np.min(avrami_coef_g), ' x + ', np.min(avrami_inter_g),
'\nR**2 is ', np.average(avrami_r_g))
# Prepare data for plotting
reg_plot_y_w = x_ln_time * np.average(avrami_coef_w) + np.average(avrami_inter_w)
reg_plot_y_n = x_ln_time * np.average(avrami_coef_n) + np.average(avrami_inter_n)
reg_plot_y_g = x_ln_time * np.average(avrami_coef_g) + np.average(avrami_inter_g)
x_time_extent = normalized_data.index.values
X_n = extent_trans_X(x_time_extent,
|
np.average(avrami_coef_n)
|
numpy.average
|
__author__ = 'joerg'
import os
import numpy as np
def get_speaker_lists(rootDir):
np.random.seed(100)
dirlist = ['DR5', 'DR6', 'DR7', 'DR3', 'DR2', 'DR1', 'DR4', 'DR8']
train_speaker = []
valid_speaker = []
for i in dirlist:
region_speakers = []
for dirName, subdirList, fileList in os.walk(rootDir + i + "/"):
#print(dirName)
path,folder_name = os.path.split(dirName)
if folder_name.__len__() >= 1:
region_speakers.append(folder_name)
len = region_speakers.__len__()
valid_len = int(round(len * 0.1))
random_valid =
|
np.random.randint(0,len-1,valid_len)
|
numpy.random.randint
|
import numpy as np
import cv2 as cv
from skimage.transform import hough_circle, hough_circle_peaks
from skimage.feature import canny
from scipy.optimize import linear_sum_assignment
from scipy.spatial import distance as scipy_distance
from scipy.spatial import Voronoi as ScipyVoronoi
from sklearn.metrics import mutual_info_score
# for singular spectrum analysis
import scipy.linalg as linalg
def find_circles_thres(current_frame_gray, num_of_rafts, radii_hough=[17, 19],
thres_value=70, sigma_canny=1.0, low_threshold_canny=25, high_threshold_canny=127,
min_sep_dist=20, raft_center_threshold=60,
top_left_x=390, top_left_y=450, width_x=850, height_y=850):
"""
find the centers of each raft
:param current_frame_gray: image in grayscale
:param num_of_rafts:
:param radii_hough: the range of Hough radii
:param thres_value: threshold value
:param sigma_canny:
:param low_threshold_canny:
:param high_threshold_canny:
:param min_sep_dist:
:param raft_center_threshold:
:param top_left_x:
:param top_left_y:
:param width_x:
:param height_y:
:return: raft_centers, raft_radii, raft_count
"""
# key data set initialization
raft_centers = np.zeros((num_of_rafts, 2), dtype=int)
raft_radii = np.zeros(num_of_rafts, dtype=int)
# crop the image
image_cropped = current_frame_gray[top_left_y: top_left_y + height_y, top_left_x: top_left_x + width_x]
# threshold the image
retval, image_thres = cv.threshold(image_cropped, thres_value, 255, 0)
# find edges
image_edges = canny(image_thres, sigma=sigma_canny, low_threshold=low_threshold_canny,
high_threshold=high_threshold_canny)
# use Hough transform to find circles
hough_results = hough_circle(image_edges, np.arange(*radii_hough))
accums, cx, cy, radii = hough_circle_peaks(hough_results, np.arange(*radii_hough))
# assuming that the first raft (highest accumulator score) is a good one
# raft_centers[0,0] = cx[0]
# raft_centers[0,1] = cy[0]
# raft_radii[0] = radii[0]
    raft_count = 0  # number of rafts accepted so far
# remove circles that belong to the same raft and circles that happened to be in between rafts
for accum_score, detected_cx, detected_cy, detected_radius in zip(accums, cx, cy, radii):
new_raft = 1
if image_cropped[detected_cy, detected_cx] < raft_center_threshold:
new_raft = 0
elif image_cropped[detected_cy - detected_radius // 2: detected_cy + detected_radius // 2,
detected_cx - detected_radius // 2:detected_cx + detected_radius // 2].mean() \
< raft_center_threshold:
new_raft = 0
# elif (detected_cx - width_x/2)**2 + (detected_cy - height_y/2)**2 > lookup_radius**2:
# new_raft = 0
else:
cost_matrix = scipy_distance.cdist(np.array([detected_cx, detected_cy], ndmin=2),
raft_centers[:raft_count, :], 'euclidean')
            if np.any(cost_matrix < min_sep_dist):  # too close to an already-detected raft
new_raft = 0
if new_raft == 1:
            raft_centers[raft_count, 0] = detected_cx  # cx is the column index
            raft_centers[raft_count, 1] = detected_cy  # cy is the row index
raft_radii[raft_count] = detected_radius
raft_count = raft_count + 1
if raft_count == num_of_rafts:
# error_message = 'all rafts found'
break
# convert the xy coordinates of the cropped image into the coordinates of the original image
raft_centers[:, 0] = raft_centers[:, 0] + top_left_x
raft_centers[:, 1] = raft_centers[:, 1] + top_left_y
return raft_centers, raft_radii, raft_count
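# A minimal usage sketch for find_circles_thres; 'frame_gray' and the parameter values
# below are illustrative assumptions, not settings from a particular experiment.
def _example_find_circles_thres(frame_gray):
    centers, radii, count = find_circles_thres(frame_gray, num_of_rafts=7,
                                               radii_hough=[17, 19], thres_value=70)
    # only the first `count` rows of the outputs are valid detections
    return centers[:count], radii[:count]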
def find_circles_adaptive(current_frame_gray, num_of_rafts, radii_hough,
adaptive_thres_blocksize=9, adaptive_thres_const=-20,
min_sep_dist=20, raft_center_threshold=60,
top_left_x=390, top_left_y=450, width_x=850, height_y=850):
"""
find the centers of each raft
    :param current_frame_gray: image in grayscale
    :param num_of_rafts: total number of rafts expected in the frame
    :param radii_hough: the range of Hough radii, unpacked as np.arange(*radii_hough)
    :param adaptive_thres_blocksize: size of the pixel neighbourhood used by cv.adaptiveThreshold
    :param adaptive_thres_const: constant subtracted from the neighbourhood mean in cv.adaptiveThreshold
    :param min_sep_dist: minimum separation (in pixels) between two raft centers
    :param raft_center_threshold: minimum brightness at the detected center for it to count as a raft
    :param top_left_x: x coordinate of the top-left corner of the cropping region
    :param top_left_y: y coordinate of the top-left corner of the cropping region
    :param width_x: width of the cropping region
    :param height_y: height of the cropping region
:return: raft_centers, raft_radii, raft_count
"""
# key data set initialization
raft_centers = np.zeros((num_of_rafts, 2), dtype=int)
raft_radii = np.zeros(num_of_rafts, dtype=int)
# crop the image
image_cropped = current_frame_gray[top_left_y: top_left_y + height_y, top_left_x: top_left_x + width_x]
# threshold the image
image_thres = cv.adaptiveThreshold(image_cropped, 255, cv.ADAPTIVE_THRESH_MEAN_C, cv.THRESH_BINARY,
adaptive_thres_blocksize, adaptive_thres_const)
# use Hough transform to find circles
hough_results = hough_circle(image_thres, np.arange(*radii_hough))
accums, cx, cy, radii = hough_circle_peaks(hough_results, np.arange(*radii_hough))
# assuming that the first raft (highest accumulator score) is a good one
# raft_centers[0,0] = cx[0]
# raft_centers[0,1] = cy[0]
# raft_radii[0] = radii[0]
    raft_count = 0  # number of rafts accepted so far
# remove circles that belong to the same raft and circles that happened to be in between rafts
for accumScore, detected_cx, detected_cy, detected_radius in zip(accums, cx, cy, radii):
new_raft = 1
if image_cropped[detected_cy, detected_cx] < raft_center_threshold:
new_raft = 0
elif image_cropped[detected_cy - detected_radius // 2: detected_cy + detected_radius // 2,
detected_cx - detected_radius // 2:detected_cx + detected_radius // 2].mean() \
< raft_center_threshold:
new_raft = 0
# elif (detected_cx - width_x/2)**2 + (detected_cy - height_y/2)**2 > lookup_radius**2:
# new_raft = 0
else:
cost_matrix = scipy_distance.cdist(np.array([detected_cx, detected_cy], ndmin=2),
raft_centers[:raft_count, :], 'euclidean')
            if np.any(cost_matrix < min_sep_dist):  # too close to an already-detected raft
new_raft = 0
if new_raft == 1:
            raft_centers[raft_count, 0] = detected_cx  # cx is the column index
            raft_centers[raft_count, 1] = detected_cy  # cy is the row index
raft_radii[raft_count] = detected_radius
raft_count = raft_count + 1
if raft_count == num_of_rafts:
# error_message = 'all rafts found'
break
# convert the xy coordinates of the cropped image into the coordinates of the original image
raft_centers[:, 0] = raft_centers[:, 0] + top_left_x
raft_centers[:, 1] = raft_centers[:, 1] + top_left_y
return raft_centers, raft_radii, raft_count
def find_and_sort_circles(image_gray, num_of_rafts, prev_pos, radii_hough, thres_value=30, sigma_Canny=1.0,
low_threshold_canny=25, high_threshold_canny=127, max_displ=50):
"""
For each raft detected in the prev_pos, go through the newly found circles in descending order of scores,
    and the first one within max_displ is stored as the new position of the raft.
:param image_gray: gray scale image
:param num_of_rafts: number of rafts to be located
:param prev_pos: previous positions of rafts
:param radii_hough: [starting radius, ending radius], to be unpacked as an argument for hough_circle
    :param thres_value: threshold value for binarizing the image
:param sigma_Canny: the width of the Gaussian filter for Canny edge detection
:param low_threshold_canny: low threshold for Canny
:param high_threshold_canny: high threshold for Canny
:param max_displ: maximum displacement
    :return: raft_centers, raft_radii, raft_count
"""
# key data set initialization
raft_centers = np.zeros((num_of_rafts, 2), dtype=int)
raft_radii = np.zeros(num_of_rafts, dtype=int)
# threshold the image first
retval, image_thres = cv.threshold(image_gray, thres_value, 255, 0)
# kernel = np.ones((3,3),np.uint8)
# image_thres = cv.morphologyEx(image_thres, cv.MORPH_OPEN, kernel)
# use canny and then Hough transform to find circles
image_edges = canny(image_thres, sigma=sigma_Canny, low_threshold=low_threshold_canny,
high_threshold=high_threshold_canny)
hough_results = hough_circle(image_edges, np.arange(*radii_hough))
accums, cx, cy, radii = hough_circle_peaks(hough_results, np.arange(*radii_hough))
raft_count = 0
for raftID in np.arange(num_of_rafts):
for accumScore, detected_cx, detected_cy, detected_radius in zip(accums, cx, cy, radii):
distance = np.sqrt((detected_cx - prev_pos[raftID, 0]) ** 2 + (detected_cy - prev_pos[raftID, 1]) ** 2)
if distance < max_displ:
                raft_centers[raftID, 0] = detected_cx  # cx is the column index
                raft_centers[raftID, 1] = detected_cy  # cy is the row index
raft_radii[raftID] = detected_radius
raft_count += 1
break
return raft_centers, raft_radii, raft_count
def detect_by_contours(image_gray):
original = image_gray.copy()
lowcut = original.mean() + 1.0 * original.std()
retval, image_thres = cv.threshold(original, lowcut, 255, cv.THRESH_BINARY)
kernel = np.ones((3, 3), np.uint8)
image = cv.morphologyEx(image_thres, cv.MORPH_OPEN, kernel)
_, contours, hierarchy = cv.findContours(image, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE)
# drawing = test_image.copy()
centers = []
radii = []
for contour in contours:
area = cv.contourArea(contour)
        # skip small spurious contours; the area cutoff (2000) could also be moved to the function signature
if area < 2000:
continue
center, br2 = cv.minEnclosingCircle(contour)
        # the radius of the minimum enclosing circle is used as the raft radius
radii.append(br2)
centers.append(center)
raft_centers = np.array(centers, dtype=int)
raft_radii = np.array(radii, dtype=int)
return raft_centers, raft_radii
def parse_main_folder_name(main_folder_name):
"""
    parse the name of the main folder here, and return the following parts
date, string
raft_geometry, string
thin_film_prop, string
magnet_field_prop, string
comments, string
"""
parts = main_folder_name.split('_')
date = parts[0]
raft_geometry = parts[1]
thin_film_prop = parts[2]
magnet_field_prop = parts[3]
if len(parts) > 4:
comments = parts[4]
else:
comments = 'none'
return date, raft_geometry, thin_film_prop, magnet_field_prop, comments
def parse_subfolder_name(subfolder_name):
"""
parse the subfolder name here, and return the following variables
    num_of_rafts, int
    batch_number, int
    spin_speed, float
    spin_unit, string
    magnification, float
    comments, string
"""
name_lowercase = subfolder_name.lower()
parts = name_lowercase.split('_')
num_of_rafts = int(parts[0].partition('raft')[0])
batch_number = int(parts[1])
spin_speed = float(parts[2].partition('rp')[0])
spin_unit = ''.join(parts[2].partition('rp')[1:])
# if parts[2].partition('rp')[0].isdigit():
# spin_speed = int(parts[2].partition('rp')[0])
# spin_unit = ''.join(parts[2].partition('rp')[1:])
# elif parts[2].partition('hz')[0].isdigit():
# spin_speed = int(parts[2].partition('hz')[0])
# spin_unit = ''.join(parts[2].partition('hz')[1:])
magnification = float(parts[3].partition('x')[0])
if len(parts) > 4:
comments = ''.join(parts[4:])
else:
comments = 'none'
return num_of_rafts, batch_number, spin_speed, spin_unit, magnification, comments
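# A small worked example for parse_subfolder_name; the folder name is hypothetical and only
# illustrates the expected '<N>raft(s)_<batch>_<speed>rp*_<magnification>x[_comments]' layout.
def _example_parse_subfolder_name():
    n, batch, speed, unit, mag, comments = parse_subfolder_name('7Rafts_2_10rps_0.57x')
    # n == 7, batch == 2, speed == 10.0, unit == 'rps', mag == 0.57, comments == 'none'
    return n, batch, speed, unit, mag, comments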
def calculate_distance(p1, p2):
"""
calculate the distance between p1 and p2
"""
dist = np.sqrt((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2)
return dist
def calculate_orbiting_angle(orbiting_center, raft):
"""
calculate the orbiting angle of a raft with respect to a center
"""
    # note the negative sign before the first component (the y component);
    # it makes the orbiting angle right-handed, since the image y-axis points down.
angle = np.arctan2(-(raft[1] - orbiting_center[1]), (raft[0] - orbiting_center[0])) * 180 / np.pi
return angle
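# A quick worked example for calculate_orbiting_angle: in image coordinates the y-axis points
# down, so a raft at (1, 1) relative to a center at (0, 0) lies below-right of it and maps
# to -45 degrees in the right-handed frame returned above.
def _example_calculate_orbiting_angle():
    angle = calculate_orbiting_angle((0, 0), (1, 1))
    return angle  # == -45.0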
def numbering_rafts(rafts_loc, rafts_radii, num_of_rafts):
"""
    sort the rafts into layers and number them sequentially from the innermost layer to the outermost layer
return sorted rafts_loc and rafts_radii, and layer index
"""
orbiting_center = np.mean(rafts_loc, axis=0)
orbiting_dist = np.sqrt((rafts_loc[:, 0] - orbiting_center[0]) ** 2 + (rafts_loc[:, 1] - orbiting_center[1]) ** 2)
sorted_index = orbiting_dist.argsort()
dist_sorted = orbiting_dist[sorted_index]
rafts_loc_sorted = rafts_loc[sorted_index, :]
rafts_radii_sorted = rafts_radii[sorted_index]
# assign layer
layer_index = np.ones(num_of_rafts, dtype=int)
layer_num = 1
for raft_id in np.arange(1, num_of_rafts):
if dist_sorted[raft_id] - dist_sorted[raft_id - 1] > rafts_radii_sorted[raft_id]:
layer_num = layer_num + 1
layer_index[raft_id] = layer_num
# calculate orbiting angle, note the two negative signs in front of both y- and x- components.
# For y-component, it is for flipping image axis.
    # For the x-component, it is to make the counting start at the x-axis and go clockwise.
# Note the value of arctan2 is [-pi, pi]
orbiting_angles = np.arctan2(-(rafts_loc_sorted[:, 1] - orbiting_center[1]),
-(rafts_loc_sorted[:, 0] - orbiting_center[0])) * 180 / np.pi
# concatenate and sort
rafts_loc_radii_dist_angle_layer = \
np.column_stack((rafts_loc_sorted[:, 0], rafts_loc_sorted[:, 1],
rafts_radii_sorted, dist_sorted, orbiting_angles, layer_index))
sorted_index2 = np.lexsort((orbiting_angles, layer_index))
rafts_loc_radii_dist_angle_layer_sorted = rafts_loc_radii_dist_angle_layer[sorted_index2]
rafts_loc_sorted2 = rafts_loc_radii_dist_angle_layer_sorted[:, 0:2].astype(int)
rafts_radii_sorted2 = rafts_loc_radii_dist_angle_layer_sorted[:, 2].astype(int)
dist_sorted2 = rafts_loc_radii_dist_angle_layer_sorted[:, 3]
angles_sorted2 = rafts_loc_radii_dist_angle_layer_sorted[:, 4]
layer_index_sorted2 = rafts_loc_radii_dist_angle_layer_sorted[:, 5]
return rafts_loc_sorted2, rafts_radii_sorted2, dist_sorted2, angles_sorted2, layer_index_sorted2
def crop_image(grayscale_image, raft_center, width):
"""
crop the area of the raft
"""
top_row = int(raft_center[1] - width / 2)
# note that y corresponds to rows, and is directed from top to bottom in scikit-image
bottom_row = int(raft_center[1] + width / 2)
left_column = int(raft_center[0] - width / 2)
right_column = int(raft_center[0] + width / 2)
raft_image = grayscale_image[top_row:bottom_row, left_column:right_column]
return raft_image
def tracking_rafts(prev_rafts_locations, detected_centers):
"""
sort the detected_centers according to the locations of rafts in the previous frame
the row number of col_ind is raft number in prev_rafts_locations,
the value in col_ind is the corresponding raft number in the detected_centers
"""
cost_matrix = scipy_distance.cdist(prev_rafts_locations, detected_centers, 'euclidean')
row_ind, col_ind = linear_sum_assignment(cost_matrix)
return col_ind
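# A minimal sketch of using tracking_rafts between two frames; the coordinates are made up
# for illustration only.
def _example_tracking_rafts():
    prev_locations = np.array([[100, 100], [200, 200]])
    detected = np.array([[202, 198], [99, 101]])  # the same two rafts, detected in swapped order
    col_ind = tracking_rafts(prev_locations, detected)
    # col_ind == [1, 0]: previous raft 0 matches detection 1, previous raft 1 matches detection 0
    return col_ind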
def counting_effused_rafts(prev_centers, prev_count, curr_centers, curr_count, boundary_x, max_displacement):
"""
    count the rafts that cross the boundary of the container, in either direction
"""
effused_raft_to_left = 0
effused_raft_to_right = 0
cost_matrix = scipy_distance.cdist(prev_centers[:prev_count], curr_centers[:curr_count], 'euclidean')
# note that row index refers to previous raft number, column index refers to current raft number
# select the boundary crossing to be in the middle of the cropped image, so only deals with existing rafts
for raftID in np.arange(prev_count):
        if np.any(cost_matrix[raftID, :] < max_displacement):  # the raft still exists
curr_raft_id = np.nonzero(cost_matrix[raftID, :] < max_displacement)[0][
0] # [0][0] is to convert array into scalar
if (prev_centers[raftID, 0] >= boundary_x) and (curr_centers[curr_raft_id, 0] < boundary_x):
effused_raft_to_left = effused_raft_to_left + 1
elif (prev_centers[raftID, 0] < boundary_x) and (curr_centers[curr_raft_id, 0] >= boundary_x):
effused_raft_to_right = effused_raft_to_right + 1
return effused_raft_to_left, effused_raft_to_right
def get_rotation_angle(prev_image, curr_image, size_of_cropped_image):
"""
extract the angle of rotation theta between two frames
    :param prev_image: grayscale image from the previous frame
    :param curr_image: grayscale image from the current frame
    :param size_of_cropped_image: side length of the (square) cropped image
    :return: theta, the rotation angle in degrees
    """
max_value = np.amax(prev_image)
if prev_image.dtype == 'float' and max_value <= 1:
prev_image = np.uint8(prev_image * 255)
curr_image = np.uint8(curr_image * 255)
if prev_image.dtype == 'float' and max_value > 1:
prev_image = np.uint8(prev_image)
curr_image = np.uint8(curr_image)
prev_image = cv.equalizeHist(prev_image)
curr_image = cv.equalizeHist(curr_image)
# Initiate ORB detector
orb = cv.ORB_create(nfeatures=200)
# find the keypoints and descriptors with ORB
kp1, des1 = orb.detectAndCompute(prev_image, None)
kp2, des2 = orb.detectAndCompute(curr_image, None)
# do feature matching
bf = cv.BFMatcher(cv.NORM_HAMMING, crossCheck=True)
matches = bf.match(des1, des2)
matches = sorted(matches, key=lambda x: x.distance)
# calculate perspective transform matrix
src_pts = np.float32([kp1[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)
dst_pts = np.float32([kp2[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2)
transform_matrix, mask = cv.findHomography(src_pts, dst_pts, cv.RANSAC, 5.0)
    if transform_matrix is None:
        # fall back to a plain least-squares homography if RANSAC fails
        transform_matrix, mask = cv.findHomography(src_pts, dst_pts, 0)
vector_along_x_axis_from_center = \
np.float32([[size_of_cropped_image / 2, size_of_cropped_image / 2],
[size_of_cropped_image, size_of_cropped_image / 2]]).reshape(-1, 1, 2)
vector_transformed = cv.perspectiveTransform(vector_along_x_axis_from_center, transform_matrix)
theta = - np.arctan2(vector_transformed[1, 0, 1] - vector_transformed[0, 0, 1],
vector_transformed[1, 0, 0] - vector_transformed[0, 0, 0]) * 180 / np.pi
# negative sign is to make the sign of the angle to correspond to one in a right-handed coordinate system
return theta
def draw_rafts(img_bgr, rafts_loc, rafts_radii, num_of_rafts):
"""
draw circles around rafts
"""
circle_thickness = int(2)
circle_color = (0, 0, 255) # openCV: BGR
output_img = img_bgr
for raft_id in np.arange(num_of_rafts):
output_img = cv.circle(output_img, (rafts_loc[raft_id, 0], rafts_loc[raft_id, 1]), rafts_radii[raft_id],
circle_color, circle_thickness)
return output_img
def draw_raft_orientations(img_bgr, rafts_loc, rafts_ori, rafts_radii, num_of_rafts):
"""
draw lines to indicate the orientation of each raft
"""
line_thickness = int(2)
line_color = (255, 0, 0)
output_img = img_bgr
for raft_id in np.arange(num_of_rafts):
line_start = (rafts_loc[raft_id, 0], rafts_loc[raft_id, 1])
line_end = (int(rafts_loc[raft_id, 0] + np.cos(rafts_ori[raft_id] * np.pi / 180) * rafts_radii[raft_id]),
int(rafts_loc[raft_id, 1] - np.sin(rafts_ori[raft_id] * np.pi / 180) * rafts_radii[raft_id]))
output_img = cv.line(output_img, line_start, line_end, line_color, line_thickness)
return output_img
def draw_raft_number(img_bgr, rafts_loc, num_of_rafts):
"""
draw the raft number at the center of the rafts
"""
font_face = cv.FONT_HERSHEY_SIMPLEX
font_scale = 0.5
font_color = (0, 255, 255) # BGR
font_thickness = 1
output_img = img_bgr
for raft_id in np.arange(num_of_rafts):
text_size, _ = cv.getTextSize(str(raft_id + 1), font_face, font_scale, font_thickness)
output_img = cv.putText(output_img, str(raft_id + 1),
(rafts_loc[raft_id, 0] - text_size[0] // 2, rafts_loc[raft_id, 1] + text_size[1] // 2),
font_face, font_scale, font_color, font_thickness, cv.LINE_AA)
return output_img
def draw_effused_raft_count(img_bgr, raft_effused, raft_to_left, raft_to_right, topleft_x, topleft_y, width_x,
height_y):
"""
draw effused rafts
"""
font_face = cv.FONT_HERSHEY_SIMPLEX
font_scale = 1
font_color = (0, 0, 255) # BGR
font_thickness = 2
line_color = (0, 0, 255) # BGR
line_thickness = 1
output_img = img_bgr
output_img = cv.line(output_img, (topleft_x + width_x // 2, topleft_y),
(topleft_x + width_x // 2, topleft_y + height_y), line_color, line_thickness)
output_img = cv.putText(output_img, 'Effused: ' + str(raft_effused), (topleft_x, topleft_y - 30), font_face,
font_scale, font_color, font_thickness, cv.LINE_AA)
output_img = cv.putText(output_img, 'To left: ' + str(raft_to_left), (topleft_x, topleft_y - 60), font_face,
font_scale, font_color, font_thickness, cv.LINE_AA)
output_img = cv.putText(output_img, 'To right: ' + str(raft_to_right), (topleft_x, topleft_y - 90), font_face,
font_scale, font_color, font_thickness, cv.LINE_AA)
return output_img
# functions used in the post-processing file
def calculate_centers_of_mass(x_all, y_all):
"""
    calculate the centers of mass of all rafts for each frame
    x_all - x positions, shape (# of frames, # of rafts), unit: pixel
    y_all - y positions, shape (# of frames, # of rafts), unit: pixel
"""
num_of_frames, num_of_rafts = x_all.shape
x_centers = x_all[:, 0:num_of_rafts].mean(axis=1)
y_centers = y_all[:, 0:num_of_rafts].mean(axis=1)
x_relative_to_centers = x_all - x_centers[:, np.newaxis]
y_relative_to_centers = y_all - y_centers[:, np.newaxis]
distances_to_centers = np.sqrt(x_relative_to_centers ** 2 + y_relative_to_centers ** 2)
orbiting_angles = np.arctan2(y_relative_to_centers, x_relative_to_centers) * 180 / np.pi
return distances_to_centers, orbiting_angles, x_centers, y_centers
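# A minimal sketch for calculate_centers_of_mass with two rafts over three frames;
# the positions are made-up pixel values.
def _example_calculate_centers_of_mass():
    x_all = np.array([[0., 10.], [0., 10.], [0., 10.]])  # (frames, rafts)
    y_all = np.zeros((3, 2))
    dist, angles, x_c, y_c = calculate_centers_of_mass(x_all, y_all)
    # x_c == [5., 5., 5.] and every raft stays 5 pixels from the center of mass
    return dist, angles, x_c, y_c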
def calculate_polar_angle(p1, p2):
"""
calculate the polar angle of the vector from p1 to p2.
"""
    # note the negative sign before the first component (the y component):
    # the y-axis in scikit-image points down, so the sign flip converts the angle
    # into a right-handed coordinate system.
    # the returned range is (-180, 180] degrees
angle = np.arctan2(-(p2[1] - p1[1]), (p2[0] - p1[0])) * 180 / np.pi
return angle
def adjust_orbiting_angles(orbiting_angles_series, orbiting_angles_diff_threshold=200):
"""
adjust the orbiting angles to get rid of the jump of 360 when it crosses from -180 to 180, or the reverse
    adjust single-point anomalies as well.
"""
orbiting_angles_diff = np.diff(orbiting_angles_series)
index_neg = orbiting_angles_diff < -orbiting_angles_diff_threshold
index_pos = orbiting_angles_diff > orbiting_angles_diff_threshold
insertion_indices_neg = np.nonzero(index_neg)
insertion_indices_pos = np.nonzero(index_pos)
orbiting_angles_diff_corrected = orbiting_angles_diff.copy()
orbiting_angles_diff_corrected[insertion_indices_neg[0]] += 360
orbiting_angles_diff_corrected[insertion_indices_pos[0]] -= 360
orbiting_angles_corrected = orbiting_angles_series.copy()
orbiting_angles_corrected[1:] = orbiting_angles_diff_corrected[:]
orbiting_angles_adjusted = np.cumsum(orbiting_angles_corrected)
return orbiting_angles_adjusted
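# A quick worked example for adjust_orbiting_angles: a jump from 170 to -170 degrees is a
# -340 step, which the threshold of 200 flags and corrects to +20, so the unwrapped series
# keeps increasing monotonically.
def _example_adjust_orbiting_angles():
    raw = np.array([150., 170., -170., -150.])
    adjusted = adjust_orbiting_angles(raw)
    return adjusted  # == [150., 170., 190., 210.]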
def adjust_orbiting_angles2(orbiting_angles_series, orbiting_angles_diff_threshold=200):
"""
    2nd version of adjust_orbiting_angles
    adjust the orbiting angles to get rid of the jump of 360
    when it crosses from -180 to 180, or the reverse
    orbiting_angles_series has the shape (raft num, frame num)
"""
orbiting_angles_diff = np.diff(orbiting_angles_series, axis=1)
index_neg = orbiting_angles_diff < -orbiting_angles_diff_threshold
index_pos = orbiting_angles_diff > orbiting_angles_diff_threshold
insertion_indices_neg = np.nonzero(index_neg)
insertion_indices_pos = np.nonzero(index_pos)
orbiting_angles_diff_corrected = orbiting_angles_diff.copy()
orbiting_angles_diff_corrected[insertion_indices_neg[0], insertion_indices_neg[1]] += 360
orbiting_angles_diff_corrected[insertion_indices_pos[0], insertion_indices_pos[1]] -= 360
orbiting_angles_corrected = orbiting_angles_series.copy()
orbiting_angles_corrected[:, 1:] = orbiting_angles_diff_corrected[:]
orbiting_angles_adjusted = np.cumsum(orbiting_angles_corrected, axis=1)
return orbiting_angles_adjusted
def mutual_info_matrix(time_series, num_of_bins):
"""
Calculate mutual information for each pair of rafts
time_series - rows are raft numbers, and columns are times
    num_of_bins - number of bins used for the 2D histogram
    the result is in units of bits.
"""
num_of_rafts, interval_width = time_series.shape
mi_matrix = np.zeros((num_of_rafts, num_of_rafts))
for i in range(num_of_rafts):
for j in range(i + 1):
i0 = time_series[i, :].copy()
j0 = time_series[j, :].copy()
c_xy = np.histogram2d(i0, j0, num_of_bins)[0]
mi = mutual_info_score(None, None, contingency=c_xy) * np.log2(np.e)
# in unit of bits, * np.log2(np.e) to convert nats to bits
mi_matrix[i, j] = mi
mi_matrix[j, i] = mi
return mi_matrix
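# A minimal sketch for mutual_info_matrix: for two identical series the mutual information
# equals the entropy of either one, so the off-diagonal entries match the diagonal ones.
def _example_mutual_info_matrix():
    rng = np.random.RandomState(0)
    series = rng.rand(100)
    time_series = np.vstack((series, series))  # 2 rafts x 100 time points
    mi = mutual_info_matrix(time_series, num_of_bins=10)
    # mi is symmetric and mi[0, 1] equals mi[0, 0] up to floating-point error
    return mi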
def shannon_entropy(c):
"""calculate the Shannon entropy of 1 d data. The unit is bits """
c_normalized = c / float(np.sum(c))
c_normalized_nonzero = c_normalized[np.nonzero(c_normalized)] # gives 1D array
entropy = -sum(c_normalized_nonzero * np.log2(c_normalized_nonzero)) # unit in bits
return entropy
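# A quick worked example for shannon_entropy: a flat histogram over four bins carries exactly 2 bits.
def _example_shannon_entropy():
    counts = np.array([25, 25, 25, 25])
    return shannon_entropy(counts)  # == 2.0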
def fft_distances(sampling_rate, distances):
"""
    given the sampling rate and the distance series, output the frequency vector and the one-sided power spectrum
sampling_rate: unit Hz
distances: numpy array, unit micron
"""
# sampling_interval = 1/sampling_rate # unit s
# times = np.linspace(0,sampling_length*sampling_interval, sampling_length)
sampling_length = len(distances) # total number of frames
fft_dist = np.fft.fft(distances)
p2 = np.abs(fft_dist / sampling_length)
p1 = p2[0:int(sampling_length / 2) + 1]
p1[1:-1] = 2 * p1[1:-1] # one-sided power spectrum
frequencies = sampling_rate / sampling_length * np.arange(0, int(sampling_length / 2) + 1)
return frequencies, p1
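# A minimal sketch for fft_distances: a pure 5 Hz oscillation sampled at 100 Hz for 1 s gives a
# single peak of amplitude ~1 at the 5 Hz bin of the one-sided spectrum.
def _example_fft_distances():
    sampling_rate = 100  # Hz
    times = np.arange(100) / sampling_rate
    distances = np.sin(2 * np.pi * 5 * times)
    frequencies, p1 = fft_distances(sampling_rate, distances)
    # frequencies[5] == 5.0 and p1[5] is close to 1.0
    return frequencies, p1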
def draw_clusters(img_bgr, connectivity_matrix, rafts_loc):
"""
draw lines between centers of connected rafts
"""
line_thickness = 2
line_color = (0, 255, 0)
output_img = img_bgr
raft1s, raft2s = np.nonzero(connectivity_matrix)
for raftA, raftB in zip(raft1s, raft2s):
output_img = cv.line(output_img, (rafts_loc[raftA, 0], rafts_loc[raftA, 1]),
(rafts_loc[raftB, 0], rafts_loc[raftB, 1]), line_color, line_thickness)
return output_img
def draw_voronoi(img_bgr, rafts_loc):
"""
draw Voronoi patterns
"""
points = rafts_loc
vor = ScipyVoronoi(points)
output_img = img_bgr
# drawing Voronoi vertices
vertex_size = int(3)
vertex_color = (255, 0, 0)
for x, y in zip(vor.vertices[:, 0], vor.vertices[:, 1]):
output_img = cv.circle(output_img, (int(x), int(y)), vertex_size, vertex_color)
# drawing Voronoi edges
edge_color = (0, 255, 0)
edge_thickness = int(2)
for simplex in vor.ridge_vertices:
simplex = np.asarray(simplex)
if np.all(simplex >= 0):
output_img = cv.line(output_img, (int(vor.vertices[simplex[0], 0]), int(vor.vertices[simplex[0], 1])),
(int(vor.vertices[simplex[1], 0]), int(vor.vertices[simplex[1], 1])), edge_color,
edge_thickness)
center = points.mean(axis=0)
for pointidx, simplex in zip(vor.ridge_points, vor.ridge_vertices):
simplex = np.asarray(simplex)
if np.any(simplex < 0):
i = simplex[simplex >= 0][0] # finite end Voronoi vertex
t = points[pointidx[1]] - points[pointidx[0]] # tangent
t = t / np.linalg.norm(t)
n = np.array([-t[1], t[0]]) # normal
midpoint = points[pointidx].mean(axis=0)
far_point = vor.vertices[i] + np.sign(np.dot(midpoint - center, n)) * n * 100
output_img = cv.line(output_img, (int(vor.vertices[i, 0]), int(vor.vertices[i, 1])),
(int(far_point[0]), int(far_point[1])), edge_color, edge_thickness)
return output_img
def draw_at_bottom_left_of_raft_number_float(img_bgr, rafts_loc, neighbor_count_wt, num_of_rafts):
"""
write a subscript to indicate nearest neighbor count or weighted nearest neighbor count
"""
font_face = cv.FONT_ITALIC
font_scale = 0.5
font_color = (0, 165, 255) # BGR
font_thickness = 1
output_img = img_bgr
for raft_id in np.arange(num_of_rafts):
text_size, _ = cv.getTextSize(str(raft_id + 1), font_face, font_scale, font_thickness)
output_img = cv.putText(output_img, '{:.2}'.format(neighbor_count_wt[raft_id]),
(rafts_loc[raft_id, 0] + text_size[0] // 2, rafts_loc[raft_id, 1] + text_size[1]),
font_face, font_scale, font_color, font_thickness, cv.LINE_AA)
return output_img
def draw_at_bottom_left_of_raft_number_integer(img_bgr, rafts_loc, neighbor_count_wt, num_of_rafts):
"""
write a subscript to indicate nearest neighbor count or weighted nearest neighbor count
"""
font_face = cv.FONT_ITALIC
font_scale = 0.5
font_color = (0, 165, 255) # BGR
font_thickness = 1
output_img = img_bgr
for raft_id in np.arange(num_of_rafts):
text_size, _ = cv.getTextSize(str(raft_id + 1), font_face, font_scale, font_thickness)
output_img = cv.putText(output_img, '{:}'.format(neighbor_count_wt[raft_id]),
(rafts_loc[raft_id, 0] + text_size[0] // 2, rafts_loc[raft_id, 1] + text_size[1]),
font_face, font_scale, font_color, font_thickness, cv.LINE_AA)
return output_img
def draw_neighbor_counts(img_bgr, rafts_loc, num_of_rafts):
"""
    draw the number of Voronoi neighbors at the bottom left of each raft
"""
points = rafts_loc
vor = ScipyVoronoi(points)
neighbor_counts = np.zeros(num_of_rafts, dtype=int)
for raft_id in range(num_of_rafts):
neighbor_counts[raft_id] = np.count_nonzero(vor.ridge_points.ravel() == raft_id)
font_face = cv.FONT_ITALIC
font_scale = 0.5
font_color = (0, 165, 255) # BGR
font_thickness = 1
output_img = img_bgr
for raft_id in np.arange(num_of_rafts):
text_size, _ = cv.getTextSize(str(raft_id + 1), font_face, font_scale, font_thickness)
output_img = cv.putText(output_img, str(neighbor_counts[raft_id]),
(rafts_loc[raft_id, 0] + text_size[0] // 2, rafts_loc[raft_id, 1] + text_size[1]),
font_face, font_scale, font_color, font_thickness, cv.LINE_AA)
return output_img
def polygon_area(x, y):
"""
calculate the area of a polygon given the x and y coordinates of vertices
ref: https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates
"""
return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))
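# A quick worked example for polygon_area (shoelace formula): the unit square has area 1.
def _example_polygon_area():
    x = np.array([0., 1., 1., 0.])
    y = np.array([0., 0., 1., 1.])
    return polygon_area(x, y)  # == 1.0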
def ssa_decompose(y, dim):
"""
from Vimal
Singular Spectrum Analysis decomposition for a time series
:param y: time series (array)
:param dim: the embedding dimension
:return: (pc, s, v) where
pc is the matrix with the principal components of y
s is the vector of the singular values of y given dim
v is the matrix of the singular vectors of y given dim
"""
n = len(y)
t = n - (dim - 1)
yy = linalg.hankel(y, np.zeros(dim))
yy = yy[:-dim + 1, :] / np.sqrt(t)
# here we use gesvd driver (as in Matlab)
_, s, v = linalg.svd(yy, full_matrices=False, lapack_driver='gesvd')
# find principal components
vt = np.matrix(v).T
pc = np.matrix(yy) * vt
return np.asarray(pc), s, np.asarray(vt)
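# A minimal usage sketch for ssa_decompose; the test series and embedding dimension are
# arbitrary illustrative choices.
def _example_ssa_decompose():
    y = np.sin(np.linspace(0, 10 * np.pi, 200))
    pc, s, v = ssa_decompose(y, dim=20)
    # pc has shape (181, 20): one row per window position, one column per component
    return pc, s, v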
def ssa_reconstruct(pc, v, k):
"""
from Vimal
Series reconstruction for given SSA decomposition using vector of components
:param pc: matrix with the principal components from SSA
:param v: matrix of the singular vectors from SSA
:param k: vector with the indices of the components to be reconstructed
:return: the reconstructed time series
"""
if np.isscalar(k):
k = [k]
if pc.ndim != 2:
raise ValueError('pc must be a 2-dimensional matrix')
if v.ndim != 2:
raise ValueError('v must be a 2-dimensional matrix')
t, dim = pc.shape
n_points = t + (dim - 1)
if any(filter(lambda x: dim < x or x < 0, k)):
raise ValueError('k must be vector of indexes from range 0..%d' % dim)
pc_comp = np.asarray(np.matrix(pc[:, k]) * np.matrix(v[:, k]).T)
xr = np.zeros(n_points)
times = np.zeros(n_points)
# reconstruction loop
for i in range(dim):
xr[i: t + i] = xr[i: t + i] + pc_comp[:, i]
times[i: t + i] = times[i: t + i] + 1
xr = (xr / times) *
|
np.sqrt(t)
|
numpy.sqrt
|
import os
import argparse
import numpy as np
import pandas as pd
from time import time
from scipy.stats import norm
from scipy.spatial.distance import euclidean
from editing_dist_n_lcs_dp import edit_distance
from editing_dist_n_lcs_dp import lcs
#global variables
# BREAK_POINTS = []
# LOOKUP_TABLE = []
# TODO BUILD CLASS
# TODO find optimal VOCAB_SIZE & PAA_SIZE OR WINDOW_SIZE
# TODO compare multiple series
# TODO find motifs (cycles)
def matrix_to_df(cols, matrix):
"""
Convert matrix of time series to pd.DataFrame
"""
df = pd.DataFrame()
for i in range(len(cols)):
df[cols[i]] = matrix[i]
return df
def znorm(ts):
"""
Standardize data
"""
return (ts - np.mean(ts)) /
|
np.std(ts)
|
numpy.std
|
from unittest import TestCase
import os
import tempfile
import pickle
import itertools
import numpy as np
from scipy import sparse
import neoml
import threading
class MultithreadedTestCase(TestCase):
def _thread_function(self, target, kwargs):
print(f"python thread {threading.get_ident()} started")
target(**kwargs)
print(f"python thread {threading.get_ident()} finished")
def _test_mt(self, target, result, enable_assert=False):
import time
threads = []
system_time, user_time = time.perf_counter(), time.process_time()
for _ in range(4):
t = threading.Thread(target=self._thread_function, args=(target, {'result': result}))
threads.append(t)
t.start()
for t in threads:
t.join()
system_time, user_time = time.perf_counter() - system_time, time.process_time() - user_time
print()
print('System time {0:.6f} sec.'.format(system_time))
print('User time {0:.6f} sec.'.format(user_time))
if enable_assert:
self.assertTrue(system_time < user_time)
def run(self, result=None):
self._test_mt(super().run, result=result)
class MathEngineTestCase(MultithreadedTestCase):
def test_gpu_math_engine(self):
check = False
try:
print(neoml.MathEngine.GpuMathEngine(666).info)
except ValueError as err:
check = True
self.assertEqual(check, True)
check = False
try:
print(neoml.MathEngine.GpuMathEngine(-666).info)
except ValueError as err:
check = True
self.assertEqual(check, True)
gpu = neoml.MathEngine.enum_gpu()
index = 0
for x in gpu:
math_engine = neoml.MathEngine.GpuMathEngine(index)
self.assertTrue(isinstance(math_engine, neoml.MathEngine.GpuMathEngine))
index += 1
def test_cpu_math_engine(self):
math_engine = neoml.MathEngine.CpuMathEngine()
self.assertTrue(isinstance(math_engine, neoml.MathEngine.CpuMathEngine))
blob = neoml.Blob.vector(math_engine, 10, "int32")
self.assertEqual(math_engine.peak_memory_usage, 40)
class BlobTestCase(MultithreadedTestCase):
def test_pickle(self):
math_engine = neoml.MathEngine.CpuMathEngine(1)
a = np.ones((2, 3, 4, 5), dtype=np.int32)
shape = (2, 3, 1, 4, 1, 1, 5)
blob = neoml.Blob.asblob(math_engine, a, shape, False)
dir = tempfile.mkdtemp()
path = os.path.join(dir, 'blob.pickle')
binary_file = open(path, mode='wb')
pickle.dump(blob, binary_file)
binary_file.close()
binary_file = open(path, mode='rb')
loaded_blob = pickle.load(binary_file)
binary_file.close()
os.remove(path)
os.rmdir(dir)
self.assertEqual(blob.shape, loaded_blob.shape)
def test_load_store(self):
math_engine = neoml.MathEngine.CpuMathEngine(1)
a = np.ones((2, 3, 4, 5), dtype=np.int32)
shape = (2, 3, 1, 4, 1, 1, 5)
blob = neoml.Blob.asblob(math_engine, a, shape, False)
dir = tempfile.mkdtemp()
path = os.path.join(dir, 'blob.pickle')
neoml.Blob.store(blob, path)
loaded_blob = neoml.Blob.load(math_engine, path)
os.remove(path)
os.rmdir(dir)
self.assertEqual(blob.shape, loaded_blob.shape)
def test_copy(self):
math_engine = neoml.MathEngine.CpuMathEngine(1)
a = np.ones((4, 4, 4, 4), dtype=np.int32)
shape = (4, 4, 1, 4, 4, 1, 1)
blob = neoml.Blob.asblob(math_engine, a, shape, False)
blob2 = blob.copy(math_engine)
self.assertEqual(blob2.shape, blob.shape)
a2 = blob2.asarray()
self.assertEqual(a2.shape, a.shape)
def test_asblob(self):
math_engine = neoml.MathEngine.CpuMathEngine(1)
float_array = np.ones((2, 5, 7, 16), dtype=np.float32)
shape = (1, 2, 1, 5, 7, 1, 16)
float_blob = neoml.Blob.asblob(math_engine, float_array, shape, False)
self.assertEqual(float_blob.shape, shape)
self.assertEqual(float_blob.batch_len, 1)
self.assertEqual(float_blob.batch_width, 2)
self.assertEqual(float_blob.list_size, 1)
self.assertEqual(float_blob.height, 5)
self.assertEqual(float_blob.width, 7)
self.assertEqual(float_blob.depth, 1)
self.assertEqual(float_blob.channels, 16)
self.assertEqual(float_blob.size, 2 * 5 * 7 * 16)
self.assertEqual(float_blob.object_count, 2)
self.assertEqual(float_blob.object_size, 5 * 7 * 16)
blob_float_array = float_blob.asarray()
blob_float_array2 = float_blob.asarray(True)
self.assertEqual(blob_float_array.shape, blob_float_array2.shape)
float_array[0][1][1][1] = 2.0
self.assertEqual(float_array[0][1][1][1], blob_float_array[0][1][1][1])
self.assertEqual(1.0, blob_float_array2[0][1][1][1])
def test_vector(self):
math_engine = neoml.MathEngine.CpuMathEngine(1)
float_blob = neoml.Blob.vector(math_engine, 16, "float32")
self.assertEqual(float_blob.batch_len, 16)
self.assertEqual(float_blob.batch_width, 1)
self.assertEqual(float_blob.list_size, 1)
self.assertEqual(float_blob.height, 1)
self.assertEqual(float_blob.width, 1)
self.assertEqual(float_blob.depth, 1)
self.assertEqual(float_blob.channels, 1)
self.assertEqual(float_blob.size, 16)
self.assertEqual(float_blob.object_count, 16)
self.assertEqual(float_blob.object_size, 1)
def test_matrix(self):
math_engine = neoml.MathEngine.CpuMathEngine(1)
float_blob = neoml.Blob.matrix(math_engine, 16, 32, "int32")
self.assertEqual(float_blob.batch_len, 16)
self.assertEqual(float_blob.batch_width, 32)
self.assertEqual(float_blob.list_size, 1)
self.assertEqual(float_blob.height, 1)
self.assertEqual(float_blob.width, 1)
self.assertEqual(float_blob.depth, 1)
self.assertEqual(float_blob.channels, 1)
self.assertEqual(float_blob.size, 16 * 32)
self.assertEqual(float_blob.object_count, 16 * 32)
self.assertEqual(float_blob.object_size, 1)
def test_tensor(self):
math_engine = neoml.MathEngine.CpuMathEngine(1)
shape = (1, 2, 3, 4, 5, 6, 7)
float_blob = neoml.Blob.tensor(math_engine, shape, "int32")
self.assertEqual(float_blob.batch_len, 1)
self.assertEqual(float_blob.batch_width, 2)
self.assertEqual(float_blob.list_size, 3)
self.assertEqual(float_blob.height, 4)
self.assertEqual(float_blob.width, 5)
self.assertEqual(float_blob.depth, 6)
self.assertEqual(float_blob.channels, 7)
self.assertEqual(float_blob.size, 1 * 2 * 3 * 4 * 5 * 6 * 7)
self.assertEqual(float_blob.object_count, 2 * 3)
self.assertEqual(float_blob.object_size, 4 * 5 * 6 * 7)
def test_list_blob(self):
math_engine = neoml.MathEngine.CpuMathEngine(1)
float_blob = neoml.Blob.list_blob(math_engine, 2, 3, 4, 5, "int32")
self.assertEqual(float_blob.batch_len, 2)
self.assertEqual(float_blob.batch_width, 3)
self.assertEqual(float_blob.list_size, 4)
self.assertEqual(float_blob.height, 1)
self.assertEqual(float_blob.width, 1)
self.assertEqual(float_blob.depth, 1)
self.assertEqual(float_blob.channels, 5)
self.assertEqual(float_blob.size, 2 * 3 * 4 * 5)
self.assertEqual(float_blob.object_count, 2 * 3 * 4)
self.assertEqual(float_blob.object_size, 5)
def test_image2d(self):
math_engine = neoml.MathEngine.CpuMathEngine(1)
float_blob = neoml.Blob.image2d(math_engine, 2, 3, 4, 5, 6, "float32")
self.assertEqual(float_blob.batch_len, 2)
self.assertEqual(float_blob.batch_width, 3)
self.assertEqual(float_blob.list_size, 1)
self.assertEqual(float_blob.height, 4)
self.assertEqual(float_blob.width, 5)
self.assertEqual(float_blob.depth, 1)
self.assertEqual(float_blob.channels, 6)
self.assertEqual(float_blob.size, 2 * 3 * 4 * 5 * 6)
self.assertEqual(float_blob.object_count, 2 * 3)
self.assertEqual(float_blob.object_size, 4 * 5 * 6)
def test_image3d(self):
math_engine = neoml.MathEngine.CpuMathEngine(1)
float_blob = neoml.Blob.image3d(math_engine, 2, 3, 4, 5, 6, 7, "float32")
self.assertEqual(float_blob.batch_len, 2)
self.assertEqual(float_blob.batch_width, 3)
self.assertEqual(float_blob.list_size, 1)
self.assertEqual(float_blob.height, 4)
self.assertEqual(float_blob.width, 5)
self.assertEqual(float_blob.depth, 6)
self.assertEqual(float_blob.channels, 7)
self.assertEqual(float_blob.size, 2 * 3 * 4 * 5 * 6 * 7)
self.assertEqual(float_blob.object_count, 2 * 3)
self.assertEqual(float_blob.object_size, 4 * 5 * 6 * 7)
class SolverTestCase(MultithreadedTestCase):
def test_nesterov_gradient(self):
math_engine = neoml.MathEngine.CpuMathEngine(1)
solver = neoml.Dnn.NesterovGradient(math_engine, learning_rate=0.6, l1=0.6, l2=0.6,
moment_decay_rate=0.6, max_gradient_norm=0.6,
second_moment_decay_rate=0.6, epsilon=0.6, ams_grad=True)
self.assertAlmostEqual(solver.l1, 0.6, delta=1e-3)
self.assertAlmostEqual(solver.l2, 0.6, delta=1e-3)
self.assertAlmostEqual(solver.learning_rate, 0.6, delta=1e-3)
self.assertAlmostEqual(solver.max_gradient_norm, 0.6, delta=1e-3)
self.assertAlmostEqual(solver.moment_decay_rate, 0.6, delta=1e-3)
self.assertAlmostEqual(solver.second_moment_decay_rate, 0.6, delta=1e-3)
self.assertAlmostEqual(solver.epsilon, 0.6, delta=1e-3)
self.assertEqual(solver.ams_grad, True)
def test_adaptive_gradient(self):
math_engine = neoml.MathEngine.CpuMathEngine(1)
solver = neoml.Dnn.AdaptiveGradient(math_engine, learning_rate=0.6, l1=0.6, l2=0.6,
moment_decay_rate=0.6, max_gradient_norm=0.6,
second_moment_decay_rate=0.6, epsilon=0.6, ams_grad=True)
self.assertAlmostEqual(solver.l1, 0.6, delta=1e-3)
self.assertAlmostEqual(solver.l2, 0.6, delta=1e-3)
self.assertAlmostEqual(solver.learning_rate, 0.6, delta=1e-3)
self.assertAlmostEqual(solver.max_gradient_norm, 0.6, delta=1e-3)
self.assertAlmostEqual(solver.moment_decay_rate, 0.6, delta=1e-3)
self.assertAlmostEqual(solver.second_moment_decay_rate, 0.6, delta=1e-3)
self.assertAlmostEqual(solver.epsilon, 0.6, delta=1e-3)
self.assertEqual(solver.ams_grad, True)
def test_simple_gradient(self):
math_engine = neoml.MathEngine.CpuMathEngine(1)
solver = neoml.Dnn.SimpleGradient(math_engine, learning_rate=0.6, l1=0.6, l2=0.6,
moment_decay_rate=0.6, max_gradient_norm=0.6)
self.assertAlmostEqual(solver.l1, 0.6, delta=1e-3)
self.assertAlmostEqual(solver.l2, 0.6, delta=1e-3)
self.assertAlmostEqual(solver.learning_rate, 0.6, delta=1e-3)
self.assertAlmostEqual(solver.max_gradient_norm, 0.6, delta=1e-3)
self.assertAlmostEqual(solver.moment_decay_rate, 0.6, delta=1e-3)
class LayersTestCase(MultithreadedTestCase):
def test_lstm(self):
math_engine = neoml.MathEngine.CpuMathEngine(1)
dnn = neoml.Dnn.Dnn(math_engine)
source1 = neoml.Dnn.Source(dnn, "source1")
lstm = neoml.Dnn.Lstm(source1, 7, 0.6, name="lstm")
sink1 = neoml.Dnn.Sink((lstm, 0), "sink1")
sink2 = neoml.Dnn.Sink((lstm, 1), "sink2")
layer = dnn.layers['lstm']
self.assertEqual(layer.name, 'lstm')
input1 = neoml.Blob.asblob(math_engine, np.ones((5, 3, 16), dtype=np.float32), (5, 3, 1, 1, 1, 1, 16))
inputs = {"source1": input1}
outputs = dnn.run(inputs)
out1 = outputs["sink1"].asarray()
out2 = outputs["sink2"].asarray()
self.assertEqual(lstm.hidden_size, 7)
self.assertEqual(layer.hidden_size, 7)
self.assertEqual(lstm.reverse_sequence, False)
lstm.reverse_sequence = True
self.assertEqual(lstm.reverse_sequence, True)
self.assertEqual(layer.reverse_sequence, True)
self.assertAlmostEqual(lstm.dropout, 0.6, delta=1e-3)
lstm.dropout = 0.9
self.assertAlmostEqual(lstm.dropout, 0.9, delta=1e-3)
self.assertAlmostEqual(layer.dropout, 0.9, delta=1e-3)
self.assertEqual(lstm.activation, "sigmoid")
lstm.activation = "abs"
self.assertEqual(lstm.activation, "abs")
self.assertEqual(out1.shape, (5, 3, 7))
self.assertEqual(out2.shape, (5, 3, 7))
w_blob = lstm.input_weights
weights = w_blob.asarray()
lstm.input_weights = w_blob
f_blob = lstm.input_free_term
free_term = f_blob.asarray()
lstm.input_free_term = f_blob
w_blob = lstm.recurrent_weights
weights = w_blob.asarray()
lstm.recurrent_weights = w_blob
f_blob = lstm.recurrent_free_term
free_term = f_blob.asarray()
lstm.recurrent_free_term = f_blob
self.assertEqual(weights.shape, (28, 7))
self.assertEqual(free_term.shape, (28,))
def test_fully_connected(self):
math_engine = neoml.MathEngine.CpuMathEngine(1)
dnn = neoml.Dnn.Dnn(math_engine)
source1 = neoml.Dnn.Source(dnn, "source1")
source2 = neoml.Dnn.Source(dnn, "source2")
fully = neoml.Dnn.FullyConnected((source1, source2), 5, False, "fully")
sink1 = neoml.Dnn.Sink((fully, 0), "sink1")
sink2 = neoml.Dnn.Sink((fully, 1), "sink2")
layer = dnn.layers['fully']
self.assertEqual(layer.name, 'fully')
input1 = neoml.Blob.asblob(math_engine, np.ones((12, 16), dtype=np.float32), (12, 1, 1, 1, 1, 1, 16))
input2 = neoml.Blob.asblob(math_engine, np.ones((10, 16), dtype=np.float32), (10, 1, 1, 1, 1, 1, 16))
inputs = {"source1": input1, "source2": input2}
outputs = dnn.run(inputs)
out1 = outputs["sink1"].asarray()
out2 = outputs["sink2"].asarray()
self.assertEqual(fully.element_count, 5)
self.assertEqual(layer.element_count, 5)
self.assertEqual(fully.zero_free_term, False)
fully.zero_free_term = True
self.assertEqual(fully.zero_free_term, True)
self.assertEqual(layer.zero_free_term, True)
self.assertEqual(out1.shape, (12, 5))
self.assertEqual(out2.shape, (10, 5))
w_blob = fully.weights
weights = w_blob.asarray()
fully.weights = w_blob
f_blob = fully.free_term
free_term = f_blob.asarray()
fully.free_term = f_blob
self.assertEqual(weights.shape, (5, 16))
self.assertEqual(free_term.shape, (5,))
def test_concat_channels(self):
math_engine = neoml.MathEngine.CpuMathEngine(1)
dnn = neoml.Dnn.Dnn(math_engine)
source1 = neoml.Dnn.Source(dnn, "source1")
source2 = neoml.Dnn.Source(dnn, "source2")
concat = neoml.Dnn.ConcatChannels((source1, source2), "concat")
sink = neoml.Dnn.Sink(concat, "sink")
layer = dnn.layers['concat']
self.assertEqual(layer.name, 'concat')
input1 = neoml.Blob.asblob(math_engine, np.ones((16), dtype=np.float32), (1, 1, 1, 1, 1, 1, 16))
input2 = neoml.Blob.asblob(math_engine, np.ones((16), dtype=np.float32), (1, 1, 1, 1, 1, 1, 16))
inputs = {"source1": input1, "source2": input2}
outputs = dnn.run(inputs)
a = outputs["sink"].asarray()
self.assertEqual(outputs["sink"].channels, 32)
self.assertEqual(a.size, 32)
def test_concat_depth(self):
math_engine = neoml.MathEngine.CpuMathEngine(1)
dnn = neoml.Dnn.Dnn(math_engine)
source1 = neoml.Dnn.Source(dnn, "source1")
source2 = neoml.Dnn.Source(dnn, "source2")
concat = neoml.Dnn.ConcatDepth((source1, source2), "concat")
sink = neoml.Dnn.Sink(concat, "sink")
layer = dnn.layers['concat']
self.assertEqual(layer.name, 'concat')
input1 = neoml.Blob.asblob(math_engine, np.ones((16), dtype=np.float32), (1, 1, 1, 1, 1, 16, 1))
input2 = neoml.Blob.asblob(math_engine, np.ones((16), dtype=np.float32), (1, 1, 1, 1, 1, 16, 1))
inputs = {"source1": input1, "source2": input2}
outputs = dnn.run(inputs)
a = outputs["sink"].asarray()
self.assertEqual(outputs["sink"].depth, 32)
self.assertEqual(a.size, 32)
def test_concat_width(self):
math_engine = neoml.MathEngine.CpuMathEngine(1)
dnn = neoml.Dnn.Dnn(math_engine)
source1 = neoml.Dnn.Source(dnn, "source1")
source2 = neoml.Dnn.Source(dnn, "source2")
concat = neoml.Dnn.ConcatWidth((source1, source2), "concat")
sink = neoml.Dnn.Sink(concat, "sink")
layer = dnn.layers['concat']
self.assertEqual(layer.name, 'concat')
input1 = neoml.Blob.asblob(math_engine, np.ones((16), dtype=np.float32), (1, 1, 1, 1, 16, 1, 1))
input2 = neoml.Blob.asblob(math_engine, np.ones((16), dtype=np.float32), (1, 1, 1, 1, 16, 1, 1))
inputs = {"source1": input1, "source2": input2}
outputs = dnn.run(inputs)
a = outputs["sink"].asarray()
self.assertEqual(outputs["sink"].width, 32)
self.assertEqual(a.size, 32)
def test_concat_height(self):
math_engine = neoml.MathEngine.CpuMathEngine(1)
dnn = neoml.Dnn.Dnn(math_engine)
source1 = neoml.Dnn.Source(dnn, "source1")
source2 = neoml.Dnn.Source(dnn, "source2")
concat = neoml.Dnn.ConcatHeight((source1, source2), "concat")
sink = neoml.Dnn.Sink(concat, "sink")
layer = dnn.layers['concat']
self.assertEqual(layer.name, 'concat')
input1 = neoml.Blob.asblob(math_engine, np.ones((16), dtype=np.float32), (1, 1, 1, 16, 1, 1, 1))
input2 = neoml.Blob.asblob(math_engine, np.ones((16), dtype=np.float32), (1, 1, 1, 16, 1, 1, 1))
inputs = {"source1": input1, "source2": input2}
outputs = dnn.run(inputs)
a = outputs["sink"].asarray()
self.assertEqual(outputs["sink"].height, 32)
self.assertEqual(a.size, 32)
def test_concat_batch_width(self):
math_engine = neoml.MathEngine.CpuMathEngine(1)
dnn = neoml.Dnn.Dnn(math_engine)
source1 = neoml.Dnn.Source(dnn, "source1")
source2 = neoml.Dnn.Source(dnn, "source2")
concat = neoml.Dnn.ConcatBatchWidth((source1, source2), "concat")
sink = neoml.Dnn.Sink(concat, "sink")
layer = dnn.layers['concat']
self.assertEqual(layer.name, 'concat')
input1 = neoml.Blob.asblob(math_engine, np.ones((16), dtype=np.float32), (1, 16, 1, 1, 1, 1, 1))
input2 = neoml.Blob.asblob(math_engine, np.ones((16), dtype=np.float32), (1, 16, 1, 1, 1, 1, 1))
inputs = {"source1": input1, "source2": input2}
outputs = dnn.run(inputs)
a = outputs["sink"].asarray()
self.assertEqual(outputs["sink"].batch_width, 32)
self.assertEqual(a.size, 32)
def test_concat_batch_length(self):
math_engine = neoml.MathEngine.CpuMathEngine(1)
dnn = neoml.Dnn.Dnn(math_engine)
source1 = neoml.Dnn.Source(dnn, "source1")
source2 = neoml.Dnn.Source(dnn, "source2")
concat = neoml.Dnn.ConcatBatchLength((source1, source2), "concat")
sink = neoml.Dnn.Sink(concat, "sink")
layer = dnn.layers['concat']
self.assertEqual(layer.name, 'concat')
input1 = neoml.Blob.asblob(math_engine, np.ones((16), dtype=np.float32), (16, 1, 1, 1, 1, 1, 1))
input2 = neoml.Blob.asblob(math_engine, np.ones((15), dtype=np.float32), (15, 1, 1, 1, 1, 1, 1))
inputs = {"source1": input1, "source2": input2}
outputs = dnn.run(inputs)
a = outputs["sink"].asarray()
self.assertEqual(outputs["sink"].batch_len, 31)
self.assertEqual(a.size, 31)
def test_concat_list_size(self):
math_engine = neoml.MathEngine.CpuMathEngine(1)
dnn = neoml.Dnn.Dnn(math_engine)
source1 = neoml.Dnn.Source(dnn, "source1")
source2 = neoml.Dnn.Source(dnn, "source2")
concat = neoml.Dnn.ConcatListSize((source1, source2), "concat")
sink = neoml.Dnn.Sink(concat, "sink")
layer = dnn.layers['concat']
self.assertEqual(layer.name, 'concat')
input1 = neoml.Blob.asblob(math_engine, np.ones((15), dtype=np.float32), (1, 1, 15, 1, 1, 1, 1))
input2 = neoml.Blob.asblob(math_engine, np.ones((16), dtype=np.float32), (1, 1, 16, 1, 1, 1, 1))
inputs = {"source1": input1, "source2": input2}
outputs = dnn.run(inputs)
a = outputs["sink"].asarray()
self.assertEqual(outputs["sink"].list_size, 31)
self.assertEqual(a.size, 31)
def test_concat_object(self):
math_engine = neoml.MathEngine.CpuMathEngine(1)
dnn = neoml.Dnn.Dnn(math_engine)
source1 = neoml.Dnn.Source(dnn, "source1")
source2 = neoml.Dnn.Source(dnn, "source2")
concat = neoml.Dnn.ConcatObject((source1, source2), "concat")
sink = neoml.Dnn.Sink(concat, "sink")
layer = dnn.layers['concat']
self.assertEqual(layer.name, 'concat')
input1 = neoml.Blob.asblob(math_engine, np.ones((2, 3, 4, 5), dtype=np.float32), (1, 1, 1, 2, 3, 4, 5))
input2 = neoml.Blob.asblob(math_engine, np.ones((16), dtype=np.float32), (1, 1, 1, 16, 1, 1, 1))
inputs = {"source1": input1, "source2": input2}
outputs = dnn.run(inputs)
a = outputs["sink"].asarray()
self.assertEqual(outputs["sink"].channels, 136)
self.assertEqual(a.size, 136)
def test_enum_binarization(self):
math_engine = neoml.MathEngine.CpuMathEngine(1)
dnn = neoml.Dnn.Dnn(math_engine)
source1 = neoml.Dnn.Source(dnn, "source1")
bin = neoml.Dnn.EnumBinarization(source1, 5, "bin")
sink = neoml.Dnn.Sink(bin, "sink")
layer = dnn.layers['bin']
self.assertEqual(layer.name, 'bin')
self.assertEqual(bin.enum_size, 5)
bin.enum_size = 4
self.assertEqual(bin.enum_size, 4)
self.assertEqual(layer.enum_size, 4)
input1 = neoml.Blob.asblob(math_engine, np.ones((4, 3, 3, 3), dtype=np.float32), (4, 1, 1, 3, 3, 3, 1))
inputs = {"source1": input1}
outputs = dnn.run(inputs)
a = outputs["sink"]
self.assertEqual(a.shape, (4, 1, 1, 3, 3, 3, 4))
def test_bitset_vectorization(self):
math_engine = neoml.MathEngine.CpuMathEngine(1)
dnn = neoml.Dnn.Dnn(math_engine)
source1 = neoml.Dnn.Source(dnn, "source1")
bin = neoml.Dnn.BitSetVectorization(source1, 5, "bin")
sink = neoml.Dnn.Sink(bin, "sink")
layer = dnn.layers['bin']
self.assertEqual(layer.name, 'bin')
self.assertEqual(bin.bit_set_size, 5)
bin.bit_set_size = 4
self.assertEqual(bin.bit_set_size, 4)
self.assertEqual(layer.bit_set_size, 4)
input1 = neoml.Blob.asblob(math_engine, np.ones((4, 3, 3, 3), dtype=np.int32), (4, 1, 1, 3, 3, 3, 1))
inputs = {"source1": input1}
outputs = dnn.run(inputs)
a = outputs["sink"]
self.assertEqual(a.shape, (4, 1, 1, 3, 3, 3, 4))
def test_dotproduct(self):
math_engine = neoml.MathEngine.CpuMathEngine(1)
dnn = neoml.Dnn.Dnn(math_engine)
source1 = neoml.Dnn.Source(dnn, "source1")
source2 = neoml.Dnn.Source(dnn, "source2")
dotProduct = neoml.Dnn.DotProduct((source1, source2), "dotProduct")
sink = neoml.Dnn.Sink(dotProduct, "sink")
layer = dnn.layers['dotProduct']
self.assertEqual(layer.name, 'dotProduct')
input1 = neoml.Blob.asblob(math_engine, np.ones((16), dtype=np.float32), (1, 1, 1, 1, 1, 1, 16))
input2 = neoml.Blob.asblob(math_engine, np.ones((16), dtype=np.float32), (1, 1, 1, 1, 1, 1, 16))
inputs = {"source1": input1, "source2": input2}
outputs = dnn.run(inputs)
a = outputs["sink"].asarray()
self.assertEqual(a.size, 1)
self.assertEqual(a[0], 16)
def test_dropout(self):
math_engine = neoml.MathEngine.CpuMathEngine(1)
dnn = neoml.Dnn.Dnn(math_engine)
source = neoml.Dnn.Source(dnn, "source")
dropout = neoml.Dnn.Dropout(source, 0.5, True, True, "dropout")
sink = neoml.Dnn.Sink(dropout, "sink")
layer = dnn.layers['dropout']
self.assertEqual(layer.name, 'dropout')
input = neoml.Blob.asblob(math_engine, np.ones((2, 3, 5, 4), dtype=np.float32), (2, 3, 1, 5, 1, 1, 4))
inputs = {"source": input}
outputs = dnn.run(inputs)
a = outputs["sink"].asarray()
self.assertEqual(a.shape, input.asarray().shape)
self.assertEqual(dropout.rate, 0.5)
self.assertEqual(dropout.spatial, True)
self.assertEqual(dropout.batchwise, True)
self.assertEqual(layer.rate, 0.5)
def test_accumulative_lookup(self):
math_engine = neoml.MathEngine.CpuMathEngine(1)
dnn = neoml.Dnn.Dnn(math_engine)
source = neoml.Dnn.Source(dnn, "source")
lookup = neoml.Dnn.AccumulativeLookup(source, 5, 6, "lookup")
sink = neoml.Dnn.Sink(lookup, "sink")
layer = dnn.layers['lookup']
self.assertEqual(layer.name, 'lookup')
self.assertEqual(lookup.size, 6)
self.assertEqual(lookup.count, 5)
input = neoml.Blob.asblob(math_engine, np.ones((2, 5, 3), dtype=np.int32), (2, 1, 1, 5, 1, 1, 3))
inputs = {"source": input}
outputs = dnn.run(inputs)
out = outputs["sink"].asarray()
self.assertEqual(out.shape, (2, 6))
def test_multichannel_lookup(self):
math_engine = neoml.MathEngine.CpuMathEngine(1)
dnn = neoml.Dnn.Dnn(math_engine)
source = neoml.Dnn.Source(dnn, "source")
lookup = neoml.Dnn.MultichannelLookup((source,), [(1, 4)], "lookup")
sink = neoml.Dnn.Sink(lookup, "sink")
layer = dnn.layers['lookup']
self.assertEqual(layer.name, 'lookup')
self.assertEqual(lookup.dimensions, [(1, 4)])
lookup.dimensions = [(3, 5)]
self.assertEqual(layer.dimensions, [(3, 5)])
input = neoml.Blob.asblob(math_engine, np.ones((2, 5, 3), dtype=np.float32), (2, 1, 1, 5, 1, 1, 3))
inputs = {"source": input}
outputs = dnn.run(inputs)
out = outputs["sink"].asarray()
self.assertEqual(out.shape, (2, 5, 7))
blob = lookup.get_embeddings(0)
lookup.set_embeddings(0, blob)
uniform = neoml.Dnn.Uniform()
lookup.initialize(uniform)
def test_tied_embeddings(self):
math_engine = neoml.MathEngine.CpuMathEngine(1)
dnn = neoml.Dnn.Dnn(math_engine)
source = neoml.Dnn.Source(dnn, "source")
tied = neoml.Dnn.TiedEmbeddings((source,), "embeddings", 0, "tied")
sink = neoml.Dnn.Sink(tied, "sink")
layer = dnn.layers['tied']
self.assertEqual(layer.name, 'tied')
self.assertEqual(tied.channel, 0)
tied.channel = 1
self.assertEqual(tied.channel, 1)
self.assertEqual(layer.channel, 1)
self.assertEqual(tied.embeddings_layer_name, "embeddings")
tied.embeddings_layer_name = "embeddings2"
self.assertEqual(tied.embeddings_layer_name, "embeddings2")
def test_accuracy(self):
math_engine = neoml.MathEngine.CpuMathEngine(1)
dnn = neoml.Dnn.Dnn(math_engine)
source1 = neoml.Dnn.Source(dnn, "source1")
source2 = neoml.Dnn.Source(dnn, "source2")
accuracy = neoml.Dnn.Accuracy((source1, source2), True, "accuracy")
sink = neoml.Dnn.Sink(accuracy, "sink")
layer = dnn.layers['accuracy']
self.assertEqual(layer.name, 'accuracy')
self.assertEqual(accuracy.reset, True)
self.assertEqual(layer.reset, True)
input1 = neoml.Blob.asblob(math_engine, np.ones((1, 16), dtype=np.float32), (1, 16, 1, 1, 1, 1, 1))
input2 = neoml.Blob.asblob(math_engine, np.ones((1, 16), dtype=np.float32), (1, 16, 1, 1, 1, 1, 1))
inputs = {"source1": input1, "source2": input2}
outputs = dnn.run(inputs)
a = outputs["sink"].asarray()
self.assertEqual(a.size, 1)
self.assertAlmostEqual(a[0], 1.0, delta=1e-3)
def test_confusion_matrix(self):
math_engine = neoml.MathEngine.CpuMathEngine(1)
dnn = neoml.Dnn.Dnn(math_engine)
source1 = neoml.Dnn.Source(dnn, "source1")
source2 = neoml.Dnn.Source(dnn, "source2")
accuracy = neoml.Dnn.ConfusionMatrix((source1, source2), True, "accuracy")
sink = neoml.Dnn.Sink(accuracy, "sink")
layer = dnn.layers['accuracy']
self.assertEqual(layer.name, 'accuracy')
self.assertEqual(accuracy.reset, True)
self.assertEqual(layer.reset, True)
input1 = neoml.Blob.asblob(math_engine, np.ones((16, 2), dtype=np.float32), (1, 16, 1, 1, 1, 1, 2))
input2 = neoml.Blob.asblob(math_engine, np.ones((16, 2), dtype=np.float32), (1, 16, 1, 1, 1, 1, 2))
inputs = {"source1": input1, "source2": input2}
outputs = dnn.run(inputs)
a = outputs["sink"].asarray()
        self.assertEqual(accuracy.matrix.shape, (2, 2))
self.assertEqual(a.size, 4)
self.assertAlmostEqual(a[0][0], 16.0, delta=1e-3)
def _test_activation(self, layer, kwargs={}):
math_engine = neoml.MathEngine.CpuMathEngine(1)
dnn = neoml.Dnn.Dnn(math_engine)
source = neoml.Dnn.Source(dnn, "source")
activation = getattr(neoml.Dnn, layer)(source, name="activation", **kwargs)
sink = neoml.Dnn.Sink(activation, "sink")
layer = dnn.layers['activation']
self.assertEqual(layer.name, 'activation')
input = neoml.Blob.asblob(math_engine, np.ones((1, 16), dtype=np.float32), (1, 16, 1, 1, 1, 1, 1))
inputs = {"source": input}
outputs = dnn.run(inputs)
out = outputs["sink"].asarray()
for k,v in kwargs.items():
self.assertAlmostEqual(getattr(activation, k), v, delta=1e-3,
msg='Field {} of {} activation differs'.format(k, layer))
self.assertEqual(getattr(activation, k), getattr(layer, k))
return out
def test_activation_linear(self):
out = self._test_activation('Linear', dict(multiplier=3.3, free_term=4.4))
self.assertTrue(np.isclose(out, 7.7).all())
def test_activation_elu(self):
out = self._test_activation('ELU', dict(alpha=3.3))
self.assertTrue(np.isclose(out, 1).all())
def test_activation_relu(self):
out = self._test_activation('ReLU', dict(threshold=3.3))
self.assertTrue(np.isclose(out, 1).all())
def test_activation_leaky_relu(self):
out = self._test_activation('LeakyReLU', dict(alpha=3.3))
self.assertTrue(np.isclose(out, 1).all())
def test_activation_hswish(self):
out = self._test_activation('HSwish')
self.assertTrue(np.isclose(out, 2./3).all())
def test_activation_gelu(self):
out = self._test_activation('GELU')
self.assertTrue(np.isclose(out, 0.84579575).all())
def test_activation_abs(self):
out = self._test_activation('Abs')
self.assertTrue(np.isclose(out, 1).all())
def test_activation_sigmoid(self):
out = self._test_activation('Sigmoid')
self.assertTrue(np.isclose(out, 0.7310586).all())
def test_activation_tanh(self):
out = self._test_activation('Tanh')
self.assertTrue(np.isclose(out, 0.7615942).all())
def test_activation_hardtanh(self):
out = self._test_activation('HardTanh')
self.assertTrue(np.isclose(out, 1).all())
def test_activation_hardsigmoid(self):
out = self._test_activation('HardSigmoid', dict(slope=5.5, bias=6.6))
self.assertTrue(np.isclose(out, 1).all())
def test_activation_power(self):
out = self._test_activation('Power', dict(exponent=5.5))
self.assertTrue(np.isclose(out, 1).all())
def test_add_object(self):
math_engine = neoml.MathEngine.CpuMathEngine(1)
dnn = neoml.Dnn.Dnn(math_engine)
source1 = neoml.Dnn.Source(dnn, "source1")
source2 = neoml.Dnn.Source(dnn, "source2")
add_to_object = neoml.Dnn.AddToObject((source1, source2), "add_to_object")
sink = neoml.Dnn.Sink(add_to_object, "sink")
layer = dnn.layers['add_to_object']
self.assertEqual(layer.name, 'add_to_object')
input1 = neoml.Blob.asblob(math_engine, np.ones((8, 4, 4), dtype=np.float32), (1, 8, 1, 4, 4, 1, 1))
input2 = neoml.Blob.asblob(math_engine, np.ones((4, 4), dtype=np.float32), (1, 1, 1, 4, 4, 1, 1))
inputs = {"source1": input1, "source2": input2}
outputs = dnn.run(inputs)
a = outputs["sink"].asarray()
self.assertEqual(a.size, 128)
self.assertAlmostEqual(a[1][1][1], 2.0, delta=1e-3)
def test_argmax(self):
math_engine = neoml.MathEngine.CpuMathEngine(1)
dnn = neoml.Dnn.Dnn(math_engine)
source = neoml.Dnn.Source(dnn, "source")
argmax = neoml.Dnn.Argmax(source, dimension="channels", name="argmax")
sink = neoml.Dnn.Sink(argmax, "sink")
layer = dnn.layers['argmax']
self.assertEqual(layer.name, 'argmax')
self.assertEqual(argmax.dimension, "channels")
argmax.dimension = "batch_length"
self.assertEqual(argmax.dimension, "batch_length")
input = neoml.Blob.asblob(math_engine, np.array([1, 2, 3, 1], dtype=np.float32), (4, 1, 1, 1, 1, 1, 1))
inputs = {"source": input}
outputs = dnn.run(inputs)
out = outputs["sink"].asarray()
self.assertEqual(out, 2)
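        # The source holds [1, 2, 3, 1] along batch_length, so the argmax over
        # that dimension is index 2, the single value checked above.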
def test_attention_decoder(self):
math_engine = neoml.MathEngine.CpuMathEngine(1)
dnn = neoml.Dnn.Dnn(math_engine)
source1 = neoml.Dnn.Source(dnn, "source1")
source2 = neoml.Dnn.Source(dnn, "source2")
decoder = neoml.Dnn.AttentionDecoder((source1, source2), "additive", 16, 32, 64, "decoder")
sink = neoml.Dnn.Sink(decoder, "sink")
layer = dnn.layers['decoder']
self.assertEqual(layer.name, 'decoder')
self.assertEqual(decoder.hidden_layer_size, 16)
self.assertEqual(decoder.output_object_size, 32)
self.assertEqual(decoder.output_seq_len, 64)
self.assertEqual(decoder.score, "additive")
decoder.score = "dot_product"
self.assertEqual(decoder.score, "dot_product")
decoder.score = "additive"
decoder.hidden_layer_size = 1
self.assertEqual(decoder.hidden_layer_size, 1)
self.assertEqual(layer.hidden_layer_size, 1)
decoder.output_object_size = 1
self.assertEqual(decoder.output_object_size, 1)
self.assertEqual(layer.output_object_size, 1)
decoder.output_seq_len = 1
self.assertEqual(decoder.output_seq_len, 1)
self.assertEqual(layer.output_seq_len, 1)
input1 = neoml.Blob.asblob(math_engine, np.ones(1, dtype=np.float32), (1, 1, 1, 1, 1, 1, 1))
input2 = neoml.Blob.asblob(math_engine, np.ones(1, dtype=np.float32), (1, 1, 1, 1, 1, 1, 1))
inputs = {"source1": input1, "source2": input2}
outputs = dnn.run(inputs)
out = outputs["sink"].asarray()
self.assertSequenceEqual(out, [1])
def test_batch_norm(self):
math_engine = neoml.MathEngine.CpuMathEngine(1)
dnn = neoml.Dnn.Dnn(math_engine)
source = neoml.Dnn.Source(dnn, "source")
batch_norm = neoml.Dnn.BatchNormalization(source, True, True, 0.3, "batch_norm")
sink = neoml.Dnn.Sink(batch_norm, "sink")
layer = dnn.layers['batch_norm']
self.assertEqual(layer.name, 'batch_norm')
arr = np.ones((5, 3, 2), dtype=np.float32)
input = neoml.Blob.asblob(math_engine, arr, (5, 1, 3, 2, 1, 1, 1))
inputs = {"source": input}
outputs = dnn.run(inputs)
out = outputs["sink"].asarray()
self.assertTrue(np.array_equal(arr, out))
self.assertEqual(batch_norm.channel_based, True)
self.assertEqual(batch_norm.zero_free_term, True)
self.assertAlmostEqual(batch_norm.slow_convergence_rate, 0.3, delta=1e-3)
self.assertEqual(layer.channel_based, True)
self.assertEqual(layer.zero_free_term, True)
self.assertAlmostEqual(layer.slow_convergence_rate, 0.3, delta=1e-3)
def test_matrix_multiplication(self):
math_engine = neoml.MathEngine.CpuMathEngine(1)
dnn = neoml.Dnn.Dnn(math_engine)
source1 = neoml.Dnn.Source(dnn, "source1")
source2 = neoml.Dnn.Source(dnn, "source2")
mult = neoml.Dnn.MatrixMultiplication((source1, source2), "mm")
sink = neoml.Dnn.Sink(mult, "sink")
layer = dnn.layers['mm']
self.assertEqual(layer.name, 'mm')
mult1 = np.array([[1, 2], [3, 4]], dtype=np.float32)
mult2 = np.array([[1, 2], [3, 4]], dtype=np.float32)
input1 = neoml.Blob.asblob(math_engine, mult1, (2, 1, 2, 1, 1, 1, 1))
input2 = neoml.Blob.asblob(math_engine, mult2, (2, 1, 2, 1, 1, 1, 1))
inputs = {"source1": input1, "source2": input2}
outputs = dnn.run(inputs)
out = outputs["sink"].asarray()
self.assertTrue(np.array_equal(out, mult1 * mult2))
def test_multihead_attention(self):
math_engine = neoml.MathEngine.CpuMathEngine(1)
dnn = neoml.Dnn.Dnn(math_engine)
source1 = neoml.Dnn.Source(dnn, "source1")
source2 = neoml.Dnn.Source(dnn, "source2")
source3 = neoml.Dnn.Source(dnn, "source3")
att = neoml.Dnn.MultiheadAttention((source1, source2, source3), 5, 9, 3, 0.3, "att")
sink = neoml.Dnn.Sink(att, "sink")
layer = dnn.layers['att']
self.assertEqual(layer.name, 'att')
self.assertEqual(att.head_count, 5)
att.head_count = 4
self.assertEqual(att.head_count, 4)
self.assertEqual(att.hidden_size, 9)
att.hidden_size = 8
self.assertEqual(att.hidden_size, 8)
self.assertEqual(att.output_size, 3)
att.output_size = 8
self.assertEqual(att.output_size, 8)
self.assertEqual(att.use_mask, False)
att.use_mask = True
self.assertEqual(att.use_mask, True)
att.use_mask = False
self.assertAlmostEqual(att.dropout_rate, 0.3, delta=1e-3)
att.dropout_rate = 0.4
self.assertAlmostEqual(att.dropout_rate, 0.4, delta=1e-3)
self.assertEqual(layer.hidden_size, 8)
self.assertEqual(layer.output_size, 8)
self.assertEqual(layer.use_mask, False)
self.assertAlmostEqual(layer.dropout_rate, 0.4, delta=1e-3)
input1 = neoml.Blob.asblob(math_engine, np.ones((4, 3, 3), dtype=np.float32), (1, 4, 3, 1, 1, 1, 3))
input2 = neoml.Blob.asblob(math_engine, np.ones((4, 2, 3), dtype=np.float32), (1, 4, 2, 1, 1, 1, 3))
        input3 = neoml.Blob.asblob(math_engine, np.ones((4, 2, 4), dtype=np.float32), (1, 4, 2, 1, 1, 1, 4))  # 4 * 2 * 4 values to match the blob shape
inputs = {"source1": input1, "source2": input2, "source3": input3}
outputs = dnn.run(inputs)
out = outputs["sink"].asarray()
self.assertEqual(out.shape, (4, 3, 8))
def test_image_to_pixel(self):
math_engine = neoml.MathEngine.CpuMathEngine(1)
dnn = neoml.Dnn.Dnn(math_engine)
source1 = neoml.Dnn.Source(dnn, "source1")
source2 = neoml.Dnn.Source(dnn, "source2")
conv = neoml.Dnn.ImageToPixel((source1, source2), "conv")
sink = neoml.Dnn.Sink(conv, "sink")
layer = dnn.layers['conv']
self.assertEqual(layer.name, 'conv')
input1 = neoml.Blob.asblob(math_engine, np.ones((4, 3, 2), dtype=np.float32), (1, 4, 3, 1, 1, 1, 2))
input2 = neoml.Blob.asblob(math_engine, np.zeros((4, 3), dtype=np.int32), (1, 4, 1, 1, 1, 1, 3))
inputs = {"source1": input1, "source2": input2}
outputs = dnn.run(inputs)
out = outputs["sink"].asarray()
self.assertEqual(out.shape, (4, 3, 2))
def test_pixel_to_image(self):
math_engine = neoml.MathEngine.CpuMathEngine(1)
dnn = neoml.Dnn.Dnn(math_engine)
source1 = neoml.Dnn.Source(dnn, "source1")
source2 = neoml.Dnn.Source(dnn, "source2")
conv = neoml.Dnn.PixelToImage((source1, source2), 4, 8, "conv")
sink = neoml.Dnn.Sink(conv, "sink")
layer = dnn.layers['conv']
self.assertEqual(layer.name, 'conv')
self.assertEqual(conv.height, 4)
conv.height = 5
self.assertEqual(conv.height, 5)
self.assertEqual(conv.width, 8)
conv.width = 9
self.assertEqual(conv.width, 9)
self.assertEqual(layer.height, 5)
self.assertEqual(layer.width, 9)
input1 = neoml.Blob.asblob(math_engine, np.ones((4, 3, 2), dtype=np.float32), (1, 4, 3, 1, 1, 1, 2))
input2 = neoml.Blob.asblob(math_engine, np.zeros((4, 3), dtype=np.int32), (1, 4, 1, 1, 1, 1, 3))
inputs = {"source1": input1, "source2": input2}
outputs = dnn.run(inputs)
out = outputs["sink"].asarray()
self.assertEqual(out.shape, (4, 5, 9, 2))
def test_image_resize(self):
math_engine = neoml.MathEngine.CpuMathEngine(1)
dnn = neoml.Dnn.Dnn(math_engine)
source1 = neoml.Dnn.Source(dnn, "source1")
conv = neoml.Dnn.ImageResize(source1, [5, 6, 7, 8], 0.1, "conv")
sink = neoml.Dnn.Sink(conv, "sink")
layer = dnn.layers['conv']
self.assertEqual(layer.name, 'conv')
self.assertEqual(conv.deltas, [5, 6, 7, 8] )
conv.deltas = [1, 2, 3, 4]
self.assertEqual(conv.deltas, [1, 2, 3, 4])
self.assertAlmostEqual(conv.default_value, 0.1)
conv.default_value = 0.2
self.assertAlmostEqual(conv.default_value, 0.2)
self.assertEqual(layer.deltas, [1, 2, 3, 4])
self.assertAlmostEqual(layer.default_value, 0.2)
input1 = neoml.Blob.asblob(math_engine, np.ones((2, 10, 11, 2), dtype=np.float32), (1, 1, 2, 10, 11, 1, 2))
inputs = {"source1": input1}
outputs = dnn.run(inputs)
out = outputs["sink"].asarray()
self.assertEqual(out.shape, (2, 17, 14, 2))
def test_crf(self):
math_engine = neoml.MathEngine.CpuMathEngine(1)
dnn = neoml.Dnn.Dnn(math_engine)
source1 = neoml.Dnn.Source(dnn, "source1")
source2 = neoml.Dnn.Source(dnn, "source2")
crf = neoml.Dnn.Crf((source1, source2), 5, 3, 0.3, "crf")
sink1 = neoml.Dnn.Sink((crf, 0), "sink1")
sink2 = neoml.Dnn.Sink((crf, 1), "sink2")
sink3 = neoml.Dnn.Sink((crf, 2), "sink3")
layer = dnn.layers['crf']
self.assertEqual(layer.name, 'crf')
self.assertEqual(crf.class_count, 5)
crf.class_count = 7
self.assertEqual(crf.class_count, 7)
self.assertEqual(crf.padding, 3)
crf.padding = 1
self.assertEqual(crf.padding, 1)
self.assertAlmostEqual(crf.dropout_rate, 0.3)
crf.dropout_rate = 0.2
self.assertAlmostEqual(crf.dropout_rate, 0.2)
self.assertEqual(crf.calc_best_prev_class, False)
crf.calc_best_prev_class = True
self.assertEqual(crf.calc_best_prev_class, True)
self.assertEqual(layer.class_count, 7)
self.assertEqual(layer.padding, 1)
self.assertAlmostEqual(layer.dropout_rate, 0.2)
self.assertEqual(layer.calc_best_prev_class, True)
hidden_weights = crf.hidden_weights
crf.hidden_weights = hidden_weights
free_terms = crf.free_terms
crf.free_terms = free_terms
transitions = crf.transitions
crf.transitions = transitions
input1 = neoml.Blob.asblob(math_engine, np.ones((5, 7), dtype=np.float32), (1, 1, 5, 1, 1, 1, 7))
input2 = neoml.Blob.asblob(math_engine, np.ones((5, ), dtype=np.int32), (1, 1, 5, 1, 1, 1, 1))
inputs = {"source1": input1, "source2": input2}
outputs = dnn.run(inputs)
out1 = outputs["sink1"].asarray()
out2 = outputs["sink2"].asarray()
out3 = outputs["sink3"].asarray()
self.assertEqual(out1.shape, (7,))
self.assertEqual(out2.shape, (7,))
self.assertEqual(out3.shape, (1,))
def test_crf_loss(self):
math_engine = neoml.MathEngine.CpuMathEngine(1)
dnn = neoml.Dnn.Dnn(math_engine)
source1 = neoml.Dnn.Source(dnn, "source1")
source2 = neoml.Dnn.Source(dnn, "source2")
source3 = neoml.Dnn.Source(dnn, "source3")
crfLoss = neoml.Dnn.CrfLoss((source1, source2, source3), 0.4, "loss")
layer = dnn.layers['loss']
self.assertEqual(layer.name, 'loss')
self.assertAlmostEqual(crfLoss.loss_weight, 0.4, delta=1e-3)
crfLoss.loss_weight = 0.6
self.assertAlmostEqual(crfLoss.loss_weight, 0.6, delta=1e-3)
self.assertAlmostEqual(layer.loss_weight, 0.6, delta=1e-3)
crfLoss.max_gradient = 0.6
self.assertAlmostEqual(crfLoss.max_gradient, 0.6, delta=1e-3)
self.assertAlmostEqual(layer.max_gradient, 0.6, delta=1e-3)
self.assertAlmostEqual(crfLoss.last_loss, 0, delta=1e-3)
input1 = neoml.Blob.asblob(math_engine, np.ones((3, 5), dtype=np.int32), (3, 1, 5, 1, 1, 1, 1))
input2 = neoml.Blob.asblob(math_engine, np.ones((3, 5), dtype=np.float32), (3, 1, 5, 1, 1, 1, 1))
input3 = neoml.Blob.asblob(math_engine, np.ones((3, 5), dtype=np.float32), (3, 1, 5, 1, 1, 1, 1))
inputs = {"source1": input1, "source2": input2, "source3": input3}
dnn.run(inputs)
self.assertAlmostEqual(crfLoss.last_loss, -2, delta=1e-3)
def test_crf_best_sequence(self):
math_engine = neoml.MathEngine.CpuMathEngine(1)
dnn = neoml.Dnn.Dnn(math_engine)
source1 = neoml.Dnn.Source(dnn, "source1")
source2 = neoml.Dnn.Source(dnn, "source2")
best = neoml.Dnn.BestSequence((source1, source2), "best")
sink = neoml.Dnn.Sink(best, "sink")
layer = dnn.layers['best']
self.assertEqual(layer.name, 'best')
input1 = neoml.Blob.asblob(math_engine, np.zeros((3, 5), dtype=np.int32), (3, 1, 5, 1, 1, 1, 1))
input2 = neoml.Blob.asblob(math_engine, np.ones((3, 5), dtype=np.float32), (3, 1, 5, 1, 1, 1, 1))
inputs = {"source1": input1, "source2": input2}
outputs = dnn.run(inputs)
out = outputs["sink"].asarray()
self.assertTrue(np.equal(out, [0., 0., 0.]).all())
def test_ctc_loss(self):
math_engine = neoml.MathEngine.CpuMathEngine(1)
dnn = neoml.Dnn.Dnn(math_engine)
source1 = neoml.Dnn.Source(dnn, "source1")
source2 = neoml.Dnn.Source(dnn, "source2")
ctcLoss = neoml.Dnn.CtcLoss((source1, source2), 6, False, 0.4, "loss")
layer = dnn.layers['loss']
self.assertEqual(layer.name, 'loss')
self.assertEqual(ctcLoss.blank, 6)
ctcLoss.blank = 5
self.assertEqual(ctcLoss.blank, 5)
self.assertEqual(layer.blank, 5)
self.assertAlmostEqual(ctcLoss.loss_weight, 0.4, delta=1e-3)
ctcLoss.loss_weight = 0.6
self.assertAlmostEqual(ctcLoss.loss_weight, 0.6, delta=1e-3)
self.assertAlmostEqual(layer.loss_weight, 0.6, delta=1e-3)
ctcLoss.max_gradient = 0.6
self.assertAlmostEqual(ctcLoss.max_gradient, 0.6, delta=1e-3)
self.assertAlmostEqual(ctcLoss.last_loss, 0, delta=1e-3)
self.assertEqual(ctcLoss.skip, False)
ctcLoss.skip = True
self.assertEqual(ctcLoss.skip, True)
        input1 = neoml.Blob.asblob(math_engine, np.ones((3, 4, 5), dtype=np.float32), (3, 4, 1, 1, 1, 1, 5))  # 3 * 4 * 5 values to match the blob shape
input2 = neoml.Blob.asblob(math_engine, np.ones((2, 4), dtype=np.int32), (2, 4, 1, 1, 1, 1, 1))
inputs = {"source1": input1, "source2": input2}
dnn.run(inputs)
self.assertAlmostEqual(ctcLoss.last_loss, 4.8283, delta=1e-4)
self.assertAlmostEqual(layer.last_loss, 4.8283, delta=1e-4)
def test_ctc_decoding(self):
math_engine = neoml.MathEngine.CpuMathEngine(1)
dnn = neoml.Dnn.Dnn(math_engine)
source1 = neoml.Dnn.Source(dnn, "source1")
source2 = neoml.Dnn.Source(dnn, "source2")
ctc = neoml.Dnn.CtcDecoding((source1, source2), 5, 0.4, 0.5, "ctc")
layer = dnn.layers['ctc']
self.assertEqual(layer.name, 'ctc')
self.assertEqual(ctc.blank, 5)
ctc.blank = 6
self.assertEqual(ctc.blank, 6)
self.assertAlmostEqual(ctc.blank_threshold, 0.4, delta=1e-3)
ctc.blank_threshold = 0.6
self.assertAlmostEqual(ctc.blank_threshold, 0.6, delta=1e-3)
self.assertAlmostEqual(ctc.arc_threshold, 0.5, delta=1e-3)
ctc.arc_threshold = 0.7
self.assertAlmostEqual(ctc.arc_threshold, 0.7, delta=1e-3)
self.assertEqual(ctc.sequence_length, 0)
self.assertEqual(ctc.batch_width, 0)
self.assertEqual(ctc.label_count, 0)
self.assertAlmostEqual(layer.blank_threshold, 0.6, delta=1e-3)
self.assertAlmostEqual(layer.arc_threshold, 0.7, delta=1e-3)
ctc.get_best_sequence(0)
input1 = neoml.Blob.asblob(math_engine, np.ones((3, 4, 5), dtype=np.float32), (3, 4, 1, 1, 1, 1, 5))
input2 = neoml.Blob.asblob(math_engine, np.ones((4, ), dtype=np.int32), (1, 4, 1, 1, 1, 1, 1))
inputs = {"source1": input1, "source2": input2}
dnn.run(inputs)
def test_gru(self):
math_engine = neoml.MathEngine.CpuMathEngine(1)
dnn = neoml.Dnn.Dnn(math_engine)
source1 = neoml.Dnn.Source(dnn, "source1")
gru = neoml.Dnn.Gru((source1,), 5, "gru")
sink = neoml.Dnn.Sink(gru, "sink")
layer = dnn.layers['gru']
self.assertEqual(layer.name, 'gru')
self.assertEqual(gru.hidden_size, 5)
gru.hidden_size = 6
self.assertEqual(gru.hidden_size, 6)
self.assertEqual(layer.hidden_size, 6)
main_weights = gru.main_weights
gru.main_weights = main_weights
main_free_term = gru.main_free_term
gru.main_free_term = main_free_term
gate_weights = gru.gate_weights
gru.gate_weights = gate_weights
gate_free_term = gru.gate_free_term
gru.gate_free_term = gate_free_term
input1 = neoml.Blob.asblob(math_engine,
|
np.ones((3, 2, 3), dtype=np.float32)
|
numpy.ones
|
from __future__ import print_function
import time
import numpy as np
import scipy as sc
import scipy.sparse as sp
import torch
import os
import re
import networkx as nx
import torch.utils.data as Data
import torch.optim.lr_scheduler
from torch.utils.data.dataset import Dataset
from torch.autograd import Variable
import sys
import csv
import gzip
import logging
import json
import argparse  # needed by str2bool below
from sklearn.model_selection import KFold, StratifiedKFold, train_test_split
from sklearn.utils import shuffle
SEQ_MAX_LEN = 210 # maximum length of a protein sequence (default max_len below)
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
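# Typical use (sketch): register str2bool as an argparse type so flags such as
# "--cuda yes" or "--cuda 0" are parsed into real booleans, e.g.
#   parser = argparse.ArgumentParser()
#   parser.add_argument('--cuda', type=str2bool, default=False)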
##### JSON modules #####
def save_json(data,filename):
with open(filename, 'w') as fp:
json.dump(data, fp, sort_keys=True, indent=4)
def load_json(filename):
with open(filename, 'r') as fp:
data = json.load(fp)
return data
##### JSON modules #####
def load_ikey2smiles():
file_path='data/Integrated/chemicals'
    assert os.path.exists(file_path), 'chemical data directory not found: '+file_path
ikey2smiles={}
with open(os.path.join(file_path,'integrated_chemicals.tsv'),'r') as fin:
for line in fin:
line=line.strip().split('\t')
ikey=line[1]
smi=line[2]
ikey2smiles[ikey]=smi
return ikey2smiles
def padding(batch, max_len, pad):
padded = []
lengths = []
for seq in batch:
seq = seq[:min(len(seq), max_len)]
lengths.append(len(seq))
seq = seq + [pad] * (max_len - len(seq))
padded.append(seq)
return padded, lengths
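# Worked example: padding([[1, 2, 3], [7]], max_len=4, pad=0) returns
# ([[1, 2, 3, 0], [7, 0, 0, 0]], [3, 1]); sequences longer than max_len are
# truncated first, then right-padded, and the pre-padding lengths are returned.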
def get_lstm_embedding(batch_repr,max_len=SEQ_MAX_LEN,pad=0):
batch_repr, lengths = padding(batch_repr, max_len, pad)
with torch.no_grad():
batch_repr = Variable(torch.LongTensor(batch_repr))
logging.debug("utils.get_lstm_embedding: batch_repr {}".format(batch_repr.size()))
return batch_repr
def load_mtl_edges_from_file(edgefile,allowed_uniprots=None,sep=',',header=True):
#default data format:
#InChIKey,UniProt,Binary,pKi,pKd,pIC50
#MAEHEIXUINDDHE-UHFFFAOYSA-N,P48736,1,6.130182,6.130182,6.130182
#missing entries (presented as 'nan') are converted to -1
edges={};ikeys=[];uniprots=[]
count_skipped=0
count_loaded=0
with open(edgefile,'r') as f:
if header:
next(f)
for line in f:
line=line.strip().split(sep)
ikey=line[0]
uni=line[1]
if allowed_uniprots and (uni not in allowed_uniprots):
count_skipped+=1
continue
ikeys.append(ikey)
uniprots.append(uni)
try:
                b=float(line[2])
except:
b=-1
try:
                ki=float(line[3])
except:
ki=-1
try:
                kd=float(line[4])
except:
kd=-1
try:
                ic=float(line[5])
except:
ic=-1
b=-1 if np.isnan(b) else b
ki=-1 if np.isnan(ki) else ki
kd=-1 if np.isnan(kd) else kd
ic=-1 if np.isnan(ic) else ic
val=(b,ki,kd,ic)
edge=ikey+'\t'+uni
edges[edge]=val
count_loaded+=1
logging.info("{} edges loaded. {} edges (not-allowed-uniprots) skipped from {}".format(count_loaded,
count_skipped,
edgefile))
ikeys=list(set(ikeys));uniprots=list(set(uniprots))
return edges,ikeys,uniprots
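# Example (sketch): for the data line documented above,
#   MAEHEIXUINDDHE-UHFFFAOYSA-N,P48736,1,6.130182,6.130182,6.130182
# the function stores
#   edges['MAEHEIXUINDDHE-UHFFFAOYSA-N\tP48736'] = (1.0, 6.130182, 6.130182, 6.130182)
# in (Binary, pKi, pKd, pIC50) order, with -1 standing in for missing ('nan') entries.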
def load_edges_from_file(edgefile,sep=',',header=True):
#default data format:
#InChIKey,UniProt,activity (sep=',')
#MAEHEIXUINDDHE-UHFFFAOYSA-N,P48736,6.130
edges={};ikeys=[];uniprots=[]
count_skipped=0
count_loaded=0
with open(edgefile,'r') as f:
if header:
next(f)
for line in f:
line=line.strip().split(sep)
ikey=line[0]
uni=line[1]
# if allowed_uniprots and (uni not in allowed_uniprots):
# count_skipped+=1
# continue
ikeys.append(ikey)
uniprots.append(uni)
val=float(line[2])
edge=ikey+'\t'+uni
edges[edge]=val
count_loaded+=1
logging.info("{} edges loaded. {} edges (not-allowed-uniprots) skipped from {}".format(count_loaded,
count_skipped,
edgefile))
ikeys=list(set(ikeys));uniprots=list(set(uniprots))
return edges,ikeys,uniprots
def load_dict(path):
""" Load a dictionary and a corresponding reverse dictionary from the given file
where line number (0-indexed) is key and line string is value. """
retdict = list()
rev_retdict = dict()
with open(path) as fin:
for idx, line in enumerate(fin):
text = line.strip()
retdict.append(text)
rev_retdict[text] = idx
return retdict, rev_retdict
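# Example: a vocabulary file with the two lines "C" and "CCO" yields
# retdict == ['C', 'CCO'] and rev_retdict == {'C': 0, 'CCO': 1}.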
def load_repr(path, config, node_list):
""" Load the representations of each node in the `node_list` given
the representation type and configurations.
Args:
path: Path of the graph data directory
config: Node configuration JSON object
node_list: The list of nodes for which to load representations
Returns:
repr_info: A dictionary that contains representation information
node_list: List of nodes with loaded representations, the change
is in-place though.
"""
repr_type = config['representation']
if repr_type == TYPE_MOLECULE:
return load_molecule_repr(path, config, node_list)
elif repr_type == TYPE_SEQUENCE_PSSM:
return load_pssm_repr(path, config, node_list)
else:
raise ValueError("{0} Node type not supported!".format(repr_type))
def load_molecule_repr(path, config, node_list):
import deepnet.fingerprint.features as fp_feature
graph_vocab_path = os.path.join(path, config['graph_path'])
graph_list, _ = load_dict(graph_vocab_path)
for node, graph in zip(node_list, graph_list):
node.set_data(graph)
info = dict(embedding_type=TYPE_MOLECULE,
atom_size=fp_feature.num_atom_features(),
bond_size=fp_feature.num_bond_features())
return info, node_list
def load_uniprot2pssm(max_len=512,padding=0):
#maximum sequence length: max_len
#pssm padded with zeros if len<max_len
base_path='data/protein/'
pssm_dir=base_path+'kinase_domain_pssm_uniref50/'
#protfile=base_path+'prot_bsite_sample' #padding test
protfile=base_path+'prot_bsite'
uniprot2pssm={}
pssm_files=os.listdir(pssm_dir)
manual_dict={'P52333_JH1domain-catalytic':'P52333_Kin.Dom.2-C-terminal.dat',
'Q9P2K8_Kin.Dom.2,S808G':'Q9P2K8_S808G_Kin.Dom.2-C-terminal.dat',
'P23458' :'P23458_JH2domain-pseudokinase.dat',
'P29597' :'P29597_JH2domain-pseudokinase.dat',
'O75582' :'O75582_Kin.Dom.1-N-terminal.dat',
'Q15418' :'Q15418_Kin.Dom.1-N-terminal.dat',
'Q9P2K8' :'Q9P2K8_Kin.Dom.1-N-terminal.dat',
'Q9UK32' :'Q9UK32_Kin.Dom.2-C-terminal.dat'}
with open(protfile,'r') as f:
for line in f:
uniprot=line.strip()
line=line.strip()
line=line.replace('(','_').replace(')','')
line=line.replace('-nonphosphorylated','').replace('-phosphorylated','').replace('-autoinhibited','')
matchkd=re.search(r'Kin\.Dom',line,re.I)
matchjh=re.search(r'JH\ddomain',line,re.I)
if line in list(manual_dict.keys()):
fname=manual_dict[line]
elif matchkd:
matchkd=re.search(r'Kin\.Dom\.(\d)',line,re.I)
if matchkd is None:
fname=line+'.dat'
elif matchkd.group(1)==str(1):
fname=line+'-N-terminal.dat'
elif matchkd.group(1)==str(2):
fname=line+'-C-terminal.dat'
elif matchjh:
fname=line+'.dat'
else:
fname=line+'.dat'
if fname not in pssm_files:
                fname=line.replace('.dat','')+'_Kin.Dom.dat' # str.replace is literal; no regex escape needed
#print("PSSM file {} not found for protein {}".format(fname,line))
pssm=[]
with open(pssm_dir+fname,'r') as f:
for line in f:
line=line.strip().lstrip().split()
if len(line)==0: #empty line
continue
else:
try:
resnum=int(line[0])
except: #non-pssm field
continue
res_vector=np.array(line[2:22],dtype=np.float32)
pssm.append(res_vector)
pssm=
|
np.array(pssm,dtype=np.float32)
|
numpy.array
|
"""Black-box system identification."""
import importlib
import os
import numpy as np
import scipy.io
import sympy
import sym2num.model
import fem
import symfem
# Reload modules for testing
for m in (fem, symfem):
importlib.reload(m)
def save_generated_model(symmodel):
clsname = type(symmodel).__name__
nx = symmodel.nx
nu = symmodel.nu
ny = symmodel.ny
with open(f'{clsname}_nx{nx}_nu{nu}_ny{ny}.py', mode='w') as f:
code = symmodel.print_code()
print(code, file=f)
def get_model(nx, nu, ny):
clsname = 'InnovationBalDTModel'
modname = f'{clsname}_nx{nx}_nu{nu}_ny{ny}'
mod = importlib.import_module(modname)
genclsname = f'Generated{clsname}'
cls = getattr(mod, genclsname)
return cls()
def load_data():
# Retrieve data
u = np.loadtxt('/tmp/u.txt')
y = np.loadtxt('/tmp/y.txt')
return u, y
if __name__ == '__main__':
nx = 5
nu = 3
ny = 3
# Load experiment data
u, y = load_data()
#symmodel = symfem.InnovationBalDTModel(nx=nx, nu=nu, ny=ny)
#model = symmodel.compile_class()()
#save_generated_model(symmodel)
model = get_model(nx, nu, ny)
problem = fem.InnovationBalDTProblem(model, y, u)
N = len(y)
# Define initial guess for decision variables
dec0 = np.zeros(problem.ndec)
var0 = problem.variables(dec0)
var0['A'][:] = np.loadtxt('/tmp/a.txt')
var0['B'][:] = np.loadtxt('/tmp/b.txt')
var0['C'][:] = np.loadtxt('/tmp/c.txt')
var0['D'][:] = np.loadtxt('/tmp/d.txt')
var0['L'][:] = np.loadtxt('/tmp/k.txt')
var0['x'][:] =
|
np.loadtxt('/tmp/xpred.txt')
|
numpy.loadtxt
|
"""Genetic evaluation of individuals."""
import os
import sys
# import time
from collections import Counter
from itertools import compress
from numba import njit
import pkg_resources
import numpy as np
import pandas as pd
import scipy.linalg
import scipy.stats
def example_data():
"""Provide data to the package."""
cwd = os.getcwd()
stream = pkg_resources.resource_stream(__name__, 'data/chr.txt')
chrmosomedata = pd.read_table(stream, sep=" ")
stream = pkg_resources.resource_stream(__name__, 'data/group.txt')
groupdata = pd.read_table(stream, sep=" ")
stream = pkg_resources.resource_stream(__name__, 'data/effects.txt')
markereffdata = pd.read_table(stream, sep=" ")
stream = pkg_resources.resource_stream(__name__, 'data/phase.txt')
genodata = pd.read_table(stream, header=None, sep=" ")
stream = pkg_resources.resource_stream(__name__, 'data/ped.txt')
ped = pd.read_table(stream, header=None, sep=" ")
os.chdir(cwd)
return chrmosomedata, markereffdata, genodata, groupdata, ped
if __name__ == "__main__":
example_data()
@njit
def fnrep2(gen, aaxx, aaxx1):
"""Code phased genotypes into 1, 2, 3 and 4."""
qqq = np.empty((int(gen.shape[0]/2), gen.shape[1]), np.int_)
for i in range(qqq.shape[0]):
for j in range(qqq.shape[1]):
if gen[2*i, j] == aaxx and gen[2*i+1, j] == aaxx:
qqq[i, j] = 1
elif gen[2*i, j] == aaxx1 and gen[2*i+1, j] == aaxx1:
qqq[i, j] = 2
elif gen[2*i, j] == aaxx and gen[2*i+1, j] == aaxx1:
qqq[i, j] = 3
else:
qqq[i, j] = 4
return qqq
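# Worked example: with major allele aaxx=1 and minor allele aaxx1=0, the two
# phased rows [1, 1, 0] and [1, 0, 1] of one individual are coded as [1, 3, 4]:
# 1 = homozygous major, 2 = homozygous minor, 3 = major|minor phase, 4 = any other phase.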
def haptogen(gen, progress=False):
"""Convert haplotypes to coded genotypes."""
if progress:
print("Converting phased haplotypes to genotypes")
if gen.shape[1] == 2:
gen = np.array(gen.iloc[:, 1]) # del col containing ID
# convert string to 2D array of integers
gen = [list(gen[i].rstrip()) for i in range(gen.shape[0])]
gen = np.array(gen, int)
# derives the frequency of alleles to determine the major allele
allele = np.asarray(np.unique(gen, return_counts=True)).T.astype(int)
if len(allele[:, 0]) != 2:
sys.exit("method only supports biallelic markers")
aaxx = allele[:, 0][np.argmax(allele[:, 1])] # major allele
aaasns = np.isin(allele[:, 0], aaxx, invert=True)
aaxx1 = int(allele[:, 0][aaasns]) # minor allele
gen = np.array(gen, int)
gen = fnrep2(gen, aaxx, aaxx1)
elif gen.shape[1] > 2:
gen = gen.iloc[:, 1:gen.shape[1]] # del col containing ID
# derives the frequency of alleles to determine the major allele
allele = np.asarray(np.unique(gen, return_counts=True)).T.astype(int)
if len(allele[:, 0]) != 2:
sys.exit("method only supports biallelic markers")
aaxx = allele[:, 0][np.argmax(allele[:, 1])] # major allele
aaasns = np.isin(allele[:, 0], aaxx, invert=True)
aaxx1 = int(allele[:, 0][aaasns]) # minor allele
gen = np.array(gen, int)
gen = fnrep2(gen, aaxx, aaxx1)
return gen
class Datacheck:
"""Check the input data for errors and store relevant info as an object."""
def __init__(self, gmap, meff, gmat, group, indwt, progress=False):
"""
Check input data for errors and store relevant info as class object.
Parameters
----------
gmap : pandas.DataFrame
Index: RangeIndex
Columns:
Name: CHR, dtype: int64; chromosome number
Name: SNPName, dtype: object; marker name
Name: Position: dtype: int64; marker position in bp
Name: group: dtype: float64; marker distance (cM) or reco rates
meff : pandas.DataFrame
Index: RangeIndex
Columns:
Name: trait names: float64; no. of columns = no of traits
gmat : pandas.DataFrame
Index: RangeIndex
Columns:
Name: ID, dtype: int64 or str; identification of individuals
Name: haplotypes, dtype: object; must be biallelic
group : pandas.DataFrame
Index: RangeIndex
Columns:
Name: group, dtype: object; group code of individuals, e.g., M, F
Name: ID, dtype: int64 or str; identification of individuals
indwt : list of index weights for each trait
progress : bool, optional; print progress of the function if True
        Returns
        -------
        None; the checked input data are stored as attributes of the class object.
"""
# check: ensures number of traits match size of index weights
indwt = np.array(indwt)
if (meff.shape[1]-1) != indwt.size:
sys.exit('no. of index weights do not match marker effects cols')
# check: ensure individuals' genotypes match group and ID info
id_indgrp = pd.Series(group.iloc[:, 1]).astype(str) # no of inds
if not pd.Series(
pd.unique(gmat.iloc[:, 0])).astype(str).equals(id_indgrp):
sys.exit("ID of individuals in group & genotypic data don't match")
# check: ensure marker names in marker map and effects match
if not (gmap.iloc[:, 1].astype(str)).equals(meff.iloc[:, 0].astype(str)):
print("Discrepancies between marker names")
sys.exit("Check genetic map and marker effects")
# check: ensure marker or allele sub effect are all numeric
meff = meff.iloc[:, 1:meff.shape[1]]
test = meff.apply(
lambda s: pd.to_numeric(s, errors='coerce').notnull().all())
if not test.all():
sys.exit("Marker or allele sub effects contain non-numeric values")
# check: ensure unique maps match no of groups if map more than 1
grpg = pd.unique(group.iloc[:, 0]) # groups of individuals
grp_chrom = gmap.shape[1]-3 # no of unique maps
gmat = haptogen(gmat, progress)
if grp_chrom > 1 and grp_chrom != grpg.size:
sys.exit("no. of unique maps does not match no. of groups")
# check no of markers in genotype and map and marker effects match
no_markers = gmap.shape[0] # no of markers
if no_markers != gmat.shape[1] or no_markers != meff.shape[0]:
sys.exit("markers nos in gen, chrm or marker effects don't match")
# check: ordered marker distance or recombination rates
for grn in range(grp_chrom):
for chrm in pd.unique(gmap.iloc[:, 0]):
mpx = np.array(gmap.iloc[:, 3+grn][gmap.iloc[:, 0] == chrm])
                if not (mpx == np.sort(mpx)).all():  # exit unless positions are in ascending order
sys.exit(
f"Faulty marker map on chr {chrm} for grp {grpg[grn]}")
if progress:
print('Data passed the test!')
print("Number of individuals: ", len(id_indgrp))
print("Number of groups: ", len(grpg), ": ", grpg)
print("Number of specific maps:", grp_chrom)
print("Number of chromosomes: ", len(pd.unique(gmap.iloc[:, 0])))
print("Total no. markers: ", no_markers)
print("Number of trait(s): ", meff.columns.size)
print("Trait name(s) and Index weight(s)")
if meff.columns.size == 1:
print(meff.columns[0], ": ", indwt[0])
elif meff.columns.size > 1:
for i in range(meff.columns.size):
print(meff.columns[i], ": ", indwt[i])
self.gmap = gmap
self.meff = meff
self.gmat = gmat
self.group = group
self.indwt = indwt
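# Illustrative usage sketch (not part of the original API): build a Datacheck
# object from the example data bundled with the package. Equal index weights
# are assumed here purely for illustration; replace them for a real analysis.
def _datacheck_example():
    gmap, meff, gmat, group, _ped = example_data()
    indwt = [1.0] * (meff.shape[1] - 1)  # one weight per trait column in effects.txt
    return Datacheck(gmap, meff, gmat, group, indwt, progress=True)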
def elem_cor(mylist, mprc, ngp, mposunit, method, chrm):
"""Derive pop cov matrix."""
if method == 1: # Bonk et al's approach
if mposunit in ("cM", "cm", "CM", "Cm"):
tmp = np.exp(-2*(np.abs(mprc - mprc[:, None])/100))/4
elif mposunit in ("reco", "RECO"):
if mprc[0] != 0:
sys.exit(f"First value for reco rate on chr {chrm} isn't zero")
aaa = (1-(2*mprc))/4
ida = np.arange(aaa.size)
tmp = aaa[np.abs(ida - ida[:, None])]
elif method == 2: # Santos et al's approach
if mposunit in ("cM", "cm", "CM", "Cm"):
tmp = (-1*(np.abs(mprc - mprc[:, None])/200))+0.25
cutoff = (-1*(50/200))+0.25
tmp = np.where(tmp < cutoff, 0, tmp)
elif mposunit in ("reco", "RECO"):
if mprc[0] != 0:
sys.exit(f"First value for reco rate on chr {chrm} isn't zero")
aaa = (-1*(mprc/2))+0.25
ida = np.arange(aaa.size)
tmp = aaa[np.abs(ida - ida[:, None])]
cutoff = (-1*(0.5/2))+0.25
tmp = np.where(tmp < cutoff, 0, tmp)
# append chromosome-specific covariance matrix to list
mylist[int(ngp)].append(tmp)
return mylist
def popcovmat(info, mposunit, method):
"""
Derive population-specific covariance matrices.
Parameters
----------
info : class object
A class object created using the function "datacheck"
    mposunit : string
        A string containing "cM" or "reco".
    method : int
        An integer with a value of 1 for Bonk et al.'s approach or
        2 for Santos et al.'s approach
Returns
-------
mylist : list
A list containing group-specific pop covariance matrices for each chr.
"""
if mposunit not in ("cM", "cm", "CM", "Cm", "reco", "RECO"):
sys.exit("marker unit should be either cM or reco")
# unique group name for naming the list if map is more than 1
probn = pd.unique(info.group.iloc[:, 0].astype(str)).tolist()
chromos = pd.unique(info.gmap.iloc[:, 0]) # chromosomes
no_grp = info.gmap.shape[1]-3 # no of maps
mylist = [] # list stores chromosome-wise covariance matrix
for ngp in range(no_grp):
mylist.append([])
# marker position in cM or recombination rates
grouprecodist = info.gmap.iloc[:, 3+ngp]
for chrm in chromos:
mpo = np.array(grouprecodist[info.gmap.iloc[:, 0] == (chrm)])
elem_cor(mylist, mpo, ngp, mposunit, method, chrm)
if no_grp > 1:
# if map is more than one, name list using group names
mylist = dict(zip(probn, mylist))
return mylist
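# Illustrative sketch (assumes the map positions in the Datacheck object are in
# centimorgans): chromosome-wise population covariance matrices, Bonk et al.'s method.
def _popcovmat_example(info):
    return popcovmat(info, mposunit="cM", method=1)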
@njit
def makemems(gmat, meff):
"""Set up family-specific marker effects (Mendelian sampling)."""
qqq = np.zeros((gmat.shape))
for i in range(gmat.shape[0]):
for j in range(gmat.shape[1]):
if gmat[i, j] == 4:
qqq[i, j] = meff[j]*-1
elif gmat[i, j] == 3:
qqq[i, j] = meff[j]
else:
qqq[i, j] = 0
return qqq
@njit
def makemebv(gmat, meff):
"""Set up family-specific marker effects (GEBV)."""
qqq = np.zeros((gmat.shape))
for i in range(gmat.shape[0]):
for j in range(gmat.shape[1]):
if gmat[i, j] == 2:
qqq[i, j] = meff[j]*-1
elif gmat[i, j] == 1:
qqq[i, j] = meff[j]
else:
qqq[i, j] = 0
return qqq
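# Note: with the genotype codes produced by fnrep2, makemems keeps +/- the marker
# effect only for heterozygotes (codes 3 and 4, sign set by phase), giving the
# Mendelian-sampling contribution, whereas makemebv keeps +/- the effect only for
# homozygotes (codes 1 and 2), which is what calcgbv later sums into breeding values.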
def traitspecmatrices(gmat, meff):
"""Store trait-specific matrices in a list."""
notr = meff.shape[1] # number of traits
slist = [] # list stores trait-specific matrices
slist.append([])
for i in range(notr):
# specify data type for numba
mefff = np.array(meff.iloc[:, i], float)
matrix_ms = makemems(gmat, mefff)
slist[0].append(matrix_ms)
return slist
def namesdf(notr, trait_names):
"""Create names of dataframe columns for Mendelian co(var)."""
    tnn = np.chararray((notr, notr), itemsize=30)
for i in range(notr):
for trt in range(notr):
if i == trt:
tnn[i, trt] = str(trait_names[i])
elif i != trt:
tnn[i, trt] = "{}_{}".format(trait_names[i], trait_names[trt])
colnam = tnn[np.tril_indices(notr)]
return colnam
def mrmmult(temp, covmat):
"""Matrix multiplication (MRM' or m'Rm)."""
return temp @ covmat @ temp.T
def dgmrm(temp, covmat):
"""Matrix multiplication (MRM') for bigger matrices."""
temp1111 = scipy.linalg.blas.dgemm(alpha=1.0, a=temp, b=covmat)
return scipy.linalg.blas.dgemm(alpha=1.0, a=temp1111, b=temp.T)
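# Note: for an effects block T and covariance matrix R, dgmrm(T, R) computes the
# same product T @ R @ T.T as mrmmult(T, R); it simply routes both multiplications
# through scipy.linalg.blas.dgemm, which is usually faster for large matrices.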
def progr(itern, total):
"""Print progress of a task."""
fill, printend, prefix, suffix = '█', "\r", 'Progress:', 'Complete'
deci, length = 0, 50
percent = ("{0:." + str(deci) + "f}").format(100 * (itern / float(total)))
filledlen = int(length * itern // total)
bars = fill * filledlen + '-' * (length - filledlen)
print(f'\r{prefix} |{bars}| {percent}% {suffix}', end=printend)
if itern == total:
print()
def subindcheck(info, sub_id):
"""Check if inds provided in pd.DataFrame (sub_id) are in group data."""
sub_id = pd.DataFrame(sub_id).reset_index(drop=True)
if sub_id.shape[1] != 1:
sys.exit("Individuals' IDs (sub_id) should be provided in one column")
numbers = info.group.iloc[:, 1].astype(str).tolist()
sub_id = sub_id.squeeze().astype(str).tolist()
aaa = [numbers.index(x) if x in numbers else None for x in sub_id]
aaa = np.array(aaa)
if len(aaa) != len(sub_id):
sys.exit("Some individual ID could not be found in group data")
return aaa
def msvarcov_g_st(info, covmat, sub_id, progress=False):
"""Derive Mendelian sampling co(variance) for single trait."""
if sub_id is not None:
aaa = subindcheck(info, sub_id)
idn = info.group.iloc[aaa, 1].reset_index(drop=True).astype(str) # ID
groupsex = info.group.iloc[aaa, 0].reset_index(drop=True).astype(str)
matsub = info.gmat[aaa, :]
else:
idn = info.group.iloc[:, 1].reset_index(drop=True).astype(str) # ID
groupsex = info.group.iloc[:, 0].reset_index(drop=True).astype(str)
matsub = info.gmat
if (info.gmap.shape[1]-3 == 1 and len(pd.unique(groupsex)) > 1):
print("The same map will be used for all groups")
if progress:
progr(0, matsub.shape[0]) # print progress bar
snpindexxx = np.arange(start=0, stop=info.gmap.shape[0], step=1)
notr = info.meff.columns.size
slist = traitspecmatrices(matsub, info.meff)
# dataframe to save Mendelian sampling (co)variance and aggregate breeding
msvmsc = np.empty((matsub.shape[0], 1))
for i in range(matsub.shape[0]): # loop over no of individuals
mscov = np.zeros((notr, notr)) # Mendelian co(var) mat for ind i
for chrm in pd.unique(info.gmap.iloc[:, 0]):
# snp index for chromosome chrm
s_ind = np.array(snpindexxx[info.gmap.iloc[:, 0] == (chrm)])
# family-specific marker effects for ind i
temp = np.zeros((notr, len(s_ind)))
for trt in range(notr):
temp[trt, :] = slist[0][trt][i, s_ind]
if info.gmap.shape[1]-3 == 1:
mscov = mscov + mrmmult(temp, covmat[0][chrm-1])
else:
mscov = mscov + mrmmult(temp, covmat[groupsex[i]][chrm-1])
        msvmsc[i, 0] = mscov[0, 0]  # single trait, so mscov is a 1 x 1 matrix
if progress:
progr(i + 1, matsub.shape[0]) # print progress bar
msvmsc = pd.DataFrame(msvmsc)
msvmsc.columns = info.meff.columns
msvmsc.insert(0, "ID", idn, True)
msvmsc.insert(1, "Group", groupsex, True) # insert group
return msvmsc
def msvarcov_g_mt(info, covmat, sub_id, progress=False):
"""Derive Mendelian sampling co(variance) for multiple traits."""
if sub_id is not None:
aaa = subindcheck(info, sub_id)
idn = info.group.iloc[aaa, 1].reset_index(drop=True).astype(str) # ID
groupsex = info.group.iloc[aaa, 0].reset_index(drop=True).astype(str)
matsub = info.gmat[aaa, :]
else:
idn = info.group.iloc[:, 1].reset_index(drop=True).astype(str) # ID
groupsex = info.group.iloc[:, 0].reset_index(drop=True).astype(str)
matsub = info.gmat
if (info.gmap.shape[1]-3 == 1 and len(pd.unique(groupsex)) > 1):
print("The same map will be used for all groups")
if progress:
progr(0, matsub.shape[0]) # print progress bar
snpindexxx = np.arange(start=0, stop=info.gmap.shape[0], step=1)
notr = info.meff.columns.size
slist = traitspecmatrices(matsub, info.meff)
# dataframe to save Mendelian sampling (co)variance and aggregate breeding
mad = len(np.zeros((notr+1, notr+1))[np.tril_indices(notr+1)])
msvmsc = np.empty((matsub.shape[0], mad))
for i in range(matsub.shape[0]): # loop over no of individuals
mscov = np.zeros((notr+1, notr+1)) # Mendelian co(var) mat for ind i
for chrm in pd.unique(info.gmap.iloc[:, 0]):
# snp index for chromosome chrm
s_ind = np.array(snpindexxx[info.gmap.iloc[:, 0] == (chrm)])
# family-specific marker effects for ind i
temp = np.zeros((notr+1, len(s_ind)))
for trt in range(notr):
temp[trt, :] = slist[0][trt][i, s_ind]
temp[notr, :] = np.matmul(info.indwt.T, temp[0:notr, :])
if info.gmap.shape[1]-3 == 1:
mscov = mscov + mrmmult(temp, covmat[0][chrm-1])
else:
mscov = mscov + mrmmult(temp, covmat[groupsex[i]][chrm-1])
msvmsc[i, :] = mscov[np.tril_indices(notr+1)]
if progress:
progr(i + 1, matsub.shape[0]) # print progress bar
msvmsc = pd.DataFrame(msvmsc)
tnames = np.concatenate((info.meff.columns, "AG"), axis=None)
colnam = namesdf(notr+1, tnames).decode('utf-8')
msvmsc.columns = colnam
msvmsc.insert(0, "ID", idn, True)
msvmsc.insert(1, "Group", groupsex, True) # insert group
return msvmsc
def msvarcov_g(info, covmat, sub_id, progress=False):
"""
Derive Mendelian sampling co(variance) and aggregate genotype.
Parameters
----------
info : class object
A class object created using the function "datacheck"
covmat : A list of pop cov matrices created using "popcovmat" function
sub_id : pandas.DataFrame with one column
Index: RangeIndex (minimum of 2 rows)
Containing ID numbers of specific individuals to be evaluated
progress : bool, optional; print progress of the function if True
Returns
-------
msvmsc : pandas.DataFrame
containing the Mendelian sampling (co)variance and aggregate genotype
Note: If sub_id is None, Mendelian (co-)variance will be estimated for
all individuals. Otherwise, Mendelian (co-)variance will be estimated for
the individuals in sub_id
"""
notr = info.meff.columns.size
if notr == 1:
msvmsc = msvarcov_g_st(info, covmat, sub_id, progress)
elif notr > 1:
msvmsc = msvarcov_g_mt(info, covmat, sub_id, progress)
return msvmsc
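# Illustrative sketch: Mendelian sampling (co)variances for every individual
# stored in the Datacheck object (sub_id=None evaluates all of them).
def _msvarcov_g_example(info, covmat):
    return msvarcov_g(info, covmat, sub_id=None, progress=True)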
def array2sym(array):
"""Convert array to stdized symm mat, and back to array without diags."""
dfmsize = array.size
for notr in range(1, 10000):
if dfmsize == len(np.zeros((notr, notr))[np.tril_indices(notr)]):
break
iii, jjj = np.tril_indices(notr)
mat = np.empty((notr, notr), float)
mat[iii, jjj], mat[jjj, iii] = array, array
mat = np.array(mat)
mat1 = cov2corr(mat)
return np.array(mat1[np.tril_indices(notr, k=-1)])
def msvarcov_gcorr(msvmsc):
"""
Standardize Mendelian sampling co(variance) and aggregate genotype.
Parameters
----------
msvmsc : pandas.DataFrame
containing the Mendelian sampling (co)variance and aggregate genotype
created using msvarcov_g function
Returns
-------
dfcor : pandas.DataFrame
containing standardized Mendelian sampling (co)variance
"""
if msvmsc.columns.size == 3:
sys.exit("Correlation cannot be derived for a single trait")
dfm = msvmsc.iloc[:, 2:msvmsc.shape[1]] # exclude ID and group
dfmsize = dfm.shape[1]
# derive number of traits
for notr in range(1, 10000):
if dfmsize == len(np.zeros((notr, notr))[np.tril_indices(notr)]):
break
# standardize covariance between traits
dfcor = dfm.apply(array2sym, axis=1)
# extract column names
listnames = dfm.columns.tolist()
cnames = [x for x in listnames if "_" in x]
# convert pd.series of list to data frame
dfcor = pd.DataFrame.from_dict(dict(zip(dfcor.index, dfcor.values))).T
dfcor.columns = cnames
# insert ID and group info
dfcor = [pd.DataFrame(msvmsc.iloc[:, 0:2]), dfcor] # add ID and GRP
dfcor = pd.concat(dfcor, axis=1)
return dfcor
def calcgbv(info, sub_id):
"""Calculate breeding values for each trait."""
if sub_id is not None:
aaa = subindcheck(info, sub_id)
idn = info.group.iloc[aaa, 1].reset_index(drop=True).astype(str) # ID
groupsex = info.group.iloc[aaa, 0].reset_index(drop=True).astype(str)
matsub = info.gmat[aaa, :]
else:
idn = info.group.iloc[:, 1].reset_index(drop=True).astype(str) # ID
groupsex = info.group.iloc[:, 0].reset_index(drop=True).astype(str)
matsub = info.gmat
no_individuals = matsub.shape[0] # Number of individuals
trait_names = info.meff.columns # traits names
notr = trait_names.size # number of traits
if notr == 1:
gbv = np.zeros((no_individuals, notr))
mefff = np.array(info.meff.iloc[:, 0], float) # type spec for numba
matrix_me = makemebv(matsub, mefff) # fam-spec marker effects BV
gbv[:, 0] = matrix_me.sum(axis=1) # sum all effects
gbv = pd.DataFrame(gbv)
gbv.columns = trait_names
elif notr > 1:
gbv = np.zeros((no_individuals, notr+1))
for i in range(notr):
mefff = np.array(info.meff.iloc[:, i], float) # type spec 4 numba
matrix_me = makemebv(matsub, mefff) # fam-spec marker effects BV
gbv[:, i] = matrix_me.sum(axis=1) # sum all effects for each trait
gbv[:, notr] = gbv[:, notr] + info.indwt[i]*gbv[:, i] # Agg gen
gbv = pd.DataFrame(gbv)
colnames = np.concatenate((trait_names, "ABV"), axis=None)
gbv.columns = colnames
gbv.insert(0, "ID", idn, True) # insert ID
gbv.insert(1, "Group", groupsex, True) # insert group
return gbv
def calcprob(info, msvmsc, thresh):
"""Calculate the probability of breeding top individuals."""
aaa = subindcheck(info, pd.DataFrame(msvmsc.iloc[:, 0]))
gbvall = calcgbv(info, None) # calc GEBV for all inds used by thresh
gbv = gbvall.iloc[aaa, :].reset_index(drop=True) # GEBV matching msvmsc
no_individuals = gbv.shape[0] # Number of individuals
trait_names = info.meff.columns # traits names
notr = trait_names.size # number of traits
if notr == 1:
probdf = np.zeros((no_individuals, notr))
ttt = np.quantile(gbvall.iloc[:, (0+2)], q=1-thresh) # threshold
probdf[:, 0] = 1 - scipy.stats.norm.cdf(
ttt, loc=gbv.iloc[:, (0+2)], scale=np.sqrt(msvmsc.iloc[:, 0+2]))
probdf = pd.DataFrame(probdf)
probdf.columns = trait_names
elif notr > 1:
colnam = np.concatenate((info.meff.columns, "AG"), axis=None)
colnam = namesdf(notr+1, colnam).decode('utf-8')
ttt = np.quantile(gbvall.iloc[:, (notr+2)], q=1-thresh) # threshold
probdf = np.zeros((no_individuals, notr+1))
t_ind = np.arange(colnam.shape[0])[np.in1d(colnam, trait_names)]
for i in range(notr):
ttt = np.quantile(gbvall.iloc[:, (i+2)], q=1-thresh) # threshold
probdf[:, i] = scipy.stats.norm.cdf(
ttt, loc=gbv.iloc[:, (i+2)], scale=np.sqrt(
msvmsc.iloc[:, (t_ind[i])+2]))
probdf[:, i] = np.nan_to_num(probdf[:, i]) # convert Inf to zero
probdf[:, i] = 1 - probdf[:, i]
ttt = np.quantile(gbvall.iloc[:, (notr+2)], q=1-thresh)
probdf[:, notr] = scipy.stats.norm.cdf(
ttt, loc=gbv.iloc[:, (notr+2)], scale=np.sqrt(
msvmsc["AG"]))
probdf[:, notr] = np.nan_to_num(probdf[:, notr]) # Agg
probdf[:, notr] = 1 - probdf[:, notr]
probdf = pd.DataFrame(probdf) # convert matrix to dataframe
colnames = np.concatenate((trait_names, "ABV"), axis=None)
probdf.columns = colnames
probdf = [pd.DataFrame(gbv.iloc[:, 0:2]), probdf] # add ID and GRP
probdf = pd.concat(probdf, axis=1)
return probdf
def calcindex(info, msvmsc, const):
"""Calculate the index if constant is known."""
sub_id = pd.DataFrame(msvmsc.iloc[:, 0])
gbv = calcgbv(info, sub_id) # calc GEBV
no_individuals = gbv.shape[0] # Number of individuals
trait_names = info.meff.columns # traits names
notr = trait_names.size
if notr == 1:
indexdf = np.zeros((no_individuals, notr))
indexdf[:, 0] = (gbv.iloc[:, (0+2)]/2) + np.sqrt(
msvmsc.iloc[:, 0+2])*const
indexdf = pd.DataFrame(indexdf)
indexdf.columns = trait_names
elif notr > 1:
colnam = np.concatenate((info.meff.columns, "AG"), axis=None)
colnam = namesdf(notr+1, colnam).decode('utf-8')
indexdf = np.zeros((no_individuals, notr+1))
t_ind = np.arange(colnam.shape[0])[np.in1d(colnam, trait_names)]
for i in range(notr):
indexdf[:, i] = (gbv.iloc[:, (i+2)]/2) + np.sqrt(
msvmsc.iloc[:, (t_ind[i]+2)])*const
indexdf[:, notr] = (gbv.iloc[:, (notr+2)]/2) + np.sqrt(
msvmsc["AG"])*const
indexdf = pd.DataFrame(indexdf)
colnames = np.concatenate((trait_names, "ABV"), axis=None)
indexdf.columns = colnames
indexdf = [pd.DataFrame(gbv.iloc[:, 0:2]), indexdf] # add ID and GRP
indexdf = pd.concat(indexdf, axis=1)
return indexdf
def selstrat_g(selstrat, info, sub_id, msvmsc, throrconst):
"""
Calc selection criteria (GEBV, PBTI, or index using gametic approach.
Parameters
----------
selstrat : str
A str containing any of GEBV, PBTI or index
info : class object
A class object created using the function "datacheck"
sub_id : pandas.DataFrame with one column
Index: RangeIndex (minimum of 2 rows)
Containing ID numbers of specific individuals to be evaluated
msvmsc : pandas.DataFrame
DF created using the function "msvarcov_g"
throrconst : float
If selstrat is PBTI, a throrconst of value 0.05 sets threshold at
top 5% of GEBV. If selstrat is index, throrconst is a constant.
If selstrat is GEBV, throrconst can be any random value.
Returns
-------
data : pandas.DataFrame
Index: RangeIndex
Columns:
ID, Group, trait names and Aggregate Breeding Value (ABV)
Note: If selstrat is GEBV, None may be used for throrconst and msvmsc.
If sub_id is None and selstrat is GEBV, GEBVs will be estimated for all
individuals. However, if selstrat is not GEBV, the chosen selection
criterion will be estimated for all individuals in msvmsc data frame.
"""
if selstrat in ("PBTI", "pbti", "index", "INDEX") and msvmsc is None:
sys.exit("Provide Mendelian (co-)variance dataframe: 'msvmsc'")
if selstrat in ("PBTI", "pbti", "index", "INDEX") and throrconst is None:
sys.exit("Provide value for throrconst parameter")
if selstrat not in ('GEBV', 'gebv', 'PBTI', 'pbti', 'index', 'INDEX'):
sys.exit("selection strategy should be one of GEBV, PBTI or INDEX")
if selstrat in ('GEBV', 'gebv'):
data = calcgbv(info, sub_id)
elif selstrat in ('PBTI', 'pbti'):
if throrconst > 1 or throrconst < 0:
sys.exit("value must be in the range of 0 and 1")
data = calcprob(info, msvmsc, throrconst)
elif selstrat in ('index', 'INDEX'):
data = calcindex(info, msvmsc, throrconst)
return data
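# Illustrative sketch: probability of breeding top 5% individuals (PBTI) for
# everyone in the Mendelian (co)variance data frame derived above.
def _selstrat_g_example(info, covmat):
    msvmsc = msvarcov_g(info, covmat, sub_id=None)
    return selstrat_g("PBTI", info, sub_id=None, msvmsc=msvmsc, throrconst=0.05)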
def cov2corr(cov):
"""Convert covariance to correlation matrix."""
cov = np.asanyarray(cov)
std_ = np.sqrt(np.diag(cov))
with np.errstate(invalid='ignore'):
corr = cov / np.outer(std_, std_)
return corr
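# Worked example: cov2corr([[4.0, 2.0], [2.0, 1.0]]) == [[1.0, 1.0], [1.0, 1.0]],
# since corr_ij = cov_ij / (sd_i * sd_j) with sd = sqrt(diag(cov)) = [2.0, 1.0].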
def aggen(us_ind, no_markers, slst, indwt):
"""Set up additive effects matrix of aggregate genotype."""
mmfinal = np.empty((len(us_ind), no_markers))
xxx = 0
for iii in us_ind:
tmpmt1 = np.array([slst[0][trt][iii, :] for trt in range(indwt.size)])
mmfinal[xxx, :] = np.matmul(indwt.transpose(), tmpmt1)
xxx = xxx + 1
return mmfinal
def chr_int(xxxxx):
"""Format chromomosome of interest parameter."""
if 'all' in xxxxx:
xxxxx = 'all'
elif 'none' in xxxxx:
xxxxx = 'none'
else:
xxxxx = np.array([int(i) for i in xxxxx])
return xxxxx
def writechr(covtmpx, chrinterest, chrm, trtnam, probx, stdsim):
"""Write matrices to file."""
if isinstance(chrinterest, str):
if chrinterest == 'all':
chrfile1 = "{}/Sim mat for {} chrm {} grp {}.npy".format(
os.getcwd(), trtnam, chrm, probx)
np.save(chrfile1, covtmpx)
elif chrm in chrinterest:
chrfile1 = "{}/Sim mat for {} chrm {} grp {}.npy".format(
os.getcwd(), trtnam, chrm, probx) # output file
np.save(chrfile1, covtmpx)
if stdsim:
if isinstance(chrinterest, str):
if chrinterest == 'all':
chrfilec = "{}/Stdsim mat for {} chrm {} grp {}.npy".format(
os.getcwd(), trtnam, chrm, probx) # output file
np.save(chrfilec, cov2corr(covtmpx))
elif chrm in chrinterest:
chrfilec = "{}/Stdsim mat for {} chrm {} grp {}.npy".format(
os.getcwd(), trtnam, chrm, probx) # output file
np.save(chrfilec, cov2corr(covtmpx))
def writechrunspec(covtmpx, chrinterest, chrm, trtnam, stdsim):
"""Write matrices to file."""
if isinstance(chrinterest, str):
if chrinterest == 'all':
chrfile1 = "{}/Sim mat for {} chrm {}.npy".format(
os.getcwd(), trtnam, chrm)
np.save(chrfile1, covtmpx)
elif chrm in chrinterest:
chrfile1 = "{}/Sim mat for {} chrm {}.npy".format(
os.getcwd(), trtnam, chrm) # output file
np.save(chrfile1, covtmpx)
if stdsim:
if isinstance(chrinterest, str):
if chrinterest == 'all':
chrfilec = "{}/Stdsim mat for {} chrm {}.npy".format(
os.getcwd(), trtnam, chrm) # output file
np.save(chrfilec, cov2corr(covtmpx))
elif chrm in chrinterest:
chrfilec = "{}/Stdsim mat for {} chrm {}.npy".format(
os.getcwd(), trtnam, chrm) # output file
np.save(chrfilec, cov2corr(covtmpx))
def grtonum(numnx):
"""Map chracters to numeric (0-no of groups)."""
numnx = numnx.reset_index(drop=True)
probn = pd.unique(numnx).tolist()
alt_no = np.arange(0, len(probn), 1)
noli = numnx.tolist()
numnx = np.array(list(map(dict(zip(probn, alt_no)).get, noli, noli)))
return numnx, probn
def datret(info, rw_nms, pfnp, us_ind, slist, covmat, cov_indxx, stdsim,
progress):
"""Return sim mat based on aggregate genotypes."""
snpindexxxx = np.arange(start=0, stop=info.gmap.shape[0], step=1)
if info.meff.shape[1] == 1 and not stdsim:
mat = cov_indxx
elif info.meff.shape[1] == 1 and stdsim:
mat = cov2corr(cov_indxx)
elif info.meff.shape[1] > 1:
if info.gmap.shape[1]-3 > 1:
rw_nms = pd.DataFrame(rw_nms)
rw_nms.to_csv(f"order of inds in mat grp {pfnp}.csv", index=False)
if progress:
print('Creating similarity matrix based on aggregate genotype')
progr(0, max(pd.unique(info.gmap.iloc[:, 0])))
tmpmt1 = aggen(us_ind, info.gmap.shape[0], slist, info.indwt)
# stores ABV covariance btw inds
mat = np.zeros((len(us_ind), len(us_ind)))
        # loop over chromosomes
for chrm in pd.unique(info.gmap.iloc[:, 0]):
s_ind = np.array(snpindexxxx[info.gmap.iloc[:, 0] == (chrm)])
if info.gmap.shape[1]-3 == 1:
covtmpx = abs(dgmrm(tmpmt1[:, s_ind], covmat[0][chrm-1]))
else:
covtmpx = abs(dgmrm(tmpmt1[:, s_ind], covmat[pfnp][chrm-1]))
mat = mat + covtmpx
if progress:
progr(chrm, max(pd.unique(info.gmap.iloc[:, 0])))
if stdsim:
mat = cov2corr(mat)
return mat
def mrmcals(info, us_ind, stdsim, slist, covmat, probn, chrinterest, save,
progress):
"""Compute similarity matrix for each chromosome."""
if progress:
progr(0, info.meff.columns.size)
for i in range(info.meff.columns.size):
cov_indxx = np.zeros((len(us_ind), len(us_ind)))
for chrm in pd.unique(info.gmap.iloc[:, 0]):
s_ind = np.array(np.arange(0, info.gmap.shape[0], 1
)[info.gmap.iloc[:, 0] == (chrm)])
if info.gmap.shape[1]-3 == 1: # map is 1
covtmpx = abs(dgmrm(slist[0][i][:, s_ind], covmat[0][chrm-1]))
else: # if map is more than 1
covtmpx = abs(dgmrm(slist[0][i][us_ind[:, None], s_ind],
covmat[probn][chrm-1]))
cov_indxx = cov_indxx + covtmpx # sums up chrm-specific sims
if len(pd.unique(info.group.iloc[:, 0].astype(str))) == 1:
writechrunspec(covtmpx, chrinterest, chrm,
info.meff.columns[i], stdsim)
else:
writechr(covtmpx, chrinterest, chrm, info.meff.columns[i],
probn, stdsim) # write sim to file
if stdsim:
if save is True:
if info.gmap.shape[1]-3 == 1:
covxfile = "{}/Stdsim mat for {}.npy".format(
os.getcwd(), info.meff.columns[i])
else:
covxfile = "{}/Stdsim mat for {} grp {}.npy".format(
os.getcwd(), info.meff.columns[i], probn)
np.save(covxfile, cov2corr(cov_indxx)) # write std sim mats
else:
if save is True:
if info.gmap.shape[1]-3 == 1:
covxfile = "{}/Sim mat for {}.npy".format(
os.getcwd(), info.meff.columns[i])
else:
covxfile = "{}/Sim mat for {} grp {}.npy".format(
os.getcwd(), info.meff.columns[i], probn)
np.save(covxfile, cov_indxx) # write sim matrices
if progress:
progr(i + 1, info.meff.columns.size)
return cov_indxx
def simmat_g(info, covmat, sub_id, chrinterest, save=False, stdsim=False,
progress=False):
"""
Compute similarity matrices using gametic approach.
Parameters
----------
info : class object
A class object created using the function "datacheck"
covmat : A list of pop cov matrices created using "popcovmat" function
sub_id : pandas.DataFrame with one column
Index: RangeIndex (minimum of 2 rows)
Containing ID numbers of specific individuals to be evaluated
chrinterest : str or list of int
list of chromosome numbers of interest or str with "all" or "none"
save : bool, optional; write trait-specific sim mats to file if true
    stdsim : bool, optional; standardized similarity matrices are computed (and written) if true
progress : bool, optional; print progress of the task if true
Returns
-------
    multgrpcov : list containing similarity matrices for each group
"""
if sub_id is None:
inda = np.arange(0, info.gmat.shape[0], 1)
sub_id = pd.DataFrame(info.group.iloc[inda, 1])
aaa = subindcheck(info, sub_id)
else:
aaa = subindcheck(info, sub_id)
chrinterest = chr_int(chrinterest)
slist = traitspecmatrices(info.gmat[aaa, :], info.meff) # trt-spec mat
grp = info.gmap.shape[1]-3
if (grp == 1 and len(pd.unique(info.group.iloc[:, 0].astype(str))) > 1):
print("The same map will be used for all groups")
numbers, probn = grtonum(info.group.iloc[aaa, 0].astype(str))
multgrpcov = []
for gnp in range(grp):
multgrpcov.append([])
if grp == 1:
us_ind = np.arange(start=0, stop=info.gmat[aaa, :].shape[0],
step=1)
else:
tng = numbers == gnp
us_ind = np.array(list(compress(np.arange(0, len(tng), 1),
tng))).T
print("Processing group ", probn[gnp])
rw_nms = info.group.iloc[aaa, 1].reset_index(drop=True).astype(
str)[us_ind]
cov_indxx = mrmcals(info, us_ind, stdsim, slist, covmat, probn[gnp],
chrinterest, save, progress)
multgrpcov[int(gnp)].append(
datret(info, rw_nms, probn[gnp], us_ind, slist, covmat,
cov_indxx, stdsim, progress))
if len(probn) == 1:
break
if grp > 1 and len(probn):
multgrpcov = dict(zip(probn, multgrpcov))
return multgrpcov
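# Illustrative sketch: standardized gametic similarity matrices for all
# individuals; an empty chrinterest list and save=False keep everything in
# memory instead of writing chromosome- or trait-wise matrices to file.
def _simmat_g_example(info, covmat):
    return simmat_g(info, covmat, sub_id=None, chrinterest=[],
                    save=False, stdsim=True, progress=True)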
def submsvmsc(msvmsc, sub_idz):
"""Extract index in msvmsc data frame."""
sub_idz = pd.DataFrame(sub_idz)
numbs = msvmsc.iloc[:, 0].astype(str).tolist()
sub_idz = sub_idz.reset_index(drop=True).squeeze()
mal = sub_idz.iloc[:, 0].astype(str).tolist()
fem = sub_idz.iloc[:, 1].astype(str).tolist()
if sub_idz is not None:
for i in mal:
if i not in numbs:
sys.exit("Individuals are not in msvmsc parameter")
for i in fem:
if i not in numbs:
sys.exit("Individuals are not in msvmsc parameter")
mal1 = [numbs.index(x) if x in numbs else None for x in mal]
fem1 = [numbs.index(x) if x in numbs else None for x in fem]
return mal1, fem1
def pot_parents(info, data, selmale, selfm):
"""Subset individuals of interest."""
trait_names = info.meff.columns
if trait_names.size == 1:
datamale = data[data.iloc[:, 1] == selmale[0]]
pos = subindcheck(info, pd.DataFrame(datamale.iloc[:, 0]))
datamale.insert(0, "pos", pos, True)
no_sire = int(datamale.shape[0] * selmale[1])
datamale = datamale.sort_values(
by=[trait_names[0]], ascending=False).iloc[0:no_sire, :]
datafemale = data[data.iloc[:, 1] == selfm[0]]
pos = subindcheck(info, pd.DataFrame(datafemale.iloc[:, 0]))
datafemale.insert(0, "pos", pos, True)
no_dam = int(datafemale.shape[0] * selfm[1])
datafemale = datafemale.sort_values(
by=[trait_names[0]], ascending=False).iloc[0:no_dam, :]
elif trait_names.size > 1:
datamale = data[data.iloc[:, 1] == selmale[0]]
pos = subindcheck(info, pd.DataFrame(datamale.iloc[:, 0]))
datamale.insert(0, "pos", pos, True)
no_sire = int(datamale.shape[0] * selmale[1])
datamale = datamale.sort_values(
by=['ABV'], ascending=False).iloc[0:no_sire, :]
datafemale = data[data.iloc[:, 1] == selfm[0]]
pos = subindcheck(info, pd.DataFrame(datafemale.iloc[:, 0]))
datafemale.insert(0, "pos", pos, True)
no_dam = int(datafemale.shape[0] * selfm[1])
datafemale = datafemale.sort_values(
by=['ABV'], ascending=False).iloc[0:no_dam, :]
matlist = np.array(np.meshgrid(
datamale.iloc[:, 0], datafemale.iloc[:, 0])).T.reshape(-1, 2)
ids = np.array(np.meshgrid(
datamale.iloc[:, 1], datafemale.iloc[:, 1])).T.reshape(-1, 2)
if trait_names.size == 1:
matndat = pd.DataFrame(index=range(matlist.shape[0]), columns=range(
4+trait_names.size))
else:
matndat = pd.DataFrame(
index=range(matlist.shape[0]), columns=range(5+trait_names.size))
matndat.iloc[:, [0, 1]] = ids
matndat.iloc[:, [2, 3]] = matlist
return matndat
def selsgebv(notr, matndat, gbv, maxmale):
"""Calculate breeding values for each trait (zygote)."""
mal = matndat.iloc[:, 2].tolist()
fem = matndat.iloc[:, 3].tolist()
if notr == 1:
matndat.iloc[:, 4] = (np.array(gbv.iloc[mal, (0+2)]) + np.array(
gbv.iloc[fem, (0+2)]))/2
elif notr > 1:
matndat.iloc[:, 4:(5+notr)] = (np.array(
gbv.iloc[mal, 2:(notr+3)]) + np.array(gbv.iloc[fem, 2:(notr+3)]))/2
idfxxx = np.unique(matndat.iloc[:, 3])
mmat = pd.DataFrame(index=range(len(idfxxx)),
columns=range(matndat.shape[1]))
for mmm in np.arange(0, len(idfxxx), 1):
axx = matndat.loc[matndat.iloc[:, 3] == idfxxx[mmm]]
tsire = np.array(axx.iloc[:, 2])
mmat.iloc[mmm, :] = axx.iloc[np.argmax(
axx.iloc[:, axx.columns.size-1]), :]
norepssire = Counter(mmat.iloc[:, 2])
lents = len(tsire)
for nrs in range(lents):
if norepssire[tsire[nrs]] <= maxmale-1:
mmat.iloc[mmm, :] = np.array(axx[axx.iloc[:, 2] == tsire[nrs]])
break
matndat = mmat
if notr == 1:
matndat.columns = np.concatenate((
['MaleID', 'FemaleID', 'MaleIndex', 'FemaleIndex'],
gbv.columns[gbv.columns.size-1]), axis=None)
else:
matndat.columns = np.concatenate((
['MaleID', 'FemaleID', 'MaleIndex', 'FemaleIndex'],
gbv.columns[2:gbv.columns.size].tolist()), axis=None)
return matndat
def selspbtizyg(notr, gbv, matndat, msvmsc, throrconst, maxmale):
"""Calculate prob of breeding top inds (zygote)."""
mal1, fem1 = submsvmsc(msvmsc, pd.DataFrame(matndat.iloc[:, 0:2]))
mal = matndat.iloc[:, 2].tolist()
fem = matndat.iloc[:, 3].tolist()
if notr == 1:
matndat.iloc[:, 4] = (np.array(gbv.iloc[mal, (0+2)]) + np.array(
gbv.iloc[fem, (0+2)]))/2
ttt = np.quantile(gbv.iloc[:, 0+2], q=1-throrconst)
msvtemp = np.array(msvmsc.iloc[mal1, 0+2]) + np.array(
msvmsc.iloc[fem1, 0+2])
matndat.iloc[:, 4] = 1 - scipy.stats.norm.cdf(
ttt, loc=matndat.iloc[:, 4], scale=np.sqrt(
msvtemp))
elif notr > 1:
trait_names = gbv.columns[2:2+notr]
colnam = np.concatenate((trait_names, "AG"), axis=None)
colnam = namesdf(notr+1, colnam).decode('utf-8')
t_ind = np.arange(colnam.shape[0])[np.in1d(colnam, trait_names)]
for i in range(notr):
matndat.iloc[:, 4+i] = (
np.array(gbv.iloc[mal, (i+2)]) + np.array(
gbv.iloc[fem, (i+2)]))/2
ttt = np.quantile(gbv.iloc[:, 2+i], q=1-throrconst)
msvtemp = np.array(msvmsc.iloc[mal1, t_ind[i]+2]) + np.array(
msvmsc.iloc[fem1, t_ind[i]+2])
matndat.iloc[:, 4+i] = 1 - scipy.stats.norm.cdf(
ttt, loc=matndat.iloc[:, 4+i], scale=np.sqrt(msvtemp))
matndat.iloc[:, 4+notr] = (
np.array(gbv.iloc[mal, (notr+2)]) + np.array(
gbv.iloc[fem, (notr+2)]))/2
ttt = np.quantile(gbv.iloc[:, 2+notr], q=1-throrconst)
msvtemp = np.array(msvmsc.loc[mal1, ["AG"]]) + np.array(
msvmsc.loc[fem1, ["AG"]])
matndat.iloc[:, 4+notr] = 1 - scipy.stats.norm.cdf(
ttt, loc=matndat.iloc[:, 4+notr], scale=np.sqrt(msvtemp.ravel()))
idfxxx = np.unique(matndat.iloc[:, 3])
mmat = pd.DataFrame(index=range(len(idfxxx)),
columns=range(matndat.shape[1]))
for mmm in np.arange(0, len(idfxxx), 1):
axx = matndat.loc[matndat.iloc[:, 3] == idfxxx[mmm]]
tsire = np.array(axx.iloc[:, 2])
mmat.iloc[mmm, :] = axx.iloc[np.argmax(
axx.iloc[:, axx.columns.size-1]), :]
norepssire = Counter(mmat.iloc[:, 2])
lents = len(tsire)
for nrs in range(lents):
if norepssire[tsire[nrs]] <= maxmale-1:
mmat.iloc[mmm, :] = np.array(axx[axx.iloc[:, 2] == tsire[nrs]])
break
matndat = mmat
if notr == 1:
matndat.columns = np.concatenate((
['MaleID', 'FemaleID', 'MaleIndex', 'FemaleIndex'],
gbv.columns[gbv.columns.size-1]), axis=None)
else:
matndat.columns = np.concatenate((
['MaleID', 'FemaleID', 'MaleIndex', 'FemaleIndex'],
gbv.columns[2:gbv.columns.size].tolist()), axis=None)
return matndat
def selsindex(notr, gbv, matndat, msvmsc, throrconst, maxmale):
"""Calculate the index if constant is known (zygote)."""
mal1, fem1 = submsvmsc(msvmsc, pd.DataFrame(matndat.iloc[:, 0:2]))
mal = matndat.iloc[:, 2].tolist()
fem = matndat.iloc[:, 3].tolist()
if notr == 1:
matndat.iloc[:, 4] = (np.array(gbv.iloc[mal, (0+2)]) + np.array(
gbv.iloc[fem, (0+2)]))/2
msvtemp = np.array(msvmsc.iloc[mal1, 0+2]) + np.array(
msvmsc.iloc[fem1, 0+2])
matndat.iloc[:, 4] = matndat.iloc[:, 4] + np.sqrt(msvtemp)*throrconst
elif notr > 1:
trait_names = gbv.columns[2:2+notr]
colnam = np.concatenate((trait_names, "AG"), axis=None)
colnam = namesdf(notr+1, colnam).decode('utf-8')
t_ind = np.arange(colnam.shape[0])[np.in1d(colnam, trait_names)]
for i in range(notr):
matndat.iloc[:, 4+i] = (
np.array(gbv.iloc[mal, (i+2)]) + np.array(
gbv.iloc[fem, (i+2)]))/2
msvtemp = np.array(msvmsc.iloc[mal1, t_ind[i]+2]) + np.array(
msvmsc.iloc[fem1, t_ind[i]+2])
matndat.iloc[:, 4+i] = matndat.iloc[:, 4+i] + np.sqrt(
msvtemp)*throrconst
matndat.iloc[:, 4+notr] = (
np.array(gbv.iloc[mal, (notr+2)]) + np.array(
gbv.iloc[fem, (notr+2)]))/2
msvtemp = np.array(msvmsc.loc[mal1, ["AG"]]) + np.array(
msvmsc.loc[fem1, ["AG"]])
matndat.iloc[:, 4+notr] = matndat.iloc[:, 4+notr] + (
np.sqrt(msvtemp)*throrconst).ravel()
idfxxx = np.unique(matndat.iloc[:, 3])
mmat = pd.DataFrame(index=range(len(idfxxx)),
columns=range(matndat.shape[1]))
for mmm in np.arange(0, len(idfxxx), 1):
axx = matndat.loc[matndat.iloc[:, 3] == idfxxx[mmm]]
tsire = np.array(axx.iloc[:, 2])
mmat.iloc[mmm, :] = axx.iloc[np.argmax(
axx.iloc[:, axx.columns.size-1]), :]
norepssire = Counter(mmat.iloc[:, 2])
lents = len(tsire)
for nrs in range(lents):
if norepssire[tsire[nrs]] <= maxmale-1:
mmat.iloc[mmm, :] = np.array(axx[axx.iloc[:, 2] == tsire[nrs]])
break
matndat = pd.DataFrame(mmat)
if notr == 1:
matndat.columns = np.concatenate((
['MaleID', 'FemaleID', 'MaleIndex', 'FemaleIndex'],
gbv.columns[gbv.columns.size-1]), axis=None)
else:
matndat.columns = np.concatenate((
['MaleID', 'FemaleID', 'MaleIndex', 'FemaleIndex'],
gbv.columns[2:gbv.columns.size].tolist()), axis=None)
return matndat
def subindcheckzyg(info, sub_idz):
"""Check sex and if matepairs provided in sub_idz are in group data."""
numbs = info.group.iloc[:, 1].astype(str).tolist()
sub_idz = pd.DataFrame(sub_idz).reset_index(drop=True).squeeze()
mal = sub_idz.iloc[:, 0].astype(str).tolist()
fem = sub_idz.iloc[:, 1].astype(str).tolist()
mal1 = [numbs.index(x) if x in numbs else None for x in mal]
fem1 = [numbs.index(x) if x in numbs else None for x in fem]
if len(pd.unique(info.group.iloc[mal1, 0])) != 1:
sys.exit("Group class in sub_idz is not unique to ID of males")
if len(pd.unique(info.group.iloc[fem1, 0])) != 1:
sys.exit("Group class in sub_idz is not unique to ID of females")
idn = sub_idz.reset_index(drop=True)
mgp = list(set(info.group.iloc[mal1, 0]))
fgp = list(set(info.group.iloc[fem1, 0]))
if len(mgp) > 1 or len(fgp) > 1:
sys.exit("multiple sexes detected in data")
probn = [mgp[0], fgp[0]]
return mal1, fem1, idn, probn
def calcgbvzygsub(info, sub_idz):
"""Calc breeding values for matepairs."""
mal1, fem1, idn, _ = subindcheckzyg(info, sub_idz)
no_individuals, trait_names = idn.shape[0], info.meff.columns
notr = trait_names.size
if notr == 1:
gbv = np.zeros((no_individuals, notr))
mefff = np.array(info.meff.iloc[:, 0], float)
matrix_me1 = makemebv(info.gmat[mal1, :], mefff)
matrix_me2 = makemebv(info.gmat[fem1, :], mefff)
gbv[:, 0] = (matrix_me1.sum(axis=1) + matrix_me2.sum(axis=1))/2
gbv = pd.DataFrame(gbv)
gbv.columns = trait_names
elif notr > 1:
gbv = np.zeros((no_individuals, notr+1))
for i in range(notr):
mefff = np.array(info.meff.iloc[:, i], float)
matrix_me1 = makemebv(info.gmat[mal1, :], mefff)
matrix_me2 = makemebv(info.gmat[fem1, :], mefff)
gbv[:, i] = (matrix_me1.sum(axis=1) + matrix_me2.sum(axis=1))/2
gbv[:, notr] = gbv[:, notr] + info.indwt[i]*gbv[:, i]
gbv = pd.DataFrame(gbv)
colnames = np.concatenate((trait_names, "ABV"), axis=None)
gbv.columns = colnames
gbv.insert(0, "FemaleIndex", fem1, True) # insert ID
gbv.insert(0, "MaleIndex", mal1, True) # insert ID
gbv.insert(0, "FemaleID", idn.iloc[:, 1], True) # insert ID
gbv.insert(0, "MaleID", idn.iloc[:, 0], True) # insert ID
return gbv
def calcprobzygsub(info, msvmsc, thresh, sub_idz):
"""Calculate the probability of breeding top individuals."""
subindcheckzyg(info, sub_idz)
mal1, fem1 = submsvmsc(msvmsc, sub_idz)
gbv = calcgbvzygsub(info, sub_idz)
trait_names = info.meff.columns # traits names
notr = trait_names.size
gbvall = calcgbv(info, None)
if notr == 1:
probdf = np.zeros((gbv.shape[0], notr))
ttt = np.quantile(gbvall.iloc[:, (0+2)], q=1-thresh)
msvmsc111 = np.array(msvmsc.iloc[mal1, (0+2)]) + np.array(
msvmsc.iloc[fem1, (0+2)])
probdf[:, 0] = 1 - scipy.stats.norm.cdf(
ttt, loc=gbv.iloc[:, (0+4)], scale=np.sqrt(msvmsc111))
probdf = pd.DataFrame(probdf)
probdf.columns = trait_names
elif notr > 1:
colnam = np.concatenate((trait_names, "AG"), axis=None)
colnam = namesdf(notr+1, colnam).decode('utf-8')
probdf = np.zeros((gbv.shape[0], notr+1))
t_ind = np.arange(colnam.shape[0])[np.in1d(colnam, trait_names)]
for i in range(notr):
ttt = np.quantile(gbvall.iloc[:, (i+2)], q=1-thresh)
msvmsc111 = np.array(msvmsc.iloc[mal1, (t_ind[i])+2]) + np.array(
msvmsc.iloc[fem1, (t_ind[i])+2])
probdf[:, i] = scipy.stats.norm.cdf(
ttt, loc=gbv.iloc[:, (i+4)], scale=np.sqrt(msvmsc111))
probdf[:, i] = np.nan_to_num(probdf[:, i])
probdf[:, i] = 1 - probdf[:, i]
ttt = np.quantile(gbvall.iloc[:, (notr+2)], q=1-thresh)
msvmsc111 = np.array(msvmsc.loc[mal1, ["AG"]]) + np.array(
msvmsc.loc[fem1, ["AG"]])
probdf[:, notr] = scipy.stats.norm.cdf(
ttt, loc=gbv.iloc[:, (notr+4)], scale=np.sqrt(msvmsc111.ravel()))
probdf[:, notr] = np.nan_to_num(probdf[:, notr])
probdf[:, notr] = 1 - probdf[:, notr]
probdf = pd.DataFrame(probdf) # convert matrix to dataframe
colnames = np.concatenate((trait_names, "ABV"), axis=None)
probdf.columns = colnames
probdf = pd.concat([gbv.iloc[:, 0:4], probdf], axis=1)
return probdf
def calcindexzygsub(info, msvmsc, const, sub_idz):
"""Calc index matepairs if constant is known."""
subindcheckzyg(info, sub_idz)
mal1, fem1 = submsvmsc(msvmsc, sub_idz)
gbv = calcgbvzygsub(info, sub_idz)
trait_names = info.meff.columns # traits names
notr = trait_names.size
if notr == 1:
indexdf = np.zeros((gbv.shape[0], notr))
msvmsc111 = np.array(msvmsc.iloc[mal1, (0+2)]) + np.array(
msvmsc.iloc[fem1, (0+2)])
indexdf[:, 0] = gbv.iloc[:, (0+4)] + np.sqrt(msvmsc111)*const
indexdf = pd.DataFrame(indexdf)
indexdf.columns = trait_names
elif notr > 1:
colnam = np.concatenate((trait_names, "AG"), axis=None)
colnam = namesdf(notr+1, colnam).decode('utf-8')
indexdf = np.zeros((gbv.shape[0], notr+1))
t_ind = np.arange(colnam.shape[0])[np.in1d(colnam, trait_names)]
for i in range(notr):
msvmsc111 = np.array(msvmsc.iloc[mal1, (t_ind[i])+2]) + np.array(
msvmsc.iloc[fem1, (t_ind[i])+2])
indexdf[:, i] = gbv.iloc[:, (i+4)] + np.sqrt(msvmsc111)*const
msvmsc111 = np.array(msvmsc.loc[mal1, ["AG"]]) + np.array(
msvmsc.loc[fem1, ["AG"]])
indexdf[:, notr] = gbv.iloc[:, (notr+4)] + (
np.sqrt(msvmsc111)*const).ravel()
indexdf = pd.DataFrame(indexdf)
colnames = np.concatenate((trait_names, "ABV"), axis=None)
indexdf.columns = colnames
indexdf = pd.concat([gbv.iloc[:, 0:4], indexdf], axis=1) # grp
return indexdf
def selstrat_z(selstrat, info, sub_idz, msvmsc, throrconst, selmale, selfm,
maxmale):
"""
Calculate selection criteria (GEBV, PBTI, or index) for zygotes.
Parameters
----------
selstrat : str
A str containing any of GEBV, PBTI or index
info : class object
A class object created using the function "datacheck"
sub_idz : pandas.DataFrame
Index: RangeIndex (minimum of 2 rows)
Containing ID numbers of specific individuals to be evaluated.
        The 1st and 2nd columns must be IDs of males and females, respectively.
msvmsc : pandas.DataFrame
DF created using the function "msvarcov_g"
throrconst : float
        If selstrat is PBTI, a throrconst of value 0.05 sets the threshold at the
        top 5% of GEBV of the population. If selstrat is index, throrconst is
        a constant.
selmale : list
list of two items. 1st item is the str coding for males as in group
dataframe. The 2nd item is a float representing x% of males to be used
selfm : list
list of two items. 1st item is the str coding for females as in group
        dataframe. The 2nd item is a float representing x% of females to be used
maxmale : integer
maximum number of allocations for males
Returns
-------
matndat : pandas.DataFrame
Index: RangeIndex
Columns:
MaleID, FemaleID, MaleIndex, FemaleIndex, trait names and ABV
Note: If selstrat is GEBV, None may be used for throrconst and msvmsc.
If sub_idz is None and selstrat is GEBV, GEBVs will be estimated for all
individuals. However, if selstrat is not GEBV, the chosen selection
    criterion will be estimated for all individuals in the msvmsc data frame.
"""
if len(pd.unique(info.group.iloc[:, 0])) == 1:
sys.exit("Inds are the same group. Use 'selstrat_g' function")
if selstrat not in ('gebv', 'GEBV', 'pbti', 'PBTI', 'index', 'INDEX'):
sys.exit("Options must be one of 'GEBV', PBTI', or 'INDEX'")
if selstrat in ("PBTI", "pbti", "index", "INDEX") and msvmsc is None:
sys.exit("Provide Mendelian (co-)variance dataframe: 'msvmsc'")
if selstrat in ("PBTI", "pbti", "index", "INDEX") and throrconst is None:
sys.exit("Provide value for throrconst parameter")
if sub_idz is None:
if maxmale is None:
sys.exit("Provide maximum allocation for males 'maxmale'")
elif selmale is None:
sys.exit("Provide value for propoertio of males to be selected")
elif selfm is None:
sys.exit("Provide value for propoertio of females to be selected")
if sub_idz is not None:
if selstrat in ('gebv', 'GEBV'):
matndat = calcgbvzygsub(info, sub_idz)
elif selstrat in ('pbti', 'PBTI'):
matndat = calcprobzygsub(info, msvmsc, throrconst, sub_idz)
elif selstrat in ('index', 'INDEX'):
matndat = calcindexzygsub(info, msvmsc, throrconst, sub_idz)
else:
for item in [selmale[0], selfm[0]]:
if item not in pd.unique(info.group.iloc[:, 0]).tolist():
sys.exit("Sex name does not match group names")
if selstrat in ('gebv', 'GEBV'):
matndat = pot_parents(info, calcgbv(info, None), selmale, selfm)
matndat = selsgebv(info.meff.columns.size, matndat,
calcgbv(info, None), maxmale)
elif selstrat in ('pbti', 'PBTI'):
probdf = calcprob(info, msvmsc, throrconst)
matndat = pot_parents(info, probdf, selmale, selfm)
matndat = selspbtizyg(info.meff.columns.size, calcgbv(info, None),
matndat, msvmsc, throrconst, maxmale)
elif selstrat in ('index', 'INDEX'):
indexdf = calcindex(info, msvmsc, throrconst)
matndat = pot_parents(info, indexdf, selmale, selfm)
matndat = selsindex(info.meff.columns.size, calcgbv(info, None),
matndat, msvmsc, throrconst, maxmale)
return matndat
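# Illustrative usage sketch, not part of the original module. It assumes `info` was
# created with this package's "datacheck" function and `msvmsc` with "msvarcov_g"
# (both referenced in the docstring above); the group codes "M"/"F", the selection
# fractions and the maxmale value below are made-up placeholders.
def _example_selstrat_z(info, msvmsc):
    """Select the top 10% of males and 50% of females, allocating each sire at most 5 dams."""
    return selstrat_z(selstrat="PBTI", info=info, sub_idz=None, msvmsc=msvmsc,
                      throrconst=0.05, selmale=["M", 0.1], selfm=["F", 0.5], maxmale=5)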
def agggenmsezygsub(no_markers, no_individuals, slist, slist1, indwt):
"""Set up add effects mat of agg gen (zygote) subset."""
mmfinal = np.empty((no_individuals, no_markers))
mmfinal1 = np.empty((no_individuals, no_markers))
for i in range(no_individuals):
tmpmt1 = np.zeros((indwt.size, no_markers))
tmpmt2 = np.zeros((indwt.size, no_markers))
for trt in range(indwt.size):
tmpmt1[trt, :] = slist[0][trt][i, :]
tmpmt2[trt, :] = slist1[0][trt][i, :]
mmfinal[i, :] = np.matmul(indwt.transpose(), tmpmt1)
mmfinal1[i, :] = np.matmul(indwt.transpose(), tmpmt2)
return mmfinal, mmfinal1
def writechrzyg(covtmpx, chrinterest, chrm, trtnam, stdsim):
"""Write matrices to file (zygote)."""
if isinstance(chrinterest, str):
if chrinterest == 'all':
chrfile1 = "{}/Sim mat zygotes {} chrm {}.npy".format(
os.getcwd(), trtnam, chrm) # output
|
np.save(chrfile1, covtmpx)
|
numpy.save
|
# List of function for the on-going p-value simulation #
import pandas as pd
import random
from matplotlib import pyplot as plt
import seaborn as sn
import numpy as np
import scipy.stats as stats
from scipy.stats import norm
from numpy import std, mean, sqrt
from scipy.stats import t
def cohen_d(x,y):
nx = len(x)
ny = len(y)
dof = nx + ny - 2
return (mean(x) - mean(y)) / sqrt(((nx-1)*std(x, ddof=1) ** 2 + (ny-1)*
|
std(y, ddof=1)
|
numpy.std
|
"""
Natively reading ODIM H5 radar files in Python.
@title: pyodim
@author: <NAME> <<EMAIL>>
@institutions: Bureau of Meteorology and Monash University.
@creation: 21/01/2020
@date: 19/02/2021
.. autosummary::
:toctree: generated/
_to_str
buffer
cartesian_to_geographic
check_nyquist
coord_from_metadata
field_metadata
generate_timestamp
get_dataset_metadata
get_root_metadata
radar_coordinates_to_xyz
read_odim_slice
read_odim
"""
import datetime
import traceback
from typing import Dict, List, Tuple
import dask
import h5py
import pyproj
import pandas as pd
import numpy as np
import xarray as xr
def _to_str(t: bytes) -> str:
"""
Transform binary into string.
"""
return t.decode("utf-8")
def buffer(func):
"""
    Decorator to catch an exception, print its traceback and return None instead of
    failing. Almost want to name the function dont_fail.
"""
def wrapper(*args, **kwargs):
try:
rslt = func(*args, **kwargs)
except Exception:
traceback.print_exc()
rslt = None
return rslt
return wrapper
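# Illustrative sketch, not part of the original module: `buffer` swallows an exception,
# prints its traceback and returns None instead of raising. `_safe_divide` is a
# hypothetical helper used only for demonstration.
@buffer
def _safe_divide(a: float, b: float) -> float:
    """Return a / b; with @buffer a ZeroDivisionError prints a traceback and yields None."""
    return a / b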
def cartesian_to_geographic(x: np.ndarray, y: np.ndarray, lon0: float, lat0: float) -> Tuple[np.ndarray, np.ndarray]:
"""
Transform cartesian coordinates to lat/lon using the Azimuth Equidistant
projection.
Parameters:
===========
x: ndarray
x-axis cartesian coordinates.
y: ndarray
y-axis cartesian coordinates. Same dimension as x
lon0: float
Radar site longitude.
lat0: float
Radar site latitude.
Returns:
lon: ndarray
Longitude of each gate.
lat: ndarray
Latitude of each gate.
"""
georef = pyproj.Proj(f"+proj=aeqd +lon_0={lon0} +lat_0={lat0} +ellps=WGS84")
lon, lat = georef(x, y, inverse=True)
lon = lon.astype(np.float32)
lat = lat.astype(np.float32)
return lon, lat
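# Illustrative sketch, not part of the original module: gate offsets of +/- 10 km around
# a radar site (the site coordinates below are made up) mapped back to longitude/latitude.
def _example_cartesian_to_geographic():
    x = np.array([-10000.0, 0.0, 10000.0])
    y = np.zeros(3)
    lon, lat = cartesian_to_geographic(x, y, lon0=144.75, lat0=-37.85)
    return lon, lat  # float32 arrays; lat stays close to -37.85 since y is zero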
@buffer
def check_nyquist(dset: xr.Dataset) -> None:
"""
Check if the dataset Nyquist velocity corresponds to the PRF information.
"""
wavelength = dset.attrs["wavelength"]
prf = dset.attrs["highprf"]
nyquist = dset.attrs["NI"]
ny_int = 1e-2 * prf * wavelength / 4
assert
|
np.abs(nyquist - ny_int)
|
numpy.abs
|
# -*- coding: utf-8 -*-
# BCDI: tools for pre(post)-processing Bragg coherent X-ray diffraction imaging data
# (c) 07/2017-06/2019 : CNRS UMR 7344 IM2NP
# (c) 07/2019-05/2021 : DESY PHOTON SCIENCE
# (c) 06/2021-present : DESY CFEL
# authors:
# <NAME>, <EMAIL>
"""Functions related to data postprocessing after phase retrieval."""
from numbers import Number, Real
from math import pi
import numpy as np
from numpy.fft import fftn, fftshift, ifftn, ifftshift
import scipy
import matplotlib.pyplot as plt
from scipy.ndimage.measurements import center_of_mass
from scipy.interpolate import RegularGridInterpolator
from scipy.stats import multivariate_normal
from scipy.stats import pearsonr
import gc
from ..graph import graph_utils as gu
from ..utils import image_registration as reg
from ..utils import utilities as util
from ..utils import validation as valid
def align_obj(
reference_obj,
obj,
method="modulus",
support_threshold=None,
precision=1000,
debugging=False,
):
"""
Align two arrays using dft registration and subpixel shift.
:param reference_obj: 3D array, reference complex object
:param obj: 3D array, complex density to average with
:param method: 'modulus', 'support' or 'skip'. Object to use for the determination
of the shift. If 'support', the parameter 'support_threshold' must also be
provided since the binary support is defined by thresholding the normalized
modulus.
:param support_threshold: all points where the normalized modulus is larger than
this value will be set to 1 in the support.
:param precision: precision for the DFT registration in 1/pixel
:param debugging: set to True to see plots
:type debugging: bool
:return: the aligned array
"""
if obj.ndim != 3 or reference_obj.ndim != 3:
raise ValueError("reference_obj and obj should be 3D arrays")
if obj.shape != reference_obj.shape:
print(
"reference_obj and obj do not have the same shape\n" " - reference_obj is ",
reference_obj.shape,
" - obj is ",
obj.shape,
)
print("crop/pad obj")
obj = util.crop_pad(array=obj, output_shape=reference_obj.shape)
# calculate the shift between the two arrays
if method == "modulus":
shiftz, shifty, shiftx = reg.getimageregistration(
abs(reference_obj), abs(obj), precision=precision
)
elif method == "support":
ref_support = np.zeros(reference_obj.shape)
ref_support[
abs(reference_obj) > support_threshold * abs(reference_obj).max()
] = 1
support = np.zeros(reference_obj.shape)
support[abs(obj) > support_threshold * abs(obj).max()] = 1
shiftz, shifty, shiftx = reg.getimageregistration(
ref_support, support, precision=precision
)
if debugging:
gu.multislices_plot(
abs(ref_support), sum_frames=False, title="Reference support"
)
gu.multislices_plot(
                abs(support), sum_frames=False, title="Support before alignment"
)
del ref_support, support
else: # 'skip'
print("\nSkipping alignment")
print(
"\tPearson correlation coefficient = {0:.3f}".format(
pearsonr(
np.ndarray.flatten(abs(reference_obj)), np.ndarray.flatten(abs(obj))
)[0]
)
)
return obj
# align obj using subpixel shift
new_obj = reg.subpixel_shift(obj, shiftz, shifty, shiftx) # keep the complex output
print(
"\tShift calculated from dft registration: (",
str("{:.2f}".format(shiftz)),
",",
str("{:.2f}".format(shifty)),
",",
str("{:.2f}".format(shiftx)),
") pixels",
)
print(
"\tPearson correlation coefficient = {0:.3f}".format(
pearsonr(
np.ndarray.flatten(abs(reference_obj)), np.ndarray.flatten(abs(new_obj))
)[0]
)
)
if debugging:
gu.multislices_plot(
abs(reference_obj), sum_frames=True, title="Reference object"
)
gu.multislices_plot(abs(new_obj), sum_frames=True, title="Aligned object")
return new_obj
def apodize(amp, phase, initial_shape, window_type, debugging=False, **kwargs):
"""
Apodize the complex array based on the window of the same shape.
:param amp: 3D array, amplitude before apodization
:param phase: 3D array, phase before apodization
:param initial_shape: shape of the FFT used for phasing
:param window_type: window filtering function, 'normal' or 'tukey' or 'blackman'
:param debugging: set to True to see plots
:type debugging: bool
:param kwargs:
- for the normal distribution: 'sigma' and 'mu' of the 3d multivariate normal
distribution, tuples of 3 floats
        - for the Tukey window: 'alpha' (shape parameter) of the 3d Tukey window,
tuple of 3 floats
- 'is_orthogonal': True if the data is in an orthonormal frame. Used for defining
default plot labels.
    :return: filtered amplitude and phase, of the same shape as amp
"""
# check and load kwargs
valid.valid_kwargs(
kwargs=kwargs,
allowed_kwargs={"sigma", "mu", "alpha", "is_orthogonal"},
name="postprocessing_utils.apodize",
)
sigma = kwargs.get("sigma")
mu = kwargs.get("mu")
alpha = kwargs.get("alpha")
is_orthogonal = kwargs.get("is_orthogonal", False)
if amp.ndim != 3 or phase.ndim != 3:
raise ValueError("amp and phase should be 3D arrays")
if amp.shape != phase.shape:
raise ValueError(
"amp and phase must have the same shape\n" "amp is ",
amp.shape,
" while phase is ",
phase.shape,
)
# calculate the diffraction pattern of the reconstructed object
nb_z, nb_y, nb_x = amp.shape
nbz, nby, nbx = initial_shape
myobj = util.crop_pad(amp * np.exp(1j * phase), (nbz, nby, nbx))
del amp, phase
gc.collect()
if debugging:
gu.multislices_plot(
array=abs(myobj),
sum_frames=False,
plot_colorbar=True,
title="modulus before apodization",
reciprocal_space=False,
is_orthogonal=is_orthogonal,
scale="linear",
)
my_fft = fftshift(fftn(myobj))
del myobj
gc.collect()
fftmax = abs(my_fft).max()
print("Max FFT=", fftmax)
if debugging:
gu.multislices_plot(
array=abs(my_fft),
sum_frames=False,
plot_colorbar=True,
title="diffraction amplitude before apodization",
reciprocal_space=True,
is_orthogonal=is_orthogonal,
scale="log",
)
if window_type == "normal":
print("Apodization using a 3d multivariate normal window")
sigma = sigma or np.array([0.3, 0.3, 0.3])
mu = mu or np.array([0.0, 0.0, 0.0])
grid_z, grid_y, grid_x = np.meshgrid(
np.linspace(-1, 1, nbz),
np.linspace(-1, 1, nby),
np.linspace(-1, 1, nbx),
indexing="ij",
)
covariance = np.diag(sigma ** 2)
window = multivariate_normal.pdf(
np.column_stack([grid_z.flat, grid_y.flat, grid_x.flat]),
mean=mu,
cov=covariance,
)
del grid_z, grid_y, grid_x
gc.collect()
window = window.reshape((nbz, nby, nbx))
elif window_type == "tukey":
print("Apodization using a 3d Tukey window")
alpha = alpha or np.array([0.5, 0.5, 0.5])
window = tukey_window(initial_shape, alpha=alpha)
elif window_type == "blackman":
print("Apodization using a 3d Blackman window")
window = blackman_window(initial_shape)
else:
raise ValueError("Invalid window type")
my_fft = np.multiply(my_fft, window)
del window
gc.collect()
my_fft = my_fft * fftmax / abs(my_fft).max()
print("Max apodized FFT after normalization =", abs(my_fft).max())
if debugging:
gu.multislices_plot(
array=abs(my_fft),
sum_frames=False,
plot_colorbar=True,
title="diffraction amplitude after apodization",
reciprocal_space=True,
is_orthogonal=is_orthogonal,
scale="log",
)
myobj = ifftn(ifftshift(my_fft))
del my_fft
gc.collect()
if debugging:
gu.multislices_plot(
array=abs(myobj),
sum_frames=False,
plot_colorbar=True,
title="modulus after apodization",
reciprocal_space=False,
is_orthogonal=is_orthogonal,
scale="linear",
)
myobj = util.crop_pad(
myobj, (nb_z, nb_y, nb_x)
) # return to the initial shape of myamp
return abs(myobj), np.angle(myobj)
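# Illustrative sketch, not part of the original module: apodize a random complex object
# with a 3D Blackman window (the array values and shapes below are made up).
def _example_apodize():
    rng = np.random.default_rng(0)
    amp = rng.random((32, 32, 32))
    phase = rng.random((32, 32, 32)) - 0.5
    new_amp, new_phase = apodize(amp, phase, initial_shape=(32, 32, 32),
                                 window_type="blackman")
    return new_amp, new_phase  # same shape as the input amp and phase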
def average_obj(
avg_obj,
ref_obj,
obj,
support_threshold=0.25,
correlation_threshold=0.90,
aligning_option="dft",
width_z=None,
width_y=None,
width_x=None,
method="reciprocal_space",
debugging=False,
**kwargs,
):
"""
    Average two reconstructions after aligning them.
    The average is performed only if the cross-correlation of the aligned objects is
    larger than the parameter correlation_threshold.
:param avg_obj: 3D array, average complex density
:param ref_obj: 3D array, reference complex object
:param obj: 3D array, complex density to average with
:param support_threshold: for support definition
:param correlation_threshold: minimum correlation between two dataset to average
them
:param aligning_option: 'com' for center of mass, 'dft' for dft registration and
subpixel shift
:param width_z: size of the area to plot in z (axis 0), centered on the middle of
the initial array
:param width_y: size of the area to plot in y (axis 1), centered on the middle of
the initial array
:param width_x: size of the area to plot in x (axis 2), centered on the middle of
the initial array
:param method: 'real_space' or 'reciprocal_space', in which space the average will
be performed
:param debugging: set to True to see plots
:type debugging: bool
:param kwargs:
- 'reciprocal_space': True if the object is in reciprocal space
- 'is_orthogonal': True if the data is in an orthonormal frame. Used for defining
default plot labels.
:return: the average complex density
"""
# check and load kwargs
valid.valid_kwargs(
kwargs=kwargs,
allowed_kwargs={"reciprocal_space", "is_orthogonal"},
name="postprocessing_utils.average_obj",
)
reciprocal_space = kwargs.get("reciprocal_space", False)
is_orthogonal = kwargs.get("is_orthogonal", False)
if obj.ndim != 3 or avg_obj.ndim != 3 or ref_obj.ndim != 3:
raise ValueError("avg_obj, ref_obj and obj should be 3D arrays")
if obj.shape != avg_obj.shape or obj.shape != ref_obj.shape:
raise ValueError(
"avg_obj, ref_obj and obj must have the same shape\n" "avg_obj is ",
avg_obj.shape,
" - ref_obj is ",
ref_obj.shape,
" - obj is ",
obj.shape,
)
nbz, nby, nbx = obj.shape
avg_flag = 0
if avg_obj.sum() == 0:
avg_obj = ref_obj
if debugging:
gu.multislices_plot(
abs(avg_obj),
width_z=width_z,
width_y=width_y,
width_x=width_x,
plot_colorbar=True,
sum_frames=True,
title="Reference object",
reciprocal_space=reciprocal_space,
is_orthogonal=is_orthogonal,
)
else:
myref_support = np.zeros((nbz, nby, nbx))
myref_support[abs(ref_obj) > support_threshold * abs(ref_obj).max()] = 1
my_support = np.zeros((nbz, nby, nbx))
my_support[abs(obj) > support_threshold * abs(obj).max()] = 1
avg_piz, avg_piy, avg_pix = center_of_mass(abs(myref_support))
piz, piy, pix = center_of_mass(abs(my_support))
offset_z = avg_piz - piz
offset_y = avg_piy - piy
offset_x = avg_pix - pix
print(
"center of mass offset with reference object: (",
str("{:.2f}".format(offset_z)),
",",
str("{:.2f}".format(offset_y)),
",",
str("{:.2f}".format(offset_x)),
") pixels",
)
if aligning_option == "com":
# re-sample data on a new grid based on COM shift of support
old_z = np.arange(-nbz // 2, nbz // 2)
old_y = np.arange(-nby // 2, nby // 2)
old_x = np.arange(-nbx // 2, nbx // 2)
myz, myy, myx = np.meshgrid(old_z, old_y, old_x, indexing="ij")
new_z = myz + offset_z
new_y = myy + offset_y
new_x = myx + offset_x
del myx, myy, myz
rgi = RegularGridInterpolator(
(old_z, old_y, old_x),
obj,
method="linear",
bounds_error=False,
fill_value=0,
)
new_obj = rgi(
np.concatenate(
(
new_z.reshape((1, new_z.size)),
new_y.reshape((1, new_z.size)),
new_x.reshape((1, new_z.size)),
)
).transpose()
)
new_obj = new_obj.reshape((nbz, nby, nbx)).astype(obj.dtype)
else:
# dft registration and subpixel shift (see Matlab code)
shiftz, shifty, shiftx = reg.getimageregistration(
abs(ref_obj), abs(obj), precision=1000
)
new_obj = reg.subpixel_shift(
obj, shiftz, shifty, shiftx
) # keep the complex output here
print(
"Shift calculated from dft registration: (",
str("{:.2f}".format(shiftz)),
",",
str("{:.2f}".format(shifty)),
",",
str("{:.2f}".format(shiftx)),
") pixels",
)
new_obj = new_obj / abs(new_obj).max() # renormalize
correlation = pearsonr(
np.ndarray.flatten(abs(ref_obj[np.nonzero(myref_support)])),
np.ndarray.flatten(abs(new_obj[np.nonzero(myref_support)])),
)[0]
if correlation < correlation_threshold:
print(
"pearson cross-correlation=",
correlation,
"too low, skip this reconstruction",
)
else:
print(
"pearson-correlation=",
correlation,
", average with this reconstruction",
)
if debugging:
myfig, _, _ = gu.multislices_plot(
abs(new_obj),
width_z=width_z,
width_y=width_y,
width_x=width_x,
sum_frames=True,
plot_colorbar=True,
title="Aligned object",
reciprocal_space=reciprocal_space,
is_orthogonal=is_orthogonal,
)
myfig.text(
0.60,
0.30,
"pearson-correlation = " + str("{:.4f}".format(correlation)),
size=20,
)
if method == "real_space":
avg_obj = avg_obj + new_obj
elif method == "reciprocal_space":
avg_obj = ifftn(fftn(avg_obj) + fftn(obj))
else:
raise ValueError('method should be "real_space" or "reciprocal_space"')
avg_flag = 1
if debugging:
gu.multislices_plot(
abs(avg_obj),
plot_colorbar=True,
width_z=width_z,
width_y=width_y,
width_x=width_x,
sum_frames=True,
title="New averaged object",
reciprocal_space=reciprocal_space,
is_orthogonal=is_orthogonal,
)
return avg_obj, avg_flag
def blackman_window(shape, normalization=1):
"""
Create a 3d Blackman window based on shape.
:param shape: tuple, shape of the 3d window
    :param normalization: value of the integral of the blackman window
:return: the 3d Blackman window
"""
nbz, nby, nbx = shape
array_z = np.blackman(nbz)
array_y = np.blackman(nby)
array_x = np.blackman(nbx)
blackman2 = np.ones((nbz, nby))
blackman3 = np.ones((nbz, nby, nbx))
for idz in range(nbz):
blackman2[idz, :] = array_z[idz] * array_y
for idy in range(nby):
blackman3[idz, idy] = blackman2[idz, idy] * array_x
blackman3 = blackman3 / blackman3.sum() * normalization
return blackman3
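# Illustrative sketch, not part of the original module: the window is a separable product
# of 1D Blackman windows, rescaled so that its sum equals `normalization`.
def _example_blackman_window():
    win = blackman_window((32, 32, 32), normalization=1)
    return win.shape, float(win.sum())  # (32, 32, 32) and ~1.0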
def bragg_temperature(
spacing,
reflection,
spacing_ref=None,
temperature_ref=None,
use_q=False,
material=None,
):
"""
Calculate the temperature from Bragg peak position.
:param spacing: q or planar distance, in inverse angstroms or angstroms
:param reflection: measured reflection, e.g. np.array([1, 1, 1])
:param spacing_ref: reference spacing at known temperature
(include substrate-induced strain)
:param temperature_ref: in K, known temperature for the reference spacing
:param use_q: set to True to use q, False to use planar distance
:type use_q: bool
:param material: at the moment only 'Pt'
:return: calculated temperature
"""
print("\n")
if material == "Pt":
# reference values for Pt: temperature in K, thermal expansion x 10^6 in 1/K,
# lattice parameter in angstroms
expansion_data = np.array(
[
[100, 6.77, 3.9173],
[110, 7.10, 3.9176],
[120, 7.37, 3.9179],
[130, 7.59, 3.9182],
[140, 7.78, 3.9185],
[150, 7.93, 3.9188],
[160, 8.07, 3.9191],
[180, 8.29, 3.9198],
[200, 8.46, 3.9204],
[220, 8.59, 3.9211],
[240, 8.70, 3.9218],
[260, 8.80, 3.9224],
[280, 8.89, 3.9231],
[293.15, 8.93, 3.9236],
[300, 8.95, 3.9238],
[400, 9.25, 3.9274],
[500, 9.48, 3.9311],
[600, 9.71, 3.9349],
[700, 9.94, 3.9387],
[800, 10.19, 3.9427],
[900, 10.47, 3.9468],
[1000, 10.77, 3.9510],
[1100, 11.10, 3.9553],
[1200, 11.43, 3.9597],
]
)
if spacing_ref is None:
print("Using the reference spacing of Platinum")
spacing_ref = 3.9236 / np.linalg.norm(reflection) # angstroms
if temperature_ref is None:
temperature_ref = 293.15 # K
else:
raise ValueError('Only "Pt" available for temperature estimation')
if use_q:
spacing = 2 * np.pi / spacing # go back to distance
spacing_ref = 2 * np.pi / spacing_ref # go back to distance
spacing = spacing * np.linalg.norm(reflection) # go back to lattice constant
spacing_ref = spacing_ref * np.linalg.norm(
reflection
) # go back to lattice constant
print(
"Reference spacing at",
temperature_ref,
"K =",
str("{:.4f}".format(spacing_ref)),
"angstroms",
)
print(
"Spacing =",
str("{:.4f}".format(spacing)),
"angstroms using reflection",
reflection,
)
# fit the experimental spacing with non corrected platinum curve
myfit = np.poly1d(np.polyfit(expansion_data[:, 2], expansion_data[:, 0], 3))
print("Temperature without offset correction=", int(myfit(spacing) - 273.15), "C")
# find offset for platinum reference curve
myfit = np.poly1d(np.polyfit(expansion_data[:, 0], expansion_data[:, 2], 3))
spacing_offset = (
myfit(temperature_ref) - spacing_ref
) # T in K, spacing in angstroms
print("Spacing offset =", str("{:.4f}".format(spacing_offset)), "angstroms")
# correct the platinum reference curve for the offset
platinum_offset = np.copy(expansion_data)
platinum_offset[:, 2] = platinum_offset[:, 2] - spacing_offset
myfit = np.poly1d(np.polyfit(platinum_offset[:, 2], platinum_offset[:, 0], 3))
mytemp = int(myfit(spacing) - 273.15)
print("Temperature with offset correction=", mytemp, "C")
return mytemp
def calc_coordination(
support,
kernel=np.ones((3, 3, 3)),
width_z=None,
width_y=None,
width_x=None,
debugging=False,
):
"""
    Calculate the coordination number of voxels in a support (number of neighbours).
:param support: 3D support array
:param kernel: kernel used for convolution with the support
:param width_z: size of the area to plot in z (axis 0), centered on the middle
of the initial array
:param width_y: size of the area to plot in y (axis 1), centered on the middle
of the initial array
:param width_x: size of the area to plot in x (axis 2), centered on the middle
of the initial array
:param debugging: set to True to see plots
:type debugging: bool
:return: the coordination matrix
"""
from scipy.signal import convolve
if support.ndim != 3:
raise ValueError("Support should be a 3D array")
mycoord = np.rint(convolve(support, kernel, mode="same"))
mycoord = mycoord.astype(int)
if debugging:
gu.multislices_plot(
support,
width_z=width_z,
width_y=width_y,
width_x=width_x,
vmin=0,
is_orthogonal=True,
reciprocal_space=False,
title="Input support",
)
gu.multislices_plot(
mycoord,
plot_colorbar=True,
width_z=width_z,
width_y=width_y,
width_x=width_x,
vmin=0,
is_orthogonal=True,
reciprocal_space=False,
title="Coordination matrix",
)
return mycoord
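# Illustrative sketch, not part of the original module: with the default 3x3x3 kernel, a
# voxel deep inside a filled cube counts 27 neighbours (itself included), while a corner
# voxel of the cube counts 8.
def _example_calc_coordination():
    support = np.zeros((10, 10, 10))
    support[2:8, 2:8, 2:8] = 1
    coord = calc_coordination(support)
    return coord[5, 5, 5], coord[2, 2, 2]  # 27 and 8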
def center_com(array, debugging=False, **kwargs):
"""
Center array based on center_of_mass(abs(array)) using pixel shift.
:param array: 3D array to be centered based on the center of mass of abs(array)
:param debugging: boolean, True to see plots
:param kwargs:
- width_z: size of the area to plot in z (axis 0), centered on the middle
of the initial array
- width_y: size of the area to plot in y (axis 1), centered on the middle
of the initial array
- width_x: size of the area to plot in x (axis 2), centered on the middle
of the initial array
:return: array centered by pixel shift
"""
#########################
# check and load kwargs #
#########################
valid.valid_kwargs(
kwargs=kwargs, allowed_kwargs={"width_z", "width_y", "width_x"}, name="kwargs"
)
width_z = kwargs.get("width_z")
valid.valid_item(
value=width_z,
allowed_types=int,
min_excluded=0,
allow_none=True,
name="width_z",
)
width_y = kwargs.get("width_y")
valid.valid_item(
value=width_y,
allowed_types=int,
min_excluded=0,
allow_none=True,
name="width_y",
)
width_x = kwargs.get("width_x")
valid.valid_item(
value=width_x,
allowed_types=int,
min_excluded=0,
allow_none=True,
name="width_x",
)
#########################
# check some parameters #
#########################
if array.ndim != 3:
raise ValueError("array should be a 3D array")
#########################################
# find the offset of the center of mass #
#########################################
nbz, nby, nbx = array.shape
piz, piy, pix = center_of_mass(abs(array))
offset_z = int(np.rint(nbz / 2.0 - piz))
offset_y = int(np.rint(nby / 2.0 - piy))
offset_x = int(np.rint(nbx / 2.0 - pix))
if debugging:
gu.multislices_plot(
abs(array),
width_z=width_z,
width_y=width_y,
width_x=width_x,
title="Before COM centering",
)
print(
"center of mass at (z, y, x): (",
str("{:.2f}".format(piz)),
",",
str("{:.2f}".format(piy)),
",",
str("{:.2f}".format(pix)),
")",
)
print(
"center of mass offset: (",
offset_z,
",",
offset_y,
",",
offset_x,
") pixels",
)
#####################
# center the object #
#####################
array = np.roll(array, (offset_z, offset_y, offset_x), axis=(0, 1, 2))
if debugging:
gu.multislices_plot(
abs(array),
width_z=width_z,
width_y=width_y,
width_x=width_x,
title="After COM centering",
)
return array
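# Illustrative sketch, not part of the original module: a small off-center cube is rolled
# so that its center of mass lands in the middle of the array.
def _example_center_com():
    obj = np.zeros((32, 32, 32))
    obj[4:8, 4:8, 4:8] = 1  # cube far from the array center
    centered = center_com(obj)
    return center_of_mass(abs(centered))  # close to (15.5, 15.5, 15.5)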
def center_max(array, debugging=False, **kwargs):
"""
Center array based on max(abs(array)) using pixel shift.
:param array: 3D array to be centered based on max(abs(array))
:param debugging: boolean, True to see plots
:param kwargs:
- width_z: size of the area to plot in z (axis 0), centered on the middle
of the initial array
- width_y: size of the area to plot in y (axis 1), centered on the middle
of the initial array
- width_x: size of the area to plot in x (axis 2), centered on the middle
of the initial array
:return: array centered by pixel shift
"""
#########################
# check and load kwargs #
#########################
valid.valid_kwargs(
kwargs=kwargs, allowed_kwargs={"width_z", "width_y", "width_x"}, name="kwargs"
)
width_z = kwargs.get("width_z")
valid.valid_item(
value=width_z,
allowed_types=int,
min_excluded=0,
allow_none=True,
name="width_z",
)
width_y = kwargs.get("width_y")
valid.valid_item(
value=width_y,
allowed_types=int,
min_excluded=0,
allow_none=True,
name="width_y",
)
width_x = kwargs.get("width_x")
valid.valid_item(
value=width_x,
allowed_types=int,
min_excluded=0,
allow_none=True,
name="width_x",
)
#########################
# check some parameters #
#########################
if array.ndim != 3:
raise ValueError("array should be a 3D array")
##################################################################
# find the offset of the max relative to the center of the array #
##################################################################
nbz, nby, nbx = array.shape
piz, piy, pix = np.unravel_index(abs(array).argmax(), array.shape)
offset_z = int(np.rint(nbz / 2.0 - piz))
offset_y = int(np.rint(nby / 2.0 - piy))
offset_x = int(np.rint(nbx / 2.0 - pix))
if debugging:
gu.multislices_plot(
abs(array),
width_z=width_z,
width_y=width_y,
width_x=width_x,
title="Before max centering",
)
print("Max at (z, y, x): (", piz, ",", piy, ",", pix, ")")
print("Max offset: (", offset_z, ",", offset_y, ",", offset_x, ") pixels")
#####################
# center the object #
#####################
array = np.roll(array, (offset_z, offset_y, offset_x), axis=(0, 1, 2))
if debugging:
gu.multislices_plot(
abs(array),
width_z=width_z,
width_y=width_y,
width_x=width_x,
title="After max centering",
)
return array
def filter_3d(
array, filter_name="gaussian_highpass", kernel_length=21, debugging=False, **kwargs
):
"""
    Apply a filter to the array by convolving it with a filtering kernel.
:param array: 2D or 3D array to be filtered
:param filter_name: name of the filter, 'gaussian', 'gaussian_highpass'
:param kernel_length: length in pixels of the filtering kernel
:param debugging: True to see a plot of the kernel
:param kwargs:
- 'sigma': sigma of the gaussian kernel
"""
from scipy.signal import convolve
# check and load kwargs
valid.valid_kwargs(
kwargs=kwargs, allowed_kwargs={"sigma"}, name="postprocessing_utils.filter_3d"
)
sigma = kwargs.get("sigma")
ndim = array.ndim
if ndim not in {2, 3}:
raise ValueError("data should be a 2D or a 3D array")
if filter_name == "gaussian_highpass":
sigma = sigma or 3
kernel = gaussian_kernel(
ndim=ndim, kernel_length=kernel_length, sigma=sigma, debugging=debugging
)
return array - convolve(array, kernel, mode="same")
if filter_name == "gaussian":
sigma = sigma or 0.5
kernel = gaussian_kernel(
ndim=ndim, kernel_length=kernel_length, sigma=sigma, debugging=debugging
)
return convolve(array, kernel, mode="same")
raise ValueError("Only the gaussian_kernel is implemented up to now.")
def find_bulk(
amp,
support_threshold,
method="threshold",
width_z=None,
width_y=None,
width_x=None,
debugging=False,
):
"""
Isolate the inner part of the crystal from the non-physical surface.
:param amp: 3D array, reconstructed object amplitude
:param support_threshold: threshold for isosurface determination
:param method: 'threshold' or 'defect'. If 'defect', removes layer by layer using
the coordination number.
:param width_z: size of the area to plot in z (axis 0), centered on the middle
of the initial array
:param width_y: size of the area to plot in y (axis 1), centered on the middle
of the initial array
:param width_x: size of the area to plot in x (axis 2), centered on the middle
of the initial array
:param debugging: set to True to see plots
:type debugging: bool
:return: the support corresponding to the bulk
"""
if amp.ndim != 3:
raise ValueError("amp should be a 3D array")
nbz, nby, nbx = amp.shape
max_amp = abs(amp).max()
support = np.ones((nbz, nby, nbx))
if method == "threshold":
support[abs(amp) < support_threshold * max_amp] = 0
else:
support[abs(amp) < 0.05 * max_amp] = 0 # predefine a larger support
mykernel = np.ones((9, 9, 9))
mycoordination_matrix = calc_coordination(
support, kernel=mykernel, debugging=debugging
)
outer = np.copy(mycoordination_matrix)
outer[np.nonzero(outer)] = 1
if mykernel.shape == np.ones((9, 9, 9)).shape:
outer[
mycoordination_matrix > 300
] = 0 # start with a larger object, the mean surface amplitude is ~ 5%
else:
raise ValueError("Kernel not yet implemented")
outer[mycoordination_matrix == 0] = 1 # corresponds to outside of the crystal
if debugging:
gu.multislices_plot(
outer,
width_z=width_z,
width_y=width_y,
width_x=width_x,
vmin=0,
vmax=1,
title="Outer matrix",
)
############################################################################
# remove layer by layer until the correct isosurface is reached on average #
############################################################################
        nb_voxels = 1  # initialize this counter, which corresponds to
        # the number of surface voxels not yet included in outer
        idx = 0
while nb_voxels > 0: # nb of voxels not included in outer
# first step: find the first underlayer
mycoordination_matrix = calc_coordination(
outer, kernel=mykernel, debugging=debugging
)
surface = np.copy(mycoordination_matrix)
surface[np.nonzero(surface)] = 1
surface[mycoordination_matrix > 389] = 0 # remove part from outer 389
outer[
mycoordination_matrix > 389
] = 1 # include points left over by the coordination number selection
surface[mycoordination_matrix < 362] = 0 # remove part from bulk 311
# below is to exclude from surface the frame outer part
surface[0:5, :, :] = 0
surface[:, 0:5, :] = 0
surface[:, :, 0:5] = 0
surface[nbz - 6 : nbz, :, :] = 0
surface[:, nby - 6 : nby, :] = 0
surface[:, :, nbx - 6 : nbx] = 0
if debugging:
gu.multislices_plot(
surface,
width_z=width_z,
width_y=width_y,
width_x=width_x,
vmin=0,
vmax=1,
title="Surface matrix",
)
# second step: calculate the % of voxels from that layer whose amplitude
# is lower than support_threshold
nb_voxels = surface[np.nonzero(surface)].sum()
keep_voxels = surface[abs(amp) >= support_threshold * max_amp].sum()
voxels_counter = (
keep_voxels / nb_voxels
) # % of voxels whose amplitude is larger than support_threshold
mean_amp = np.mean(amp[np.nonzero(surface)].flatten()) / max_amp
print(
"number of surface voxels =",
nb_voxels,
" , % of surface voxels above threshold =",
str("{:.2f}".format(100 * voxels_counter)),
"% , mean surface amplitude =",
mean_amp,
)
if mean_amp < support_threshold:
outer[np.nonzero(surface)] = 1
idx = idx + 1
else:
print("Surface of object reached after", idx, "iterations")
break
support_defect = np.ones((nbz, nby, nbx)) - outer
support = np.ones((nbz, nby, nbx))
support[abs(amp) < support_threshold * max_amp] = 0
# add voxels detected by support_defect
support[np.nonzero(support_defect)] = 1
return support
def find_crop_center(array_shape, crop_shape, pivot):
"""
Find the position of the center of the cropping window.
    It finds the closest voxel to pivot which allows cropping an array of array_shape to
crop_shape.
:param array_shape: initial shape of the array
:type array_shape: tuple
:param crop_shape: final shape of the array
:type crop_shape: tuple
    :param pivot: position on which the final region of interest should be centered
(center of mass of the Bragg peak)
:type pivot: tuple
:return: the voxel position closest to pivot which allows cropping to the defined
shape.
"""
valid.valid_container(
array_shape,
container_types=(tuple, list, np.ndarray),
min_length=1,
item_types=int,
name="array_shape",
)
ndim = len(array_shape)
valid.valid_container(
crop_shape,
container_types=(tuple, list, np.ndarray),
length=ndim,
item_types=int,
name="crop_shape",
)
valid.valid_container(
pivot,
container_types=(tuple, list, np.ndarray),
length=ndim,
item_types=int,
name="pivot",
)
crop_center = np.empty(ndim)
for idx, dim in enumerate(range(ndim)):
if max(0, pivot[idx] - crop_shape[idx] // 2) == 0:
# not enough range on this side of the com
crop_center[idx] = crop_shape[idx] // 2
else:
if (
min(array_shape[idx], pivot[idx] + crop_shape[idx] // 2)
== array_shape[idx]
):
# not enough range on this side of the com
crop_center[idx] = array_shape[idx] - crop_shape[idx] // 2
else:
crop_center[idx] = pivot[idx]
crop_center = list(map(lambda x: int(x), crop_center))
return crop_center
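# Illustrative sketch, not part of the original module, with made-up shapes: the returned
# center is clamped so that a (64, 64, 64) crop always fits inside a (100, 100, 100) array.
def _example_find_crop_center():
    center = find_crop_center(array_shape=(100, 100, 100),
                              crop_shape=(64, 64, 64),
                              pivot=(20, 50, 90))
    return center  # [32, 50, 68]: clamped low in z, unchanged in y, clamped high in x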
def find_datarange(array, plot_margin=10, amplitude_threshold=0.1, keep_size=False):
"""
Find the range where data is larger than a threshold.
It finds the meaningful range of the array where it is larger than the threshold, in
    order to reduce the memory consumption in later processing. The range can be
    larger than the initial data size, in which case the data will need to be padded.
:param array: the complex 3D reconstruction
:param plot_margin: user-defined margin to add to the minimum range of the data
:param amplitude_threshold: threshold used to define a support from the amplitude
:param keep_size: set to True in order to keep the dataset full size
:return:
- zrange: half size of the data range to use in the first axis (Z)
- yrange: half size of the data range to use in the second axis (Y)
- xrange: half size of the data range to use in the third axis (X)
"""
nbz, nby, nbx = array.shape
#########################
# check some parameters #
#########################
if not isinstance(array, np.ndarray):
raise TypeError("array should be a numpy ndarray")
if array.ndim != 3:
raise ValueError("array should be 3D")
if isinstance(plot_margin, Number):
plot_margin = (plot_margin,) * 3
valid.valid_container(
plot_margin,
container_types=(tuple, list, np.ndarray),
length=3,
item_types=int,
name="plot_margin",
)
valid.valid_item(
amplitude_threshold,
allowed_types=Real,
min_included=0,
name="amplitude_threshold",
)
#########################################################
# find the relevant range where the support is non-zero #
#########################################################
if keep_size:
return nbz // 2, nby // 2, nbx // 2
support = np.zeros((nbz, nby, nbx))
support[abs(array) > amplitude_threshold * abs(array).max()] = 1
z, y, x = np.meshgrid(
np.arange(0, nbz, 1), np.arange(0, nby, 1), np.arange(0, nbx, 1), indexing="ij"
)
z = z * support
min_z = min(int(np.min(z[np.nonzero(z)])), nbz - int(np.max(z[np.nonzero(z)])))
y = y * support
min_y = min(int(np.min(y[np.nonzero(y)])), nby - int(np.max(y[np.nonzero(y)])))
x = x * support
min_x = min(int(np.min(x[np.nonzero(x)])), nbx - int(np.max(x[np.nonzero(x)])))
zrange = nbz // 2 - min_z
yrange = nby // 2 - min_y
xrange = nbx // 2 - min_x
if plot_margin is not None:
zrange += plot_margin[0]
yrange += plot_margin[1]
xrange += plot_margin[2]
return zrange, yrange, xrange
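# Illustrative sketch, not part of the original module: for a centered 8-voxel-wide cube
# in a 32**3 array, the half-ranges are 4 voxels plus the requested plot margin.
def _example_find_datarange():
    obj = np.zeros((32, 32, 32), dtype=complex)
    obj[12:20, 12:20, 12:20] = 1
    return find_datarange(obj, plot_margin=10, amplitude_threshold=0.1)  # (14, 14, 14)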
def flip_reconstruction(obj, debugging=False):
"""
Calculate the conjugate object giving the same diffracted intensity as 'obj'.
:param obj: 3D reconstructed complex object
:param debugging: set to True to see plots
:type debugging: bool
:return: the flipped complex object
"""
if obj.ndim != 3:
raise ValueError("obj should be a 3D array")
flipped_obj = ifftn(ifftshift(np.conj(fftshift(fftn(obj)))))
if debugging:
gu.multislices_plot(
abs(obj),
vmin=0,
sum_frames=False,
plot_colorbar=True,
title="Initial object",
)
gu.multislices_plot(
abs(flipped_obj),
vmin=0,
sum_frames=False,
plot_colorbar=True,
title="Flipped object",
)
return flipped_obj
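# Illustrative sketch, not part of the original module: the flipped (conjugate) object
# produces the same diffracted intensity as the original, illustrating the twin-image
# ambiguity of phase retrieval.
def _example_flip_reconstruction():
    rng = np.random.default_rng(2)
    obj = rng.random((8, 8, 8)) + 1j * rng.random((8, 8, 8))
    flipped = flip_reconstruction(obj)
    return np.allclose(abs(fftn(obj)), abs(fftn(flipped)))  # True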
def gap_detector(data, mask, start_pixel, width_gap):
"""
Reproduce a detector gap in reciprocal space data and mask.
:param data: the 3D reciprocal space data
:param mask: the corresponding 3D mask
:param start_pixel: pixel number where the gap starts
:param width_gap: width of the gap in pixels
:return: data and mask array with a gap
"""
if data.ndim != 3 or mask.ndim != 3:
raise ValueError("data and mask should be 3d arrays")
if data.shape != mask.shape:
raise ValueError("data and mask should have the same shape")
data[:, :, start_pixel : start_pixel + width_gap] = 0
data[:, start_pixel : start_pixel + width_gap, :] = 0
mask[:, :, start_pixel : start_pixel + width_gap] = 1
mask[:, start_pixel : start_pixel + width_gap, :] = 1
return data, mask
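# Illustrative sketch, not part of the original module: carve a 20-pixel-wide cross-shaped
# gap into fake intensity data and flag the same pixels in the mask.
def _example_gap_detector():
    data = np.ones((4, 128, 128))
    mask = np.zeros((4, 128, 128))
    data, mask = gap_detector(data, mask, start_pixel=60, width_gap=20)
    return data[:, :, 60:80].sum(), mask[:, 60:80, :].max()  # 0.0 and 1.0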
def gaussian_kernel(ndim, kernel_length=21, sigma=3, debugging=False):
"""
Generate 2D or 3D Gaussian kernels.
:param ndim: number of dimensions of the kernel, 2 or 3
:param kernel_length: length in pixels of the filtering kernel
:param sigma: sigma of the gaussian pdf
:param debugging: True to see plots
:return: a 2D or 3D Gaussian kernel
"""
from scipy.stats import norm
if kernel_length % 2 == 0:
raise ValueError("kernel_length should be an even number")
half_range = kernel_length // 2
kernel_1d = norm.pdf(np.arange(-half_range, half_range + 1, 1), 0, sigma)
if ndim == 2:
kernel = np.ones((kernel_length, kernel_length))
for idy in range(kernel_length):
kernel[idy, :] = kernel_1d[idy] * kernel_1d
if debugging:
plt.figure()
plt.imshow(kernel)
plt.colorbar()
plt.title("Gaussian kernel")
plt.pause(0.1)
elif ndim == 3:
kernel_2d = np.ones((kernel_length, kernel_length))
kernel = np.ones((kernel_length, kernel_length, kernel_length))
for idz in range(kernel_length):
kernel_2d[idz, :] = kernel_1d[idz] * kernel_1d
for idy in range(kernel_length):
kernel[idz, idy] = kernel_2d[idz, idy] * kernel_1d
if debugging:
plt.figure()
plt.imshow(kernel[half_range, :, :])
plt.colorbar()
plt.title("Central slice of the Gaussian kernel")
plt.pause(0.1)
else:
raise ValueError("This function generates only 2D or 3D kernels")
return kernel
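# Illustrative sketch, not part of the original module: a 21-pixel 3D Gaussian kernel
# peaks at its central voxel, here index (10, 10, 10).
def _example_gaussian_kernel():
    kernel = gaussian_kernel(ndim=3, kernel_length=21, sigma=3, debugging=False)
    return kernel.shape, kernel[10, 10, 10] == kernel.max()  # (21, 21, 21) and True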
def get_opticalpath(support, direction, k, voxel_size=None, debugging=False, **kwargs):
"""
Calculate the optical path for refraction/absorption corrections in the crystal.
'k' should be in the same basis (crystal or laboratory frame) as the data. For
xrayutilities, the data is orthogonalized in crystal frame.
:param support: 3D array, support used for defining the object
:param direction: "in" or "out" , incident or diffracted wave
:param k: vector for the incident or diffracted wave depending on direction,
        expressed in an orthonormal frame (without taking into account the different
voxel size in each dimension)
:param voxel_size: tuple, actual voxel size in z, y, and x (CXI convention)
    :param debugging: boolean, True to see plots
:param kwargs:
- width_z: size of the area to plot in z (axis 0), centered on the middle
of the initial array
- width_y: size of the area to plot in y (axis 1), centered on the middle
of the initial array
- width_x: size of the area to plot in x (axis 2), centered on the middle
of the initial array
    :return: the optical path in nm, of the same shape as support
"""
#########################
# check and load kwargs #
#########################
valid.valid_kwargs(
kwargs=kwargs, allowed_kwargs={"width_z", "width_y", "width_x"}, name="kwargs"
)
width_z = kwargs.get("width_z")
valid.valid_item(
value=width_z,
allowed_types=int,
min_excluded=0,
allow_none=True,
name="width_z",
)
width_y = kwargs.get("width_y")
valid.valid_item(
value=width_y,
allowed_types=int,
min_excluded=0,
allow_none=True,
name="width_y",
)
width_x = kwargs.get("width_x")
valid.valid_item(
value=width_x,
allowed_types=int,
min_excluded=0,
allow_none=True,
name="width_x",
)
#########################
# check some parameters #
#########################
if support.ndim != 3:
raise ValueError("support should be a 3D array")
voxel_size = voxel_size or (1, 1, 1)
if isinstance(voxel_size, Number):
voxel_size = (voxel_size,) * 3
valid.valid_container(
voxel_size,
container_types=(tuple, list),
length=3,
item_types=Real,
min_excluded=0,
name="voxel_size",
)
############################################################
# correct k for the different voxel size in each dimension #
# (k is expressed in an orthonormal basis) #
############################################################
k = [k[i] * voxel_size[i] for i in range(3)]
###################################################################
# find the extent of the object, to optimize the calculation time #
###################################################################
nbz, nby, nbx = support.shape
path = np.zeros((nbz, nby, nbx))
indices_support = np.nonzero(support)
min_z = indices_support[0].min()
max_z = indices_support[0].max() + 1 # last point not included in range()
min_y = indices_support[1].min()
max_y = indices_support[1].max() + 1 # last point not included in range()
min_x = indices_support[2].min()
max_x = indices_support[2].max() + 1 # last point not included in range()
#############################################
# normalize k, now it is in units of voxels #
#############################################
if direction == "in":
k_norm = -1 / np.linalg.norm(k) * np.asarray(k) # we will work with -k_in
else: # "out"
k_norm = 1 / np.linalg.norm(k) * np.asarray(k)
#############################################
# calculate the optical path for each voxel #
#############################################
for idz in range(min_z, max_z, 1):
for idy in range(min_y, max_y, 1):
for idx in range(min_x, max_x, 1):
stop_flag = False
counter = support[
idz, idy, idx
] # include also the pixel if it belongs to the support
pixel = np.array(
[idz, idy, idx]
) # pixel for which the optical path is calculated
# beware, the support could be 0 at some voxel inside the object also,
# but the loop should continue until it reaches the end of the box
# (min_z, max_z, min_y, max_y, min_x, max_x)
while not stop_flag:
pixel = pixel + k_norm # add unitary translation in -k_in direction
coords = np.rint(pixel)
stop_flag = True
if (
(min_z <= coords[0] <= max_z)
and (min_y <= coords[1] <= max_y)
and (min_x <= coords[2] <= max_x)
):
counter = (
counter
+ support[int(coords[0]), int(coords[1]), int(coords[2])]
)
stop_flag = False
# For each voxel, counter is the number of steps along the unitary
# k vector where the support is non zero. Now we need to convert this
# into nm using the voxel size, different in each dimension
endpoint = (
np.array([idz, idy, idx]) + counter * k_norm
) # indices of the final voxel
path[idz, idy, idx] = np.sqrt(
((np.rint(endpoint[0]) - idz) * voxel_size[0]) ** 2
+ ((np.rint(endpoint[1]) - idy) * voxel_size[1]) ** 2
+ ((np.rint(endpoint[2]) - idx) * voxel_size[2]) ** 2
)
##################
# debugging plot #
##################
if debugging:
print(
"Optical path calculation, support limits "
"(start_z, stop_z, start_y, stop_y, start_x, stop_x):"
f"{min_z}, {max_z}, {min_y}, {max_y}, {min_x}, {max_x}"
)
gu.multislices_plot(
support,
width_z=width_z,
width_y=width_y,
width_x=width_x,
vmin=0,
vmax=1,
sum_frames=False,
title="Support for optical path",
is_orthogonal=True,
reciprocal_space=False,
)
###########################################
# apply a mean filter to reduce artefacts #
###########################################
# the path should be averaged only in the support defined by the isosurface
path = mean_filter(
array=path,
support=support,
half_width=1,
title="Optical path",
debugging=debugging,
)
return path
def get_strain(
phase,
planar_distance,
voxel_size,
reference_axis="y",
extent_phase=2 * pi,
method="default",
debugging=False,
):
"""
Calculate the 3D strain array.
:param phase: 3D phase array (do not forget the -1 sign if the phasing algorithm
is python or matlab-based)
:param planar_distance: the planar distance of the material corresponding to
the measured Bragg peak
:param voxel_size: float or tuple of three floats, the voxel size of the
phase array in nm
:param reference_axis: the axis of the array along which q is aligned:
'x', 'y' or 'z' (CXI convention)
:param extent_phase: range for phase wrapping, specify it when the phase spans
over more than 2*pi
:param method: 'default' or 'defect'. If 'defect', will offset the phase
in a loop and keep the smallest value for the strain (Felix Hofmann's method 2019).
:param debugging: True to see plots
:return: the strain 3D array
"""
from bcdi.preprocessing.preprocessing_utils import wrap
if phase.ndim != 3:
raise ValueError("phase should be a 3D array")
if reference_axis not in {"x", "y", "z"}:
raise ValueError("The reference axis should be 'x', 'y' or 'z'")
if isinstance(voxel_size, Number):
voxel_size = (voxel_size,) * 3
valid.valid_container(
voxel_size,
container_types=(tuple, list),
length=3,
item_types=Real,
name="postprocessing_utils.get_strain",
min_excluded=0,
)
strain = np.inf * np.ones(phase.shape)
if method == "defect":
offsets = 2 * np.pi / 10 * np.linspace(-10, 10, num=11)
print(
"Strain method = defect, the following phase offsets will be processed:",
offsets,
)
else: # 'default'
offsets = (0,)
for offset in offsets:
# offset the phase
if method == "defect":
temp_phase = np.copy(phase)
temp_phase = temp_phase + offset
            # wrap the offset phase again
temp_phase = wrap(
obj=temp_phase, start_angle=-extent_phase / 2, range_angle=extent_phase
)
else: # no need to copy the phase, offset = 0
temp_phase = phase
# calculate the strain for this offset
if reference_axis == "x":
_, _, temp_strain = np.gradient(
planar_distance / (2 * np.pi) * temp_phase, voxel_size[2]
) # q is along x after rotating the crystal
elif reference_axis == "y":
_, temp_strain, _ = np.gradient(
planar_distance / (2 * np.pi) * temp_phase, voxel_size[1]
) # q is along y after rotating the crystal
else: # "z"
temp_strain, _, _ = np.gradient(
planar_distance / (2 * np.pi) * temp_phase, voxel_size[0]
) # q is along z after rotating the crystal
# update the strain values
strain = np.where(abs(strain) < abs(temp_strain), strain, temp_strain)
if debugging:
gu.multislices_plot(
temp_phase,
sum_frames=False,
title="Offseted phase",
vmin=-np.pi,
vmax=np.pi,
plot_colorbar=True,
is_orthogonal=True,
reciprocal_space=False,
)
gu.multislices_plot(
strain,
sum_frames=False,
title="strain",
vmin=-0.002,
vmax=0.002,
plot_colorbar=True,
is_orthogonal=True,
reciprocal_space=False,
)
return strain
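# Illustrative sketch (added here for clarity, not part of the original module):
# a self-contained check of the relation used in get_strain, i.e.
# strain = planar_distance / (2*pi) * d(phase)/d(axis), on a synthetic phase that
# varies linearly along y. All numbers below are made up for the illustration.
def _demo_strain_formula():
    d_spacing, voxel = 0.3924, 5.0  # hypothetical planar distance and voxel size in nm
    nz, ny, nx = 8, 8, 8
    y_nm = np.arange(ny) * voxel
    phase = 0.01 * y_nm[None, :, None] * np.ones((nz, ny, nx))  # phase = 0.01 * y
    _, strain_y, _ = np.gradient(d_spacing / (2 * np.pi) * phase, voxel)
    # the gradient of a linear phase is constant: d_spacing * 0.01 / (2 * pi)
    return np.allclose(strain_y, d_spacing * 0.01 / (2 * np.pi))  # True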
def mean_filter(
array,
support,
half_width=0,
width_z=None,
width_y=None,
width_x=None,
vmin=np.nan,
vmax=np.nan,
title="Object",
debugging=False,
):
"""
Apply a mean filter to an object defined by a support.
Only voxels belonging to the object are taken into account, taking care of the
object's surface.
:param array: 3D array to be averaged
:param support: support used for averaging
    :param half_width: half-width of the cubic averaging window, which spans
     2*half_width+1 voxels in each dimension; 0 means no averaging,
     1 averages over the nearest neighbours...
:param width_z: size of the area to plot in z (axis 0), centered on the middle
of the initial array
:param width_y: size of the area to plot in y (axis 1), centered on the middle
of the initial array
:param width_x: size of the area to plot in x (axis 2), centered on the middle
of the initial array
:param vmin: real number, lower boundary for the colorbar of the plots
:param vmax: real number, higher boundary for the colorbar of the plots
:param title: str, title for the plots
:param debugging: bool, True to see plots
:return: averaged array of the same shape as the input array
"""
#########################
# check some parameters #
#########################
if not isinstance(array, np.ndarray):
raise TypeError("array should be a numpy array")
if array.ndim != 3:
raise ValueError("array should be 3D")
if not isinstance(support, np.ndarray):
raise TypeError("support should be a numpy array")
if support.shape != array.shape:
raise ValueError("the support should have the same shape as the array")
valid.valid_item(half_width, allowed_types=int, min_included=0, name="half_width")
valid.valid_container(title, container_types=str, name="title")
valid.valid_item(vmin, allowed_types=Real, name="vmin")
valid.valid_item(vmax, allowed_types=Real, name="vmax")
valid.valid_item(debugging, allowed_types=bool, name="debugging")
valid.valid_item(
value=width_z,
allowed_types=int,
min_excluded=0,
allow_none=True,
name="width_z",
)
valid.valid_item(
value=width_y,
allowed_types=int,
min_excluded=0,
allow_none=True,
name="width_y",
)
valid.valid_item(
value=width_x,
allowed_types=int,
min_excluded=0,
allow_none=True,
name="width_x",
)
#########################
# apply the mean filter #
#########################
if half_width != 0:
if debugging:
gu.multislices_plot(
array,
width_z=width_z,
width_y=width_y,
width_x=width_x,
vmin=vmin,
vmax=vmax,
title=title + " before averaging",
plot_colorbar=True,
)
gu.multislices_plot(
support,
width_z=width_z,
width_y=width_y,
width_x=width_x,
vmin=0,
vmax=1,
title="Support for averaging",
)
nonzero_pixels = np.argwhere(support != 0)
new_values = np.zeros((nonzero_pixels.shape[0], 1), dtype=array.dtype)
counter = 0
for indx in range(nonzero_pixels.shape[0]):
piz = nonzero_pixels[indx, 0]
piy = nonzero_pixels[indx, 1]
pix = nonzero_pixels[indx, 2]
tempo_support = support[
piz - half_width : piz + half_width + 1,
piy - half_width : piy + half_width + 1,
pix - half_width : pix + half_width + 1,
]
nb_points = tempo_support.sum()
temp_phase = array[
piz - half_width : piz + half_width + 1,
piy - half_width : piy + half_width + 1,
pix - half_width : pix + half_width + 1,
]
if temp_phase.size != 0:
value = temp_phase[np.nonzero(tempo_support)].sum() / nb_points
new_values[indx] = value
else:
counter = counter + 1
for indx in range(nonzero_pixels.shape[0]):
array[
nonzero_pixels[indx, 0],
nonzero_pixels[indx, 1],
nonzero_pixels[indx, 2],
] = new_values[indx]
if debugging:
gu.multislices_plot(
array,
width_z=width_z,
width_y=width_y,
width_x=width_x,
vmin=vmin,
vmax=vmax,
title=title + " after averaging",
plot_colorbar=True,
)
if counter != 0:
print("There were", counter, "voxels for which phase could not be averaged")
return array
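# Illustrative sketch (added here for clarity, not part of the original module):
# the averaging step of mean_filter isolated for a single voxel. Only values where
# the support is non-zero inside the (2*half_width+1)**3 cube contribute to the mean.
def _demo_mean_filter_step():
    rng = np.random.default_rng(0)
    arr = rng.normal(size=(5, 5, 5))
    sup = np.zeros((5, 5, 5), dtype=int)
    sup[1:4, 1:4, 1:4] = 1  # a small cubic object
    piz, piy, pix, hw = 2, 2, 2, 1  # centre voxel and half_width
    cube_sup = sup[piz - hw:piz + hw + 1, piy - hw:piy + hw + 1, pix - hw:pix + hw + 1]
    cube_val = arr[piz - hw:piz + hw + 1, piy - hw:piy + hw + 1, pix - hw:pix + hw + 1]
    return cube_val[np.nonzero(cube_sup)].sum() / cube_sup.sum()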
def ortho_modes(array_stack, nb_mode=None, method="eig", verbose=False):
"""
Decompose an object into a set of orthogonal modes.
It finds modes from a N+1 dimensional array or a list/tuple of N-dimensional
arrays. The decomposition is such that the total intensity (i.e. (abs(m)**2).sum(
)) is conserved. Adapted from PyNX.
:param array_stack: the stack of modes to orthogonalize along the first dimension.
    :param nb_mode: the maximum number of modes to be returned. If None,
     all are returned; otherwise only the first nb_mode modes, sorted by
     decreasing norm, are returned.
:param method: either 'eig' to use eigenvalue decomposition or 'svd' to use
singular value decomposition.
:param verbose: set it to True to have more printed comments
    :return: a tuple (modes, eigenvectors, weights). modes is an array with the
     same shape as the input, but with orthogonal modes, i.e.
     (mo[i]*mo[j].conj()).sum()=0 for i!=j, sorted by decreasing norm and
     truncated to nb_mode if it is not None. eigenvectors holds the decomposition
     coefficients and weights the relative weight of each mode.
"""
if array_stack[0].ndim != 3:
raise ValueError("A stack of 3D arrays is expected")
# array stack has the shape: (nb_arrays, L, M, N)
nb_arrays = array_stack.shape[0]
array_size = array_stack[0].size # the size of individual arrays is L x M x N
if method == "eig":
my_matrix = np.array(
[
[np.vdot(array2, array1) for array1 in array_stack]
for array2 in array_stack
]
)
# array of shape (nb_arrays,nb_arrays)
eigenvalues, eigenvectors = np.linalg.eig(
my_matrix
) # the number of eigenvalues is nb_arrays
elif method == "svd": # Singular value decomposition
my_matrix = np.reshape(array_stack, (nb_arrays, array_size))
eigenvectors, eigenvalues, vh = scipy.linalg.svd(
my_matrix, full_matrices=False, compute_uv=True
)
# my_matrix = eigenvectors x S x Vh,
# where S is a suitably shaped matrix of zeros with main diagonal s
# The shapes are (M, K) for the eigenvectors and (K, N)
# for the unitary matrix Vh where K = min(M, N)
# Here, M is the number of reconstructions nb_arrays,
# N is the size of a reconstruction array_size
else:
raise ValueError('Incorrect value for parameter "method"')
sort_indices = (
-eigenvalues
).argsort() # returns the indices that would sort eigenvalues in descending order
print("\neigenvalues", eigenvalues)
eigenvectors = eigenvectors[
:, sort_indices
] # sort eigenvectors using sort_indices, same shape as my_matrix
for idx in range(len(sort_indices)):
if eigenvectors[abs(eigenvectors[:, idx]).argmax(), idx].real < 0:
eigenvectors[:, idx] *= -1
modes = np.array(
[
sum(array_stack[i] * eigenvectors[i, j] for i in range(nb_arrays))
for j in range(nb_arrays)
]
)
# # the double nested comprehension list above is equivalent to the following code:
# modes = np.zeros(array_stack.shape, dtype=complex)
# for j in range(nb_arrays):
# temp = np.zeros(array_stack[0].shape, dtype=complex)
# for i in range(nb_arrays):
# temp += array_stack[i] * eigenvectors[i, j]
# modes[j] = temp
if verbose:
print("Orthonormal decomposition coefficients (rows)")
print(
np.array2string(
(eigenvectors.transpose()),
threshold=10,
precision=3,
floatmode="fixed",
suppress_small=True,
)
)
if nb_mode is not None:
nb_mode = min(nb_arrays, nb_mode)
else:
nb_mode = nb_arrays
weights = (
np.array([(abs(modes[i]) ** 2).sum() for i in range(nb_arrays)])
/ (abs(modes) ** 2).sum()
)
return modes[:nb_mode], eigenvectors, weights
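# Illustrative sketch (added here for clarity, not part of the original module):
# decomposing a stack of two nearly identical complex "reconstructions" with the
# 'eig' branch of ortho_modes; nearly all of the intensity ends up in the first mode.
def _demo_ortho_modes():
    rng = np.random.default_rng(1)
    obj = rng.normal(size=(4, 4, 4)) + 1j * rng.normal(size=(4, 4, 4))
    stack = np.stack([obj, obj + 0.01 * rng.normal(size=(4, 4, 4))])
    modes, coeffs, weights = ortho_modes(stack, method="eig", verbose=True)
    return weights  # weights[0] close to 1, weights[1] close to 0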
def regrid(array, old_voxelsize, new_voxelsize):
"""
Interpolate real space data on a grid with a different voxel size.
:param array: 3D array, the object to be interpolated
:param old_voxelsize: tuple, actual voxel size in z, y, and x (CXI convention)
:param new_voxelsize: tuple, desired voxel size for the interpolation in
z, y, and x (CXI convention)
:return: obj interpolated using the new voxel sizes
"""
if array.ndim != 3:
raise ValueError("array should be a 3D array")
if isinstance(old_voxelsize, Number):
old_voxelsize = (old_voxelsize,) * 3
valid.valid_container(
old_voxelsize,
container_types=(tuple, list),
length=3,
item_types=Real,
name="postprocessing_utils.regrid",
min_excluded=0,
)
if isinstance(new_voxelsize, Number):
new_voxelsize = (new_voxelsize,) * 3
valid.valid_container(
new_voxelsize,
container_types=(tuple, list),
length=3,
item_types=Real,
name="postprocessing_utils.regrid",
min_excluded=0,
)
nbz, nby, nbx = array.shape
    old_z = np.arange(-nbz // 2, nbz // 2, 1)  # dataset-row completion (api: numpy.arange)
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import pickle
import time
import warnings
from sklearn.utils import shuffle
# SEED = 88
warnings.filterwarnings("ignore", category=RuntimeWarning)
from matplotlib.legend_handler import HandlerLine2D
import utils
from classifier import CNNClassifier, one_hot_encoder, CONFIDENCE_THRESHOLD
from ops import *
from utils import *
import tensorflow as tf
def gradient_penalty(real, fake, f):
def interpolate(a, b):
shape = tf.concat((tf.shape(a)[0:1], tf.tile([1], [a.shape.ndims - 1])), axis=0)
alpha = tf.random_uniform(shape=shape, minval=0., maxval=1.)
inter = a + alpha * (b - a)
inter.set_shape(a.get_shape().as_list())
return inter
x = interpolate(real, fake)
_, pred, _ = f(x)
gradients = tf.gradients(pred, x)[0]
# slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), reduction_indices=range(1, x.shape.ndims)))
# gp = tf.reduce_mean((slopes - 1.)**2)
slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), axis=1))
gp = tf.reduce_mean(tf.square(slopes - 1.))
return gp
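# Illustrative sketch (added here for clarity, not part of the original training code):
# gradient_penalty expects the critic f to return a 3-tuple whose second element is the
# unnormalized score, as the discriminator below does. The toy linear critic here only
# mimics that interface; in build_model() the penalty is weighted by 10 in d_loss.
def _demo_gradient_penalty():
    def _toy_critic(x):
        logits = tf.reduce_sum(x, axis=1)
        return tf.nn.sigmoid(logits), logits, x
    real = tf.placeholder(tf.float32, [None, 8])
    fake = tf.placeholder(tf.float32, [None, 8])
    return gradient_penalty(real, fake, _toy_critic)  # scalar penalty op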
class MultiModalInfoGAN(object):
model_name = "MultiModalInfoGAN" # name for checkpoint
def __init__(self, sess, epoch, batch_size, z_dim, dataset_name, checkpoint_dir, result_dir, log_dir, sampler, seed, len_continuous_code=2, is_wgan_gp=False,
dataset_creation_order=["czcc", "czrc", "rzcc", "rzrc"], SUPERVISED=True):
print("saving to esults dir={}".format(result_dir))
np.random.seed(seed)
self.test_size = 5000
self.wgan_gp = is_wgan_gp
self.loss_list = []
self.confidence_list = []
self.sess = sess
self.dataset_name = dataset_name
self.checkpoint_dir = checkpoint_dir
self.result_dir = result_dir
self.log_dir = log_dir
self.epoch = epoch
self.batch_size = batch_size
self.sampler = sampler
self.pretrained_classifier = CNNClassifier(self.dataset_name, seed=seed)
self.dataset_creation_order = dataset_creation_order
self.SUPERVISED = SUPERVISED # if it is true, label info is directly used for code
self.dir_results = "classifier_results_seed_{}".format(seed)
# train
self.learning_rate = 0.0002
self.beta1 = 0.5
# test
self.sample_num = 64 # number of generated images to be saved
# code
self.len_discrete_code = 10 # categorical distribution (i.e. label)
self.len_continuous_code = len_continuous_code # gaussian distribution (e.g. rotation, thickness)
if dataset_name == 'mnist' or dataset_name == 'fashion-mnist':
# parameters
self.input_height = 28
self.input_width = 28
self.output_height = 28
self.output_width = 28
self.z_dim = z_dim # dimension of noise-vector
self.y_dim = self.len_discrete_code + self.len_continuous_code # dimension of code-vector (label+two features)
self.c_dim = 1
# load mnist
self.data_X, self.data_y = load_mnist(self.dataset_name)
# get number of batches for a single epoch
self.num_batches = len(self.data_X) // self.batch_size
# elif dataset_name == 'cifar10':
# print("IN CIFAR")
# # parameters
# self.input_height = 32
# self.input_width = 32
# self.output_height = 32
# self.output_width = 32
#
# self.z_dim = z_dim # dimension of noise-vector
# self.y_dim = self.len_discrete_code + self.len_continuous_code # dimension of code-vector (label+two features)
# self.c_dim = 3
# # self.data_X, self.data_y, self.test_x, self.test_labels = get_train_test_data()
# # get number of batches for a single epoch
# self.num_batches = len(self.data_X) // self.batch_size
# elif dataset_name == 'celebA':
# from data_load import preprocess_fn
# print("in celeba")
# img_paths = glob.glob('/Users/idan.a/data/celeba/*.jpg')
# self.data_pool = utils.DiskImageData(img_paths, batch_size, shape=[218, 178, 3], preprocess_fn=preprocess_fn)
# self.num_batches = len(self.data_pool) // (batch_size)
#
# # real_ipt = data_pool.batch()
# # parameters
# self.input_height = 64
# self.input_width = 64
# self.output_height = 32
# self.output_width = 32
#
# self.z_dim = 128 # dimension of noise-vector
# self.c_dim = 3
# self.len_discrete_code = 100 # categorical distribution (i.e. label)
# self.len_continuous_code = 0 # gaussian distribution (e.g. rotation, thickness)
# self.y_dim = self.len_discrete_code + self.len_continuous_code # dimension of code-vector (label+two features)
# sess = utils.session()
#
# # iteration counter
# it_cnt, update_cnt = utils.counter()
#
# sess.run(tf.global_variables_initializer())
# sess.run(it_cnt)
# sess.run(update_cnt) # get number of batches for a single epoch
self.model_dir = self.get_model_dir()
def classifier(self, x, is_training=True, reuse=False):
# Network Architecture is exactly same as in infoGAN (https://arxiv.org/abs/1606.03657)
        # Architecture : (64)5c2s-(128)5c2s_BL-FC1024_BL-FC128_BL-FC12S
# All layers except the last two layers are shared by discriminator
# Number of nodes in the last layer is reduced by half. It gives better results.
with tf.variable_scope("classifier", reuse=reuse):
net = lrelu(bn(linear(x, 64, scope='c_fc1'), is_training=is_training, scope='c_bn1'))
out_logit = linear(net, self.y_dim, scope='c_fc2')
out = tf.nn.softmax(out_logit)
return out, out_logit
def discriminator(self, x, is_training=True, reuse=True):
# Network Architecture is exactly same as in infoGAN (https://arxiv.org/abs/1606.03657)
# Architecture : (64)4c2s-(128)4c2s_BL-FC1024_BL-FC1_S
if self.wgan_gp:
with tf.variable_scope("wgan_discriminator", reuse=reuse):
net = lrelu(conv2d(x, 64, 4, 4, 2, 2, name='d_conv1'))
net = lrelu(bn(conv2d(net, 128, 4, 4, 2, 2, name='d_conv2'), is_training=is_training, scope='d_bn2'))
net = tf.reshape(net, [self.batch_size, -1])
net = lrelu(bn(linear(net, 1024, scope='d_fc3'), is_training=is_training, scope='d_bn3'))
out_logit = linear(net, 1, scope='d_fc4')
out = tf.nn.sigmoid(out_logit)
else:
with tf.variable_scope("discriminator", reuse=reuse):
net = lrelu(conv2d(x, 64, 4, 4, 2, 2, name='d_conv1'))
net = lrelu(bn(conv2d(net, 128, 4, 4, 2, 2, name='d_conv2'), is_training=is_training, scope='d_bn2'))
net = tf.reshape(net, [self.batch_size, -1])
net = lrelu(bn(linear(net, 1024, scope='d_fc3'), is_training=is_training, scope='d_bn3'))
out_logit = linear(net, 1, scope='d_fc4')
out = tf.nn.sigmoid(out_logit)
return out, out_logit, net
def generator(self, z, y, is_training=True, reuse=False):
# Network Architecture is exactly same as in infoGAN (https://arxiv.org/abs/1606.03657)
# Architecture : FC1024_BR-FC7x7x128_BR-(64)4dc2s_BR-(1)4dc2s_S
with tf.variable_scope("generator", reuse=reuse):
# merge noise and code
z = concat([z, y], 1)
net = lrelu(bn(linear(z, 1024, scope='g_fc1'), is_training=is_training, scope='g_bn1'))
net = lrelu(bn(linear(net, 128 * self.input_height // 4 * self.input_width // 4, scope='g_fc2'), is_training=is_training, scope='g_bn2'))
net = tf.reshape(net, [self.batch_size, int(self.input_height // 4), int(self.input_width // 4), 128])
net = lrelu(
bn(deconv2d(net, [self.batch_size, int(self.input_height // 2), int(self.input_width // 2), 64], 4, 4, 2, 2, name='g_dc3'), is_training=is_training, scope='g_bn3'))
out = tf.nn.sigmoid(deconv2d(net, [self.batch_size, self.input_height, self.input_width, self.c_dim], 4, 4, 2, 2, name='g_dc4'))
# out = tf.reshape(out, ztf.stack([self.batch_size, 784]))
return out
def build_model(self):
# some parameters
image_dims = [self.input_height, self.input_width, self.c_dim]
bs = self.batch_size
""" Graph Input """
# images
self.x = tf.placeholder(tf.float32, [bs] + image_dims, name='real_images')
# labels
self.y = tf.placeholder(tf.float32, [bs, self.y_dim], name='y')
# noises
self.z = tf.placeholder(tf.float32, [bs, self.z_dim], name='z')
""" Loss Function """
## 1. GAN Loss
# output of D for real images
D_real, D_real_logits, _ = self.discriminator(self.x, is_training=True, reuse=False)
# output of D for fake images
self.x_ = self.generator(self.z, self.y, is_training=True, reuse=False)
D_fake, D_fake_logits, input4classifier_fake = self.discriminator(self.x_, is_training=True, reuse=True)
# get loss for discriminator
d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_real_logits, labels=tf.ones_like(D_real)))
d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_fake_logits, labels=tf.zeros_like(D_fake)))
self.d_loss = d_loss_real + d_loss_fake
# get loss for generator
self.g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_fake_logits, labels=tf.ones_like(D_fake)))
if self.wgan_gp:
d_loss_real = - tf.reduce_mean(D_real_logits)
d_loss_fake = tf.reduce_mean(D_fake_logits)
self.d_loss = d_loss_real + d_loss_fake
# get loss for generator
self.g_loss = - d_loss_fake
wd = tf.reduce_mean(D_real_logits) - tf.reduce_mean(D_fake_logits)
gp = gradient_penalty(self.x, self.x_, self.discriminator)
self.d_loss = -wd + gp * 10.0
self.g_loss = -tf.reduce_mean(D_fake_logits)
## 2. Information Loss
code_fake, code_logit_fake = self.classifier(input4classifier_fake, is_training=True, reuse=False)
# discrete code : categorical
disc_code_est = code_logit_fake[:, :self.len_discrete_code]
disc_code_tg = self.y[:, :self.len_discrete_code]
q_disc_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=disc_code_est, labels=disc_code_tg))
# continuous code : gaussian
cont_code_est = code_logit_fake[:, self.len_discrete_code:]
cont_code_tg = self.y[:, self.len_discrete_code:]
q_cont_loss = tf.reduce_mean(tf.reduce_sum(tf.square(cont_code_tg - cont_code_est), axis=1))
# get information loss = P(x|c)
self.q_loss = q_disc_loss + q_cont_loss
""" Training """
# divide trainable variables into a group for D and a group for G
t_vars = tf.trainable_variables()
d_vars = [var for var in t_vars if 'd_' in var.name]
g_vars = [var for var in t_vars if 'g_' in var.name]
q_vars = [var for var in t_vars if ('d_' in var.name) or ('c_' in var.name) or ('g_' in var.name)]
# optimizers
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
self.d_optim = tf.train.AdamOptimizer(self.learning_rate, beta1=self.beta1).minimize(self.d_loss, var_list=d_vars)
self.g_optim = tf.train.AdamOptimizer(self.learning_rate * 5, beta1=self.beta1).minimize(self.g_loss, var_list=g_vars)
self.q_optim = tf.train.AdamOptimizer(self.learning_rate * 5, beta1=self.beta1).minimize(self.q_loss, var_list=q_vars)
"""" Testing """
# for test
self.fake_images = self.generator(self.z, self.y, is_training=False, reuse=True)
""" Summary """
d_loss_real_sum = tf.summary.scalar("d_loss_real", d_loss_real)
d_loss_fake_sum = tf.summary.scalar("d_loss_fake", d_loss_fake)
d_loss_sum = tf.summary.scalar("d_loss", self.d_loss)
g_loss_sum = tf.summary.scalar("g_loss", self.g_loss)
        q_loss_sum = tf.summary.scalar("q_loss", self.q_loss)
q_disc_sum = tf.summary.scalar("q_disc_loss", q_disc_loss)
q_cont_sum = tf.summary.scalar("q_cont_loss", q_cont_loss)
# final summary operations
self.g_sum = tf.summary.merge([d_loss_fake_sum, g_loss_sum])
self.d_sum = tf.summary.merge([d_loss_real_sum, d_loss_sum])
self.q_sum = tf.summary.merge([q_loss_sum, q_disc_sum, q_cont_sum])
def train(self):
# initialize all variables
tf.global_variables_initializer().run()
# graph inputs for visualize training results
self.sample_z = self.sampler.get_sample(self.batch_size, self.z_dim, 10) # np.random.uniform(-1, 1,
# size=(self.batch_size, self.z_dim))
self.test_labels = np.ones([self.batch_size, self.y_dim])
if self.dataset_name != "celebA":
self.test_labels = self.data_y[0:self.batch_size]
self.test_codes = np.concatenate((self.test_labels, np.zeros([self.batch_size, self.len_continuous_code])), axis=1)
# saver to save model
self.saver = tf.train.Saver()
# summary writer
self.writer = tf.summary.FileWriter(self.log_dir + '/' + self.model_name, self.sess.graph)
# restore check-point if it exits
could_load, checkpoint_counter = self.load(self.checkpoint_dir)
if could_load:
start_epoch = (int)(checkpoint_counter / self.num_batches)
start_batch_id = checkpoint_counter - start_epoch * self.num_batches
counter = checkpoint_counter
print(" [*] Load SUCCESS")
else:
start_epoch = 0
start_batch_id = 0
counter = 1
print(" [!] Load failed...")
# loop for epoch
start_time = time.time()
for epoch in range(start_epoch, self.epoch):
# get batch data
for idx in range(start_batch_id, self.num_batches):
batch_images = self.data_X[idx * self.batch_size:(idx + 1) * self.batch_size]
batch_labels = np.random.multinomial(1, self.len_discrete_code * [float(1.0 / self.len_discrete_code)], size=[self.batch_size])
batch_codes = np.concatenate((batch_labels, np.random.uniform(-1, 1, size=(self.batch_size, self.len_continuous_code))), axis=1)
batch_z = self.sampler.get_sample(self.batch_size, self.z_dim, 10)
# update D network
_, summary_str, d_loss = self.sess.run([self.d_optim, self.d_sum, self.d_loss], feed_dict={self.x: batch_images, self.y: batch_codes, self.z: batch_z})
self.writer.add_summary(summary_str, counter)
# update G and Q network
_, summary_str_g, g_loss, _, summary_str_q, q_loss = self.sess.run([self.g_optim, self.g_sum, self.g_loss, self.q_optim, self.q_sum, self.q_loss],
feed_dict={self.x: batch_images, self.z: batch_z, self.y: batch_codes})
self.writer.add_summary(summary_str_g, counter)
self.writer.add_summary(summary_str_q, counter)
# display training status
counter += 1
print("Epoch: [%2d] [%4d/%4d] time: %4.4f, d_loss: %.8f, g_loss: %.8f" % (epoch, idx, self.num_batches, time.time() - start_time, d_loss, g_loss,))
# After an epoch, start_batch_id is set to zero
# non-zero value is only for the first epoch after loading pre-trained model
start_batch_id = 0
# save model
self.save(self.checkpoint_dir, counter)
# show temporal results
self.visualize_results(epoch)
# plotting
self.create_dataset_from_GAN()
self.save(self.checkpoint_dir, counter)
# if self.dataset_name != "celebA":
# self.plot_train_test_loss("confidence", self.confidence_list)
# Evaluation with classifier
# save model for final step
def visualize_results(self, epoch):
tot_num_samples = min(self.sample_num, self.batch_size)
image_frame_dim = int(np.floor(np.sqrt(tot_num_samples)))
""" random noise, random discrete code, fixed continuous code """
y = np.random.choice(self.len_discrete_code, self.batch_size)
y_one_hot = np.zeros((self.batch_size, self.y_dim))
y_one_hot[np.arange(self.batch_size), y] = 1
z_sample = self.sampler.get_sample(self.batch_size, self.z_dim, 10)
samples = self.sess.run(self.fake_images, feed_dict={self.z: z_sample, self.y: y_one_hot}) # samples_for_test.append(samples)
save_images(samples[:image_frame_dim * image_frame_dim, :, :, :], [image_frame_dim, image_frame_dim],
check_folder(self.result_dir + '/' + self.model_dir) + '/' + self.model_name + '_epoch%03d' % epoch + '_test_all_classes.png')
""" specified condition, random noise """
n_styles = 10 # must be less than or equal to self.batch_size
si = np.random.choice(self.batch_size, n_styles)
for l in range(self.len_discrete_code):
y = np.zeros(self.batch_size, dtype=np.int64) + l
y_one_hot = np.zeros((self.batch_size, self.y_dim))
y_one_hot[np.arange(self.batch_size), y] = 1
samples = self.sess.run(self.fake_images, feed_dict={self.z: z_sample, self.y: y_one_hot})
# save_images(samples[:image_frame_dim * image_frame_dim, :, :, :], [image_frame_dim, image_frame_dim],
# check_folder(self.result_dir + '/' + self.model_dir) + '/' + self.model_name + '_epoch%03d' % epoch + '_test_class_%d.png' % l)
samples = samples[si, :, :, :]
if l == 0:
all_samples = samples
else:
all_samples = np.concatenate((all_samples, samples), axis=0)
""" save merged images to check style-consistency """
canvas = np.zeros_like(all_samples)
for s in range(n_styles):
for c in range(self.len_discrete_code):
canvas[s * self.len_discrete_code + c, :, :, :] = all_samples[c * n_styles + s, :, :, :]
save_images(canvas, [n_styles, self.len_discrete_code],
check_folder(self.result_dir + '/' + self.model_dir) + '/' + self.model_name + '_epoch%03d' % epoch + '_test_all_classes_style_by_style.png')
""" fixed noise """
assert self.len_continuous_code == 2
c1 = np.linspace(-1, 1, image_frame_dim)
c2 = np.linspace(-1, 1, image_frame_dim)
xv, yv = np.meshgrid(c1, c2)
xv = xv[:image_frame_dim, :image_frame_dim]
yv = yv[:image_frame_dim, :image_frame_dim]
c1 = xv.flatten()
c2 = yv.flatten()
z_fixed = np.zeros([self.batch_size, self.z_dim])
for l in range(self.len_discrete_code):
y = np.zeros(self.batch_size, dtype=np.int64) + l # ones in the discrete_code idx * batch_size
y_one_hot = np.zeros((self.batch_size, self.y_dim))
y_one_hot[np.arange(self.batch_size), y] = 1
# cartesian multiplication of the two latent codes
y_one_hot[np.arange(image_frame_dim * image_frame_dim), self.len_discrete_code] = c1
y_one_hot[np.arange(image_frame_dim * image_frame_dim), self.len_discrete_code + 1] = c2
samples = self.sess.run(self.fake_images, feed_dict={self.z: z_fixed, self.y: y_one_hot})
save_images(samples[:image_frame_dim * image_frame_dim, :, :, :], [image_frame_dim, image_frame_dim],
check_folder(self.result_dir + '/' + self.model_dir) + '/' + self.model_name + '_epoch%03d' % epoch + '_test_class_c1c2_%d.png' % l)
def create_dataset_from_GAN(self, is_confidence=False):
generated_dataset = []
generated_labels = []
tot_num_samples = min(self.sample_num, self.batch_size)
image_frame_dim = int(np.floor(np.sqrt(tot_num_samples)))
c1 = np.linspace(-1, 1, image_frame_dim)
        c2 = np.linspace(-1, 1, image_frame_dim)  # dataset-row completion (api: numpy.linspace)
# Copyright (c) 2020-2021 impersonator.org authors (<NAME> and <NAME>). All rights reserved.
import cv2
import numpy as np
import torchvision
from typing import Union, List
import torch
from scipy.spatial.transform import Rotation as R
def compute_scaled_size(origin_size, control_size):
"""
Args:
origin_size (tuple or List): (h, w) or [h, w]
control_size (int or float): the final size of the min(h, w)
Returns:
scaled_size (tuple or List): (h', w')
"""
scale_rate = np.sqrt(control_size * control_size / (origin_size[0] * origin_size[1]))
scaled_size = (int(origin_size[0] * scale_rate), int(origin_size[1] * scale_rate))
return scaled_size
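# Illustrative sketch (added here for clarity, not part of the original module): the
# scale factor above is sqrt(control_size**2 / (h*w)), so control_size effectively
# constrains the geometric mean of the scaled (h', w'). Numbers are made up.
def _demo_compute_scaled_size():
    scaled = compute_scaled_size((720, 1280), 512)  # roughly (384, 682)
    return scaled  # scaled[0] * scaled[1] is approximately 512 ** 2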
def read_cv2_img(path):
"""
Read color images
Args:
path (str): Path to image
Returns:
img (np.ndarray): color images with RGB channel, and its shape is (H, W, 3).
"""
img = cv2.imread(path, -1)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
return img
def load_images(images_paths: Union[str, List[str]], image_size):
"""
Args:
images_paths (Union[str, List[str]]):
Returns:
        images (np.ndarray): shape is (ns, 3, H, W), channel order is RGB, and pixel values are normalized to [-1, 1].
"""
if isinstance(images_paths, str):
images_paths = [images_paths]
images = []
for image_path in images_paths:
image = read_cv2_img(image_path)
image = normalize_img(image, image_size=image_size, transpose=True)
images.append(image)
images = np.stack(images, axis=0) # (ns, 3, H, W)
return images
def read_mask(path, image_size):
"""
Read mask
Args:
path (str): Path to mask
Returns:
        mask (np.ndarray): grayscale mask with shape (1, H, W) and values in the range [0, 1]
"""
mask = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
mask = cv2.resize(mask, (image_size, image_size))
mask = mask.astype(np.float32) / 255
mask = np.expand_dims(mask, 0)
return mask
def load_parse(parse_path, image_size):
mask = cv2.imread(parse_path, cv2.IMREAD_GRAYSCALE)
mask = cv2.resize(mask, (image_size, image_size))
mask = mask.astype(np.float32) / 255
mask = np.expand_dims(mask, 0)
return mask
def load_img_parse(img_path, parse_path, image_size):
image = transform_img(read_cv2_img(img_path), transpose=True)
mask = load_parse(parse_path, image_size)
return image, mask
def save_cv2_img(img, path, image_size=None, normalize=False, transpose=True):
if transpose:
img = np.transpose(img, (1, 2, 0))
if normalize:
img = (img + 1) / 2.0 * 255
img = img.astype(np.uint8)
if len(img.shape) == 3:
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
if image_size is not None:
img = cv2.resize(img, (image_size, image_size))
cv2.imwrite(path, img)
return img
def transform_img(image, image_size=None, transpose=False):
if image_size is not None and image_size != image.shape[0]:
image = cv2.resize(image, (image_size, image_size))
image = image.astype(np.float32)
image /= 255.0
if transpose:
image = image.transpose((2, 0, 1))
return image
def normalize_img(image, image_size=None, transpose=False):
image = transform_img(image, image_size, transpose)
image *= 2
image -= 1
return image
def resize_img(img, scale_factor):
new_size = (np.floor(np.array(img.shape[0:2]) * scale_factor)).astype(int)
new_img = cv2.resize(img, (new_size[1], new_size[0]))
# This is scale factor of [height, width] i.e. [y, x]
actual_factor = [
new_size[0] / float(img.shape[0]), new_size[1] / float(img.shape[1])
]
return new_img, actual_factor
def tensor2im(img, imtype=np.uint8, unnormalize=True, idx=0, nrows=None):
# select a sample or create grid if img is a batch
if len(img.shape) == 4:
nrows = nrows if nrows is not None else int(np.sqrt(img.size(0)))
img = img[idx] if idx >= 0 else torchvision.utils.make_grid(img, nrows)
img = img.cpu().float()
if unnormalize:
img += 1.0
img /= 2.0
image_numpy = img.numpy()
# image_numpy = np.transpose(image_numpy, (1, 2, 0))
image_numpy *= 255.0
return image_numpy.astype(imtype)
def kp_to_bbox_param(kp, vis_thresh=0, diag_len=150.0):
"""
Finds the bounding box parameters from the 2D keypoints.
Args:
kp (Kx3): 2D Keypoints.
vis_thresh (float): Threshold for visibility.
diag_len(float): diagonal length of bbox of each person
Returns:
[center_x, center_y, scale]
"""
if kp is None:
return
if kp.shape[1] == 3:
vis = kp[:, 2] > vis_thresh
if not np.any(vis):
return
min_pt = np.min(kp[vis, :2], axis=0)
max_pt = np.max(kp[vis, :2], axis=0)
else:
min_pt = np.min(kp, axis=0)
max_pt = np.max(kp, axis=0)
person_height = np.linalg.norm(max_pt - min_pt)
if person_height < 0.5:
return
center = (min_pt + max_pt) / 2.
scale = diag_len / person_height
return np.append(center, scale)
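# Illustrative sketch (added here for clarity, not part of the original module): three
# synthetic keypoints, the last one below the visibility threshold. The result is the
# midpoint of the visible joints plus the scale diag_len / visible diagonal.
def _demo_kp_to_bbox_param():
    kp = np.array([[100.0, 200.0, 1.0],
                   [160.0, 280.0, 1.0],
                   [130.0, 240.0, 0.0]])  # last joint marked invisible
    # expected: array([130., 240., 1.5]); the visible diagonal is 100 px and 150 / 100 = 1.5
    return kp_to_bbox_param(kp, vis_thresh=0.5, diag_len=150.0)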
def process_hmr_img(im_path, bbox_param, rescale=None, image=None, image_size=256, proc=False):
"""
Args:
im_path (str): the path of image.
        image (np.ndarray or None): if None, the image is loaded from im_path; otherwise this array is used directly.
bbox_param (3,) : [cx, cy, scale].
rescale (float, np.ndarray or None): rescale factor.
proc (bool): the flag to return processed image or not.
image_size (int):
Returns:
        proc_img (np.ndarray): if proc is True, the processed (scaled, padded and cropped) image; otherwise the original image resized to image_size x image_size.
"""
if image is None:
image = cv2.imread(im_path)
orig_h, orig_w = image.shape[0:2]
center = bbox_param[:2]
scale = bbox_param[2]
if rescale is not None:
scale = rescale
if proc:
image_scaled, scale_factors = resize_img(image, scale)
resized_h, resized_w = image_scaled.shape[:2]
else:
scale_factors = [scale, scale]
resized_h = orig_h * scale
resized_w = orig_w * scale
    center_scaled = np.round(center * scale_factors).astype(int)  # np.int was removed in recent NumPy
if proc:
# Make sure there is enough space to crop image_size x image_size.
image_padded = np.pad(
array=image_scaled,
pad_width=((image_size,), (image_size,), (0,)),
mode='edge'
)
padded_h, padded_w = image_padded.shape[0:2]
else:
padded_h = resized_h + image_size * 2
padded_w = resized_w + image_size * 2
center_scaled += image_size
# Crop image_size x image_size around the center.
margin = image_size // 2
start_pt = (center_scaled - margin).astype(int)
end_pt = (center_scaled + margin).astype(int)
end_pt[0] = min(end_pt[0], padded_w)
end_pt[1] = min(end_pt[1], padded_h)
if proc:
proc_img = image_padded[start_pt[1]:end_pt[1], start_pt[0]:end_pt[0], :]
# height, width = image_scaled.shape[:2]
else:
# height, width = end_pt[1] - start_pt[1], end_pt[0] - start_pt[0]
proc_img = cv2.resize(image, (image_size, image_size))
# proc_img = None
center_scaled -= start_pt
return {
# return original too with info.
'image': proc_img,
'im_path': im_path,
'im_shape': (orig_h, orig_w),
'center': center_scaled,
'scale': scale,
'start_pt': start_pt,
}
def cam_denormalize(cam, N):
# This is camera in crop image coord.
new_cam = np.hstack([N * cam[0] * 0.5, cam[1:] + (2. / cam[0]) * 0.5])
return new_cam
def cam_init2orig(cam, scale, start_pt, N=224):
"""
Args:
cam (3,): (s, tx, ty)
scale (float): scale = resize_h / orig_h
start_pt (2,): (lt_x, lt_y)
N (int): hmr_image_size (224) or IMG_SIZE
Returns:
cam_orig (3,): (s, tx, ty), camera in original image coordinates.
"""
# This is camera in crop image coord.
cam_crop = np.hstack([N * cam[0] * 0.5, cam[1:] + (2. / cam[0]) * 0.5])
# This is camera in orig image coord
cam_orig = np.hstack([
cam_crop[0] / scale,
cam_crop[1:] + (start_pt - N) / cam_crop[0]
])
# print('cam crop', cam_crop)
# print('cam orig', cam_orig)
return cam_orig
def cam_orig2crop_center(cam, scale, start_pt, N=256, normalize=True):
"""
Args:
        cam (3,): (s, tx, ty), camera in original image coordinates.
scale (float): scale = resize_h / orig_h or (resize_w / orig_w)
start_pt (2,): (lt_x, lt_y)
N (int): hmr_image_size (224) or IMG_SIZE
normalize (bool)
Returns:
"""
cam_recrop = np.hstack([
cam[0] * scale,
cam[1:] + (N - start_pt) / (scale * cam[0])
])
# print('cam re-crop', cam_recrop)
if normalize:
cam_norm = np.hstack([
cam_recrop[0] * (2. / N),
cam_recrop[1:] - N / (2 * cam_recrop[0])
])
# print('cam norm', cam_norml)
else:
cam_norm = cam_recrop
return cam_norm
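# Illustrative sketch (added here for clarity, not part of the original module): with
# identical scale, start_pt and N, cam_orig2crop_center(normalize=True) inverts
# cam_init2orig, so a made-up camera round-trips back to itself.
def _demo_cam_roundtrip():
    cam_init = np.array([0.9, 0.05, -0.1])
    scale, start_pt = 0.5, np.array([30.0, 40.0])
    cam_orig = cam_init2orig(cam_init, scale=scale, start_pt=start_pt, N=224)
    cam_back = cam_orig2crop_center(cam_orig, scale=scale, start_pt=start_pt,
                                    N=224, normalize=True)
    return np.allclose(cam_back, cam_init)  # True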
def cam_orig2boxcrop(cam, scale, start_pt, N=256, normalize=True):
"""
Args:
        cam (3,): (s, tx, ty), camera in original image coordinates.
scale (float): scale = resize_h / orig_h or (resize_w / orig_w)
start_pt (2,): (lt_x, lt_y)
N (int): hmr_image_size (224) or IMG_SIZE
normalize (bool)
Returns:
"""
cam_recrop = np.hstack([
cam[0] * scale,
cam[1:] - start_pt / cam[0]
])
# print('cam re-crop', cam_recrop)
if normalize:
cam_norm = np.hstack([
cam_recrop[0] * 2. / N,
cam_recrop[1:] - N / (2 * cam_recrop[0])
])
# print('cam norm', cam_norm)
else:
cam_norm = cam_recrop
return cam_norm
def cam_process(cam_init, scale_150, start_pt_150, scale_proc, start_pt_proc, HMR_IMG_SIZE=224, IMG_SIZE=256):
"""
Args:
cam_init:
scale_150:
start_pt_150:
scale_proc:
start_pt_proc:
HMR_IMG_SIZE:
IMG_SIZE:
Returns:
"""
# print(HMR_IMG_SIZE, IMG_SIZE)
cam_orig = cam_init2orig(cam_init, scale=scale_150, start_pt=start_pt_150, N=HMR_IMG_SIZE)
cam_crop = cam_orig2crop_center(cam_orig, scale=scale_proc, start_pt=start_pt_proc, N=IMG_SIZE, normalize=True)
return cam_orig, cam_crop
def intrinsic_mtx(f, c):
"""
    Obtain the intrinsic camera matrix.
    Args:
        f: np.array, 1 x 2, the focal length of the camera, (fx, fy)
        c: np.array, 1 x 2, the center of the camera, (px, py)
    Returns:
        - cam_mat: np.array, 3 x 3, the intrinsic camera matrix.
"""
return np.array([[f[1], 0, c[1]],
[0, f[0], c[0]],
[0, 0, 1]], dtype=np.float32)
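# Illustrative sketch (added here for clarity, not part of the original module): with
# f = (fx, fy) and c = (px, py) as documented above, the matrix is assembled in
# (y, x) order, so fy and py land in the first row. Numbers are made up.
def _demo_intrinsic_mtx():
    K = intrinsic_mtx(f=np.array([500.0, 520.0]), c=np.array([128.0, 112.0]))
    # K == [[520.,   0., 112.],
    #       [  0., 500., 128.],
    #       [  0.,   0.,   1.]]
    return K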
def extrinsic_mtx(rt, t):
"""
Obtain extrinsic matrix of camera.
Args:
rt: np.array, 1 x 3, the angle of rotations.
t: np.array, 1 x 3, the translation of camera center.
Returns:
        - ext_mat: np.array, 4 x 4, the homogeneous extrinsic matrix of the camera.
"""
# R is (3, 3)
R = cv2.Rodrigues(rt)[0]
t = np.reshape(t, newshape=(3, 1))
Rc = np.dot(R, t)
ext_mat = np.hstack((R, -Rc))
ext_mat = np.vstack((ext_mat, [0, 0, 0, 1]))
ext_mat = ext_mat.astype(np.float32)
return ext_mat
def extrinsic(rt, t):
"""
Obtain extrinsic matrix of camera.
Args:
rt: np.array, 1 x 3, the angle of rotations.
t: np.array, 1 x 3, or (3,) the translation of camera center.
Returns:
- R: np.ndarray, 3 x 3
- t: np.ndarray, 1 x 3
"""
R = cv2.Rodrigues(rt)[0]
t = np.reshape(t, newshape=(1, 3))
return R, t
def euler2matrix(rt):
"""
Obtain rotation matrix from euler angles
Args:
rt: np.array, (3,)
Returns:
R: np.array, (3,3)
"""
Rx = np.array([[1, 0, 0],
                   [0, np.cos(rt[0]),  # dataset-row completion (api: numpy.cos); the rest of the matrix is truncated in the source
"""
Methods for geometric projection.
.. include:: ../include/links.rst
"""
from IPython import embed
import numpy as np
from scipy.spatial import KDTree
#import warnings
#warnings.simplefilter('error', RuntimeWarning)
def rotate(x, y, rot, clockwise=False):
r"""
Rotate a set of coordinates about :math:`(x,y) = (0,0)`.
.. warning::
The ``rot`` argument should be a float. If it is an array, the code
will either fault if ``rot`` cannot be broadcast to match ``x`` and
``y`` or the rotation will be different for each ``x`` and ``y``
element.
Args:
x (array-like):
Cartesian x coordinates.
y (array-like):
Cartesian y coordinates. Shape must match ``x``, but this is not
checked.
rot (:obj:`float`):
Rotation angle in radians.
clockwise (:obj:`bool`, optional):
Perform a clockwise rotation. Rotation is counter-clockwise by
default. By definition and implementation, setting this to True is
identical to calling the function with a negative counter-clockwise
rotation. I.e.::
xr, yr = rotate(x, y, rot, clockwise=True)
_xr, _yr = rotate(x, y, -rot)
assert numpy.array_equal(xr, _xr) and numpy.array_equal(yr, _yr)
Returns:
:obj:`tuple`: Two `numpy.ndarray`_ objects with the rotated x
and y coordinates.
"""
if clockwise:
return rotate(x, y, -rot)
cosr = np.cos(rot)
sinr = np.sin(rot)
_x = np.atleast_1d(x)
_y = np.atleast_1d(y)
return _x*cosr - _y*sinr, _y*cosr + _x*sinr
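# Illustrative sketch (added here for clarity, not part of the original module): a
# 90-degree counter-clockwise rotation sends (1, 0) to (0, 1); the clockwise flag
# sends it to (0, -1). Scalars are promoted to 1-element arrays by atleast_1d.
def _demo_rotate():
    xr, yr = rotate(1.0, 0.0, np.pi / 2)                  # xr ~ [0.], yr ~ [1.]
    xc, yc = rotate(1.0, 0.0, np.pi / 2, clockwise=True)  # xc ~ [0.], yc ~ [-1.]
    return xr, yr, xc, yc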
def deriv_rotate(x, y, rot, dxdp=None, dydp=None, drotdp=None, clockwise=False):
r"""
Rotate the provided coordinates about :math:`(x,y) = (0,0)`, and calculate
the derivatives of the returned coordinates with respect to a set of input
parameters.
The set of input parameters can be unknown, i.e., defined in the calling
method, or the single parameter of this function (``rot``). That is, if the
arguments of this function depend on upstream set of parameters, their
derivatives with respect to these parameters should be passed to the
relevant keyword arguments. Importantly, if you supply one of either
``dxdp``, ``dydp``, or ``drotdp``, you must supply them all.
See additional documentation of the :func:`rotate` method.
Args:
x (array-like):
Cartesian x coordinates.
y (array-like):
Cartesian y coordinates. Shape must match ``x``, but this is not
checked.
rot (:obj:`float`):
Rotation angle in radians.
dxdp (array-like, optional):
Derivative of the Cartesian x coordinates w.r.t. a set of unknown
parameters. Shape has one more dimension than ``x``, where the size
            of that dimension, :math:`m`, is the number of parameters. If
None, the provided x coordinates are assumed to be independent of
any model parameters.
dydp (array-like, optional):
Derivative of the Cartesian y coordinates w.r.t. a set of unknown
parameters. Shape has one more dimension than ``x``, where the size
            of that dimension, :math:`m`, is the number of parameters. If
None, the provided y coordinates are assumed to be independent of
any model parameters.
drotdp (array-like, optional):
Derivative of the rotation angle w.r.t. a set of unknown parameters.
Shape is :math:`(m,)`, where :math:`m` is the number of parameters.
If None, the rotation is considered to be the only model parameter.
clockwise (:obj:`bool`, optional):
Perform a clockwise rotation. Rotation is counter-clockwise by
default. By definition and implementation, setting this to True is
identical to calling the function with a negative counter-clockwise
rotation.
Returns:
:obj:`tuple`: Four `numpy.ndarray`_ objects: the rotated x coordinates,
the rotated y coordinates, the derivate of the rotated x coordinates
w.r.t. a set of parameters, and the derivative of the rotated y
coordinates w.r.t. a set of parameters.
"""
# Check derivative input
isNone = [i is None for i in [dxdp, dydp, drotdp]]
if np.any(isNone) and not np.all(isNone):
raise ValueError('Must provide all of dxdp, dydp, and drotdp, or none of them.')
# Convert to numpy arrays
_x = np.atleast_1d(x)
_y = np.atleast_1d(y)
# If dxdp is None, they all should be.
if dxdp is None:
# Set the input derivatives to be with respect to rot
dxdp = np.zeros(_x.shape+(1,), dtype=float)
dydp = np.zeros(_y.shape+(1,), dtype=float)
drotdp = np.ones((1,), dtype=float)
if clockwise:
return deriv_rotate(x, y, -rot, dxdp=dxdp, dydp=dydp, drotdp=-drotdp)
_dxdp = np.atleast_1d(dxdp)
_dydp = np.atleast_1d(dydp)
_drotdp = np.atleast_1d(drotdp)
cosr = np.cos(rot)
sinr = np.sin(rot)
xr = _x*cosr - _y*sinr
yr = _y*cosr + _x*sinr
dxrdp = _dxdp*cosr - _x[...,None]*sinr*_drotdp[None,:] - _dydp*sinr \
- _y[...,None]*cosr*_drotdp[None,:]
dyrdp = _dydp*cosr - _y[...,None]*sinr*_drotdp[None,:] + _dxdp*sinr \
+ _x[...,None]*cosr*_drotdp[None,:]
return xr, yr, dxrdp, dyrdp
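# Illustrative sketch (added here for clarity, not part of the original module): a
# finite-difference check of the derivatives returned by deriv_rotate when the
# rotation angle is the only model parameter (the default when dxdp, dydp and
# drotdp are not supplied).
def _demo_deriv_rotate_check():
    x0, y0, rot0, eps = np.array([1.0, 2.0]), np.array([0.5, -1.0]), 0.3, 1e-6
    xr, yr, dxr, dyr = deriv_rotate(x0, y0, rot0)
    xp, yp = rotate(x0, y0, rot0 + eps)
    ok_x = np.allclose(dxr[..., 0], (xp - xr) / eps, atol=1e-5)
    ok_y = np.allclose(dyr[..., 0], (yp - yr) / eps, atol=1e-5)
    return ok_x and ok_y  # True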
def projected_polar(x, y, pa, inc):
r"""
Calculate the in-plane polar coordinates of an inclined plane.
The position angle, :math:`\phi_0`, is the rotation from the :math:`y=0`
axis through the :math:`x=0` axis. I.e., :math:`\phi_0 = \pi/2` is along the
:math:`+x` axis and :math:`\phi_0 = \pi` is along the :math:`-y` axis.
The inclination, :math:`i`, is the angle of the plane normal with respect to
the line-of-sight. I.e., :math:`i=0` is a face-on (top-down) view of the
plane and :math:`i=\pi/2` is an edge-on view.
The returned coordinates are the projected distance from the :math:`(x,y) =
(0,0)` and the project azimuth. The projected azimuth, :math:`\theta`, is
defined to increase in the same direction as :math:`\phi_0`, with
:math:`\theta = 0` at :math:`\phi_0`.
.. warning::
Calculation of the disk-plane y coordinate is undefined at :math:`i =
\pi/2`. Only use this function with :math:`i < \pi/2`!
Args:
x (array-like):
Cartesian x coordinates.
y (array-like):
Cartesian y coordinates. Shape must match ``x``, but this is not
checked.
pa (:obj:`float`)
Position angle, as defined above, in radians.
inc (:obj:`float`)
Inclination, as defined above, in radians.
Returns:
:obj:`tuple`: Returns two arrays with the projected radius
and in-plane azimuth. The radius units are identical to the
provided cartesian coordinates. The azimuth is in radians
over the range :math:`[0,2\pi)`.
"""
xd, yd = rotate(x, y, np.pi/2-pa, clockwise=True)
yd /= np.cos(inc)
return np.sqrt(xd**2 + yd**2), np.arctan2(-yd,xd) % (2*np.pi)
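# Illustrative sketch (added here for clarity, not part of the original module): with
# pa = pi/2 (position angle along +x) and an inclination of 60 degrees, a point on
# the +x axis keeps its radius at azimuth 0, while a point on the projected minor
# axis (+y) is deprojected by 1/cos(60 deg) = 2.
def _demo_projected_polar():
    r, th = projected_polar(np.array([1.0, 0.0]), np.array([0.0, 1.0]),
                            pa=np.pi / 2, inc=np.pi / 3)
    return r, th  # r ~ [1., 2.]; th[0] ~ 0.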
def deriv_projected_polar(x, y, pa, inc, dxdp=None, dydp=None, dpadp=None, dincdp=None):
r"""
Calculate the in-plane polar coordinates of an inclined plane and their
derivatives with respect to a set of input parameters.
The set of input parameters can be unknown, i.e. defined in the calling
method, or the two parameters of this function (``pa``, ``inc``). That is,
if the arguments of this function depend on upstream set of parameters,
their derivatives with respect to these parameters should be passed to the
relevant keyword arguments. Importantly, if you supply one of either
``dxdp``, ``dydp``, ``dpadp``, ``dincdp``, you must supply them all.
See additional documentation of the :func:`projected_polar` method. The
same warning there about the calculation when :math:`i = \pi/2` holds for
the derivatives, as well.
Args:
x (array-like):
Cartesian x coordinates.
y (array-like):
Cartesian y coordinates. Shape must match ``x``, but this is not
checked.
pa (:obj:`float`)
Position angle in radians; see :func:`projected_polar`.
inc (:obj:`float`)
Inclination in radians; see :func:`projected_polar`.
dxdp (array-like, optional):
Derivative of the Cartesian x coordinates w.r.t. a set of unknown
parameters. Shape has one more dimension than ``x``, where the size
            of that dimension, :math:`m`, is the number of parameters. If
None, the provided x coordinates are assumed to be independent of
any model parameters.
dydp (array-like, optional):
Derivative of the Cartesian y coordinates w.r.t. a set of unknown
parameters. Shape has one more dimension than ``x``, where the size
            of that dimension, :math:`m`, is the number of parameters. If
None, the provided y coordinates are assumed to be independent of
any model parameters.
dpadp (array-like, optional):
Derivative of the position angle w.r.t. a set of unknown parameters.
Shape is :math:`(m,)`, where :math:`m` is the number of parameters.
            If None, the position angle is considered to be one of the two model
parameters.
dincdp (array-like, optional):
Derivative of the inclination w.r.t. a set of unknown parameters.
Shape is :math:`(m,)`, where :math:`m` is the number of parameters.
            If None, the inclination is considered to be one of the two model
parameters.
Returns:
:obj:`tuple`: Returns four arrays with the projected radius and in-plane
azimuth and their derivatives (order is radius, aziumth, radius
derivative, azimuth derivative). The radius units are identical to the
provided cartesian coordinates. The azimuth is in radians over the range
:math:`[0,2\pi)`.
"""
# Check derivative input
isNone = [i is None for i in [dxdp, dydp, dpadp, dincdp]]
if np.any(isNone) and not np.all(isNone):
raise ValueError('Must provide all of dxdp, dydp, dpadp, and dincdp, or none of them.')
# Convert to numpy arrays
_x = np.atleast_1d(x)
_y = np.atleast_1d(y)
# If dxdp is None, they all should be.
if dxdp is None:
# Set the input derivatives to be with respect to pa and inc
dxdp = np.zeros(_x.shape+(2,), dtype=float)
dydp = np.zeros(_y.shape+(2,), dtype=float)
dpadp = np.array([1., 0.], dtype=float)
dincdp = np.array([0., 1.], dtype=float)
_dxdp = np.atleast_1d(dxdp)
_dydp = np.atleast_1d(dydp)
_dpadp = np.atleast_1d(dpadp)
_dincdp = np.atleast_1d(dincdp)
# Calculate the rotated coordinates (note the propagation of the derivative
# given the calculation of the applied rotation based on the position angle)
xd, yr, dxd, dyr = deriv_rotate(_x, _y, np.pi/2-pa, dxdp=_dxdp, dydp=_dydp, drotdp=-_dpadp,
clockwise=True)
# Project the y axis
cosi = np.cos(inc)
yd = yr / cosi
dyd = dyr / cosi + yr[...,None] * _dincdp[None,:] * np.sin(inc) / cosi**2
# Calculate the polar coordinates
    r = np.sqrt(xd**2 + yd**2)  # dataset-row completion (api: numpy.sqrt)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Tests of BasePhotometry.
.. codeauthor:: <NAME> <<EMAIL>>
"""
import pytest
import numpy as np
from tempfile import TemporaryDirectory
from astropy.io import fits
from astropy.wcs import WCS
import conftest # noqa: F401
from photometry import BasePhotometry, PixelQualityFlags, CorrectorQualityFlags
#import photometry.BasePhotometry.hdf5_cache as bf
DUMMY_TARGET = 260795451
DUMMY_KWARG = {'sector': 1, 'camera': 3, 'ccd': 2}
#--------------------------------------------------------------------------------------------------
def test_basephotometry_invalid_input(SHARED_INPUT_DIR):
with TemporaryDirectory() as OUTPUT_DIR:
# Test invalid datatype:
with pytest.raises(ValueError) as e:
with BasePhotometry(DUMMY_TARGET, SHARED_INPUT_DIR, OUTPUT_DIR, datasource='invalid', **DUMMY_KWARG):
pass
assert str(e.value) == "Invalid datasource: 'invalid'"
# Test invalid cache option:
with pytest.raises(ValueError) as e:
with BasePhotometry(DUMMY_TARGET, SHARED_INPUT_DIR, OUTPUT_DIR, datasource='ffi', cache='invalid', **DUMMY_KWARG):
pass
assert str(e.value) == "Invalid cache: 'invalid'"
# Test an input directory that does not exist:
with pytest.raises(FileNotFoundError) as e:
with BasePhotometry(DUMMY_TARGET, 'does/not/exist', OUTPUT_DIR, datasource='ffi', **DUMMY_KWARG):
pass
assert str(e.value).startswith('Not a valid input directory: ')
# Test asking for FFI target without providing SECTOR, CAMERA and CCD:
with pytest.raises(ValueError) as e:
with BasePhotometry(DUMMY_TARGET, SHARED_INPUT_DIR, OUTPUT_DIR, datasource='ffi'):
pass
assert str(e.value) == "SECTOR, CAMERA and CCD keywords must be provided for FFI targets."
# Test target not in the catalog:
with pytest.raises(Exception) as e:
with BasePhotometry(0, SHARED_INPUT_DIR, OUTPUT_DIR, datasource='ffi', **DUMMY_KWARG):
pass
assert str(e.value) == "Star could not be found in catalog: 0"
#--------------------------------------------------------------------------------------------------
def test_stamp(SHARED_INPUT_DIR):
with TemporaryDirectory() as OUTPUT_DIR:
with BasePhotometry(DUMMY_TARGET, SHARED_INPUT_DIR, OUTPUT_DIR, datasource='ffi', **DUMMY_KWARG) as pho:
pho._stamp = (50, 60, 50, 70)
pho._set_stamp()
cols, rows = pho.get_pixel_grid()
print('Rows:')
print(rows)
print(rows.shape)
print('Cols:')
print(cols)
print(cols.shape)
assert(rows.shape == (10, 20))
assert(cols.shape == (10, 20))
assert(rows[0,0] == 51)
assert(cols[0,0] == 51)
assert(rows[-1,0] == 60)
assert(cols[-1,0] == 51)
assert(rows[-1,-1] == 60)
assert(cols[-1,-1] == 70)
pho.resize_stamp(up=12)
cols, rows = pho.get_pixel_grid()
print('Rows:')
print(rows)
print(rows.shape)
print('Cols:')
print(cols)
print(cols.shape)
assert(rows.shape == (22, 20))
assert(cols.shape == (22, 20))
pho.resize_stamp(down=2)
cols, rows = pho.get_pixel_grid()
print('Rows:')
print(rows)
print(rows.shape)
print('Cols:')
print(cols)
print(cols.shape)
assert(rows.shape == (24, 20))
assert(cols.shape == (24, 20))
pho.resize_stamp(right=3)
cols, rows = pho.get_pixel_grid()
print('Rows:')
print(rows)
print(rows.shape)
print('Cols:')
print(cols)
print(cols.shape)
assert(rows.shape == (24, 23))
assert(cols.shape == (24, 23))
pho.resize_stamp(left=3)
cols, rows = pho.get_pixel_grid()
print('Rows:')
print(rows)
print(rows.shape)
print('Cols:')
print(cols)
print(cols.shape)
assert(rows.shape == (24, 26))
assert(cols.shape == (24, 26))
# Set a stamp that is not going to work:
with pytest.raises(ValueError) as e:
pho.resize_stamp(left=-100, right=-100)
assert str(e.value) == "Invalid stamp selected"
#--------------------------------------------------------------------------------------------------
def test_stamp_width_height(SHARED_INPUT_DIR):
with TemporaryDirectory() as OUTPUT_DIR:
with BasePhotometry(DUMMY_TARGET, SHARED_INPUT_DIR, OUTPUT_DIR, datasource='ffi', **DUMMY_KWARG) as pho:
print("Original")
orig_stamp = pho._stamp
orig_sumimage = pho.sumimage
print(orig_stamp)
# Change the size of the stamp:
print("New")
pho.resize_stamp(width=25, height=11)
large_stamp = pho._stamp
large_sumimage = pho.sumimage
print(large_stamp)
cols, rows = pho.get_pixel_grid()
print(cols.shape, rows.shape)
assert cols.shape == (11, 25)
assert rows.shape == (11, 25)
assert large_sumimage.shape == (11, 25)
# Make the stamp the same size as the original again:
pho.resize_stamp(width=17, height=17)
print(pho._stamp)
assert pho._stamp == orig_stamp
np.testing.assert_allclose(pho.sumimage, orig_sumimage)
# Make the stamp the same size large one, but only changing width:
pho.resize_stamp(width=25)
print(pho._stamp)
cols, rows = pho.get_pixel_grid()
assert cols.shape == (17, 25)
assert rows.shape == (17, 25)
# Make really large stamp now:
pho.resize_stamp(height=25)
print(pho._stamp)
cols, rows = pho.get_pixel_grid()
assert cols.shape == (25, 25)
assert rows.shape == (25, 25)
#--------------------------------------------------------------------------------------------------
def test_images(SHARED_INPUT_DIR):
with TemporaryDirectory() as OUTPUT_DIR:
with BasePhotometry(DUMMY_TARGET, SHARED_INPUT_DIR, OUTPUT_DIR, datasource='ffi', **DUMMY_KWARG) as pho:
pho._stamp = (50, 60, 50, 70)
pho._set_stamp()
for img in pho.images:
assert(img.shape == (10, 20))
#--------------------------------------------------------------------------------------------------
def test_backgrounds(SHARED_INPUT_DIR):
with TemporaryDirectory() as OUTPUT_DIR:
with BasePhotometry(DUMMY_TARGET, SHARED_INPUT_DIR, OUTPUT_DIR, datasource='ffi', **DUMMY_KWARG) as pho:
pho._stamp = (50, 60, 50, 70)
pho._set_stamp()
for img in pho.backgrounds:
assert(img.shape == (10, 20))
#--------------------------------------------------------------------------------------------------
@pytest.mark.parametrize('datasource', ['tpf', 'ffi'])
def test_catalog(SHARED_INPUT_DIR, datasource):
with TemporaryDirectory() as OUTPUT_DIR:
with BasePhotometry(DUMMY_TARGET, SHARED_INPUT_DIR, OUTPUT_DIR, datasource=datasource, **DUMMY_KWARG) as pho:
print(pho.catalog)
assert(DUMMY_TARGET in pho.catalog['starid'])
assert(pho.target_pos_ra >= np.min(pho.catalog['ra']))
assert(pho.target_pos_ra <= np.max(pho.catalog['ra']))
assert(pho.target_pos_dec >= np.min(pho.catalog['dec']))
assert(pho.target_pos_dec <= np.max(pho.catalog['dec']))
indx_main = (pho.catalog['starid'] == DUMMY_TARGET)
# Test the real position - TODO: How do we find this?
#np.testing.assert_allclose(pho.target_pos_column, 1978.082)
#np.testing.assert_allclose(pho.target_pos_row, 652.5701)
np.testing.assert_allclose(pho.catalog[indx_main]['column'], pho.target_pos_column)
np.testing.assert_allclose(pho.catalog[indx_main]['row'], pho.target_pos_row)
#--------------------------------------------------------------------------------------------------
@pytest.mark.parametrize('datasource', ['tpf', 'ffi'])
def test_catalog_attime(SHARED_INPUT_DIR, datasource):
with TemporaryDirectory() as OUTPUT_DIR:
with BasePhotometry(DUMMY_TARGET, SHARED_INPUT_DIR, OUTPUT_DIR, datasource=datasource, **DUMMY_KWARG) as pho:
time = pho.lightcurve['time']
cat = pho.catalog_attime(time[0])
assert(cat.colnames == pho.catalog.colnames)
# TODO: Add more tests here, once we change the test input data
#--------------------------------------------------------------------------------------------------
@pytest.mark.parametrize('datasource', ['tpf', 'ffi'])
def test_aperture(SHARED_INPUT_DIR, datasource):
with TemporaryDirectory() as OUTPUT_DIR:
with BasePhotometry(DUMMY_TARGET, SHARED_INPUT_DIR, OUTPUT_DIR, datasource=datasource, **DUMMY_KWARG) as pho:
print("------------------------------------")
print(pho.aperture)
print(pho.sumimage.shape)
print(pho.aperture.shape)
assert(pho.sumimage.shape == pho.aperture.shape)
# For this target, all the pixels should be available:
assert np.all(pho.aperture & 1 != 0)
# This target should fall on CCD output B:
assert np.all(pho.aperture & 64 != 0)
if datasource == 'ffi':
# For the FFI's all pixels for this target was used for the backgrounds
# (the target is not bright enough to be masked out)
assert np.all(pho.aperture & 4 != 0)
# Make the stamp one pixel smaller:
# The sumimage and aperture should still match in size!
pho.resize_stamp(right=-1)
print(pho.sumimage.shape)
print(pho.aperture.shape)
assert pho.sumimage.shape == pho.aperture.shape
# Try this very bright star, where the centre is saturated.
# The aperture for this star should have pixels near the centre that
# were not used in the background calculation for FFIs:
with BasePhotometry(267211065, SHARED_INPUT_DIR, OUTPUT_DIR, datasource='ffi', plot=True, **DUMMY_KWARG) as pho:
central_pixel = pho.aperture[int(
|
np.round(pho.target_pos_row_stamp)
|
numpy.round
|
'''
This is a set of functions for dealing with PIV data
Most functions are for 2D data.
'''
#Import matlab PIV data
def loadDataset(path,mats,structs,matlabData = None):
'''
Import a matlab data file
Inputs:
path - Path to the matlab file
mats - names of the matlab matrices to be imported.
structs - names of the structures to be imported as dictionaries
matlabData - flag to transpose matrices if data comes from matlab
Output:
Each of the matrices and structures in the order you specified in the input
'''
import numpy as np
import h5py
if matlabData is None:
matlabData = False
    f = h5py.File(path, 'r')
print(list(f.keys()))
Temp = np.asarray(['Swirl'])
ret = []
for i in mats:
#print(i)
Temp = np.asarray(f[i])
if matlabData:
if Temp.ndim == 2:
Temp = np.transpose(Temp,(1,0))
elif Temp.ndim == 3:
Temp = np.transpose(Temp,(2,1,0))
ret.append(Temp)
del Temp
for i in structs:
#print(i)
        TempS = {k : f[i][k][()] #Generate a dictionary linking all values in cond with their names
for k in f[i].keys()}
ret.append(TempS)
del TempS
f.close()
return ret
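#Example usage of loadDataset (an editorial sketch: the file name and the
#variable names stored inside it are hypothetical, not part of the original module)
def _example_loadDataset():
    U, V, Cond = loadDataset('run01.mat', ['U', 'V'], ['Cond'], matlabData=True)
    print(U.shape, V.shape, list(Cond.keys()))
    return U, V, Cond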
#Save data to an HDF5 file
def saveDataset(path,names,data,DictNames,DictData):
'''
Save dataset to an HDF5 file
Inputs:
path - Path to the save file
names - names for each set of data in list
data - list of data to be saved.
DictNames - Names of a list of dicts to save as subgroups
DictData - Data in the dicts
Output:
An HDF5 file at the path specified
'''
import h5py
import os
if os.path.exists(path):
question = 'Delete original file (default: no)'
choice = query_yes_no(question, default="no")
if choice:
os.remove(path)
print("Original file deleted")
    f = h5py.File(path, 'a')
for i in range(len(names)):
#print(names[i])
f.create_dataset(names[i], data=data[i])
for j in range(len(DictNames)):
Set = f.create_group(DictNames[j]) #,(len(list(Cond.items())),)
for i in list(DictData[j].items()):
Set.create_dataset(i[0], data=i[1])
print("File saved")
f.close()
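#Example usage of saveDataset (an editorial sketch with hypothetical names),
#mirroring the loadDataset example above. Note that saveDataset also relies on
#a query_yes_no helper that is assumed to be defined elsewhere in this module.
def _example_saveDataset(U, V, Cond):
    saveDataset('run01_processed.h5', ['U', 'V'], [U, V], ['Cond'], [Cond])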
#Plot 2D scalar field
def plotScalarField(S,X=None,Y=None,bound=None,saveFolder=None):
'''
Plot 2D scalar fields
Inputs:
X - 2D array with columns constant
Y - 2D array with rows constant
S - the 2D field to be plotted. Must be the same size and shape as X,Y
    bound - the symmetric bound of the colorbar (-bound, bound); default is max of abs(S)/5
Output:
Displays a plot of the scalar field
'''
import numpy as np
import matplotlib.pyplot as plt
if bound is None:
bound = np.round(np.max(np.absolute(S))/5)
f = plt.figure(figsize = [8,3])
if X is None:
plt.pcolor(S, cmap='RdBu_r');
plt.axis('scaled')
plt.xlim([0, S.shape[1]])
plt.ylim([0, S.shape[0]])
else:
plt.pcolor(X,Y,S, cmap='RdBu_r');
plt.axis('scaled')
plt.xlim([X.min(), X.max()])
plt.ylim([Y.min(), Y.max()])
plt.clim([-1*bound, bound])
plt.colorbar()
if saveFolder is not None:
f.savefig(saveFolder, transparent=True, bbox_inches='tight', pad_inches=0)
return [f, plt.gca()]
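#Example usage of plotScalarField on synthetic data (an editorial sketch; the
#Gaussian bump and the colorbar bound of 0.5 are arbitrary choices)
def _example_plotScalarField():
    import numpy as np
    Xg, Yg = np.meshgrid(np.linspace(-3, 3, 120), np.linspace(-1, 1, 40))
    Sg = np.exp(-(Xg**2 + Yg**2))
    f, ax = plotScalarField(Sg, X=Xg, Y=Yg, bound=0.5)
    return f, ax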
#Gets locations of distinct scalar blobs in each frame that are bigger than a certain threshold (in number of vectors)
def findBlobsSlow(S,Thresh=None):
'''
Note!!!! This code does not work with 3D data yet.
Finds distinct blobs of a scalar that are bigger than a certain size (Thresh)
Inputs:
S - sets of 2D scalar fields that have already been thresholded (0s or 1s)
Thresh - Number of vectors that must be contained in a blob. If not defined, then no threshold filter will be used
    Outputs (in return order):
    num_features - Total number of blobs larger than the threshold size
    features_per_frame - Number of blobs identified in each frame
    labeled_array - The labeled array of blobs (in the format of ndimage.measurements.label)
    cent - Centre of mass (row, col) of each blob in each frame
'''
import numpy as np
from scipy.ndimage.measurements import label,find_objects,center_of_mass
import copy
uSize = S.shape
labeled_array, num_features = label(S)
print('There are ', num_features, ' features initially identified')
if Thresh is not None:
loc = find_objects(labeled_array)
labeled_array_init = copy.copy(labeled_array)
labeled_array[:] = 0;
num_features_init = copy.copy(num_features)
num_features = 0;
for i in range(num_features_init):
#print(np.max(labeled_array_init[loc[i]]),labeled_array_init[loc[i]],np.count_nonzero(labeled_array_init[loc[i]]))
#print(labeled_array_init[loc[i]])
#print(np.max(labeled_array_init[loc[i]]),np.count_nonzero(labeled_array_init[loc[i]]))
if np.count_nonzero(labeled_array_init[loc[i]])>Thresh:
#print('yes')
num_features += 1;
labeled_array[labeled_array_init==i+1] = num_features
print('A total of ', num_features, ' are larger than the threshold size')
features_per_frame = np.zeros(uSize[2],dtype=int);
cent = [];
for i in range(uSize[2]):
features_per_frame[i] = len(np.unique(labeled_array[:,:,i])[1:])
cent.append(center_of_mass(S[:,:,i],labeled_array[:,:,i],np.unique(labeled_array[:,:,i])[1:]))
return [num_features, features_per_frame, labeled_array, cent]
#Gets locations of distinct scalar blobs in each frame that are bigger than a certain threshold (in number of vectors)
def findBlobs(S,Thresh=None,EdgeBound=None):
'''
Finds distinct blobs of a scalar that are bigger than a certain size (Thresh)
Now new and improved! and much faster!
Inputs:
S - sets of 2D scalar fields that have already been thresholded (0s or 1s). The third dimension denotes the frame
Thresh - Number of vectors that must be contained in a blob. If not defined, then no threshold filter will be used
EdgeBound - Crops all blobs that are too close to the edge of the domain. No crop if left as none.
    Outputs (in return order):
    num_features - Total number of features across all frames that pass the filters
    features_per_frame - Number of features identified in each frame
    labeled_array - The labeled array of blobs (in the format of the ndimage.measurements.label function). This includes all labels, even ones that might be too close to the edge of the domain
    cent - Centre of mass (row, col) of each blob in each frame, rounded to the nearest index
'''
import numpy as np
from scipy.ndimage.measurements import label,find_objects,center_of_mass
uSize = S.shape
if S.ndim == 3:
str_3D=np.array([[[0, 0, 0],
[0, 0, 0],
[0, 0, 0]],
[[0, 1, 0],
[1, 1, 1],
[0, 1, 0]],
[[0, 0, 0],
[0, 0, 0],
[0, 0, 0]]], dtype='uint8')
#str_3D = str_3D.transpose(2,1,0)
labeled_array, num_features = label(S.transpose(2,1,0),str_3D)
labeled_array = labeled_array.transpose(2,1,0)
else:
labeled_array, num_features = label(S)
#print(np.unique(labeled_array))
#print(np.unique(labeled_array[:,:,0]))
#print(labeled_array.shape)
print('There are ', num_features, ' features identified')
if Thresh is not None:
loc = find_objects(labeled_array)
labeled_array_out = labeled_array.copy()
counts = np.bincount(labeled_array.ravel())
ind = np.where(counts>Thresh)[0][1:]
mask = np.in1d(labeled_array.ravel(), ind).reshape(labeled_array.shape)
labeled_array_out[~mask] = 0
[_, labeled_array_out] = np.unique(labeled_array_out,return_inverse=True)
labeled_array_out = labeled_array_out.reshape(labeled_array.shape)
num_features_out = len(ind)
print('A total of ', num_features_out, ' are larger than the threshold size')
else:
labeled_array_out = labeled_array
num_features_out = num_features
#a = labeled_array_out[:,:,535]
#b = a.copy()
#c = list(range(uSize[1]))
#b[:,:] = list(range(uSize[1])))
#for i in np.unique(a):
#print(np.unique(a)[15])
#print(a[a==i])
features_per_frame = np.zeros(uSize[2],dtype=int);
cent = [];
for i in range(uSize[2]):
features_per_frame[i] = len(np.unique(labeled_array_out[:,:,i])[1:])
cent.append(center_of_mass(S[:,:,i],labeled_array_out[:,:,i],np.unique(labeled_array_out[:,:,i])[1:]))
#Round center locations to nearest index
for i in range(len(cent)):
for j in range(len(cent[i])):
cent[i][j] = (int(round(cent[i][j][0])), int(round(cent[i][j][1])))
#Remove all centers too close to edge of domain
if EdgeBound is not None:
newCent = []
for i in range(len(cent)):
newCent.append([])
features_per_frame[i] = 0
for j in range(len(cent[i])):
if (cent[i][j][0]>EdgeBound-1 and cent[i][j][0]<uSize[0]-EdgeBound) and (cent[i][j][1]>EdgeBound-1 and cent[i][j][1] <uSize[1]-EdgeBound):
newCent[i].append(cent[i][j])
features_per_frame[i]+=1
num_features_out = sum(features_per_frame)
cent = newCent
print('Of these', num_features_out, ' are far enough away from edge of domain')
return [num_features_out, features_per_frame, labeled_array_out, cent]
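#Example usage of findBlobs (an editorial sketch; the swirl field, the threshold
#of 1.5, the blob size of 20 and the edge margin of 5 are all hypothetical)
def _example_findBlobs(Swirl):
    import numpy as np
    S = (np.absolute(Swirl) > 1.5).astype(int)  #binarise the scalar field first
    num_features, features_per_frame, labeled_array, cent = findBlobs(S, Thresh=20, EdgeBound=5)
    return num_features, features_per_frame, labeled_array, cent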
# Given the centers of blobs, this function dissects the vector field into a number of square thumbnails
def getThumbnails2D(mats,cent,BoxSize):
'''
    Given the centers of blobs, this function dissects the vector field into a number of square thumbnails of side length BoxSize*2+1.
    All vectors inside the frame but outside the domain are padded with NaNs
Inputs:
mats - A list of matrices from which to find thumbnails
    cent - centers of each vortex as output by findBlobs
BoxSize - final thumbnail size is BoxSize*2+1 squared
Outputs:
Thumbnail versions of each of the matrices in mats
'''
import numpy as np
uSize = mats[0].shape
out = []
#find out how many features there are
#Round all centroids to integers
num_features = 0
for i in range(len(cent)):
for j in range(len(cent[i])):
#print(i, j)
cent[i][j] = (int(round(cent[i][j][0])), int(round(cent[i][j][1])))
num_features += 1
for k in range(len(mats)):
#initialize thumbnail matrices
U = np.zeros([2*BoxSize+1,2*BoxSize+1,num_features])
U[:] = np.NAN
#pad out velocity fields so that there are NaNs around in all directions
U2 = np.zeros([uSize[0]+2*BoxSize,uSize[1]+2*BoxSize,uSize[2]])
U2[:] = np.NAN
U2[BoxSize:-1*BoxSize,BoxSize:-1*BoxSize,:] = mats[k].copy()
#Now get the thumbnails
thumb = 0
for i in range(len(cent)):
for j in range(len(cent[i])):
U[:,:,thumb] = U2[cent[i][j][0]:cent[i][j][0]+2*BoxSize+1,cent[i][j][1]:cent[i][j][1]+2*BoxSize+1,i]
thumb+=1
out.append(U)
del U
return out
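#Example usage of getThumbnails2D (an editorial sketch), reusing the blob
#centres returned by findBlobs above; BoxSize=16 gives 33x33 thumbnails
def _example_getThumbnails2D(U, V, Swirl, cent):
    Ut, Vt, St = getThumbnails2D([U, V, Swirl], cent, 16)
    return Ut, Vt, St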
def getRandomThumbnails2D(Uf,Vf,Sf,numSamp,BoxSize):
import numpy as np
uSize = Uf.shape
Pos = np.random.rand(3,numSamp)
Pos[0] = Pos[0]*(uSize[0]-2*BoxSize-1)+BoxSize
Pos[1] = Pos[1]*(uSize[1]-2*BoxSize-1)+BoxSize
Pos[2] = Pos[2]* (uSize[2]-1)
Pos = Pos.round().astype(int)
#print(Pos[:,0])
#print(np.ndarray.min(Pos[0]))
#print(np.ndarray.max(Pos[0]))
#print(np.ndarray.min(Pos[1]))
#print(np.ndarray.max(Pos[1]))
#print(np.ndarray.min(Pos[2]))
#print(np.ndarray.max(Pos[2]))
#print(max(Pos))
#initialize thumbnail matrices
Ut = np.zeros([2*BoxSize+1,2*BoxSize+1,numSamp])
Ut[:] = np.NAN
Vt = Ut.copy()
St = Ut.copy()
#print(Ut.shape)
#pad out velocity fields so that there are NaNs around in all directions
Uf2 = np.zeros([uSize[0]+2*BoxSize,uSize[1]+2*BoxSize,uSize[2]])
Uf2[:] = np.NAN
Vf2 = Uf2.copy()
Sf2 = Uf2.copy()
Uf2[BoxSize:-1*BoxSize,BoxSize:-1*BoxSize,:] = Uf.copy()
Vf2[BoxSize:-1*BoxSize,BoxSize:-1*BoxSize,:] = Vf.copy()
Sf2[BoxSize:-1*BoxSize,BoxSize:-1*BoxSize,:] = Sf.copy()
#Now get the thumbnails
thumb = 0
for i in range(numSamp):
#print(i)
Ut[:,:,thumb] = Uf2[Pos[0,i]:Pos[0,i]+2*BoxSize+1,Pos[1,i]:Pos[1,i]+2*BoxSize+1,Pos[2,i]]
Vt[:,:,thumb] = Vf2[Pos[0,i]:Pos[0,i]+2*BoxSize+1,Pos[1,i]:Pos[1,i]+2*BoxSize+1,Pos[2,i]]
St[:,:,thumb] = Sf2[Pos[0,i]:Pos[0,i]+2*BoxSize+1,Pos[1,i]:Pos[1,i]+2*BoxSize+1,Pos[2,i]]
thumb+=1
return [Ut, Vt, St, Pos]
def genHairpinField(BoxSize,Circ,r,rs,Ts,Rot,StagStren,Gvort,Gstag,Conv,x=None,y=None):
'''
Generates a theoretical hairpin vortex velocity field given a number of parameters. Returns U and V velocity fields
Inputs:
BoxSize - 2*Boxsize+1 is the number of vectors per side of box.
Circ - Circulation strength of vortex
r - diameter of vortex solid body rotation (constant vector magnitude outside core)
rs, Ts - polar coordinate location of stagnation point
Rot - Rotation of stagnation point shear layer
StagStren - Vector magnitude of stagnation point velocity field (constant magnitude)
Gvort - Width of blending gaussian for vortex
Gstag - Width of blending gaussian for stagnation point
Conv - Convective velocity of this vortex relative to the local mean
Outputs:
U - Streamwise velocity field
V - Wall-normal velocity field
'''
import numpy as np
import math
#print((x.shape[0]-1)/2)
if x is None:
X, Y = np.meshgrid(np.arange(-1*BoxSize, BoxSize+1), np.arange(-1*BoxSize, BoxSize+1))
else:
assert BoxSize==(x.shape[0]-1)/2, 'The BoxSize does not match the length of the x vector. Thats not right...'
assert BoxSize==(y.shape[0]-1)/2, 'The BoxSize does not match the length of the y vector. Thats not right...'
X, Y = np.meshgrid(x, y)
U = np.zeros([2*BoxSize+1,2*BoxSize+1])
V = U.copy()
R = np.hypot(X, Y)
T = np.arctan2(Y,X)
#Vortex
Ut = Circ*R/(2*np.pi*r**2)
#Ut[R>=r] = Circ/(2*np.pi*R[R>=r])
Ut[R>=r] = Circ/(2*np.pi*r) #make velocities constant outside core
#Now convert back to cartesian velocities
Uvort = Ut*np.sin(T)
Vvort = -1*Ut*np.cos(T)
#Create stagnation point flow
Rot = Rot*np.pi/180*2
Ts = Ts*np.pi/180
xs = rs*np.cos(Ts) #shift in stagnation point in x
ys = rs*np.sin(Ts) #shift in stagnation point in y
#StagStren = 2;
Xs = X-xs
Ys = Y-ys;
Ts = np.arctan2(Ys,Xs)
M = np.hypot(Xs, Ys)
U = M*np.cos(Ts-Rot)
V = -1*M*np.sin(Ts-Rot)
M =
|
np.hypot(U, V)
|
numpy.hypot
|
import unittest
import numpy
import six
import chainer
from chainer.backends import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
class SoftmaxCrossEntropyTestBase(object):
def setUp(self):
self.shape, self.ignore_index = self.shape_ignore
if self.shape is None:
if self.dtype == numpy.float16:
self.x = numpy.array([[-5, 1]], dtype=self.dtype)
else:
self.x = numpy.array([[-1000, 1]], dtype=self.dtype)
self.t = numpy.array([0], dtype=self.label_dtype)
else:
self.x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
out_shape = (self.shape[0],) + self.shape[2:]
self.t = numpy.random.randint(
0, self.shape[1], out_shape).astype(self.label_dtype)
if (self.ignore_index is not None and
len(self.ignore_index) <= self.t.ndim):
self.t[self.ignore_index] = -1
if self.reduce == 'mean':
self.gy = numpy.random.uniform(-1, 1, ()).astype(self.x.dtype)
else:
self.gy = numpy.random.uniform(
-1, 1, self.t.shape).astype(self.dtype)
self.ggx = numpy.random.uniform(
-1, 1, self.x.shape).astype(self.x.dtype)
if self.weight_apply:
self.class_weight = numpy.random.uniform(
0, 10, (self.x.shape[1],)).astype(self.dtype)
else:
self.class_weight = None
if self.dtype == numpy.float16:
self.check_forward_options = {'atol': 5e-4, 'rtol': 5e-3}
self.check_backward_options = {'atol': 5e-3, 'rtol': 5e-2}
self.check_double_backward_options = {'atol': 5e-3, 'rtol': 5e-2}
else:
self.check_forward_options = {}
self.check_backward_options = {}
self.check_double_backward_options = {}
def check_forward(self, x_data, t_data, class_weight, use_cudnn='always'):
x = chainer.Variable(x_data)
t = chainer.Variable(t_data)
with chainer.using_config('use_cudnn', use_cudnn):
loss = functions.softmax_cross_entropy(
x, t, normalize=self.normalize, reduce=self.reduce,
cache_score=self.cache_score, class_weight=class_weight,
enable_double_backprop=self.enable_double_backprop)
self.assertEqual(loss.data.shape, self.gy.shape)
self.assertEqual(loss.data.dtype, self.dtype)
if not self.enable_double_backprop:
assert (loss.creator.y is not None) == self.cache_score
loss_value = cuda.to_cpu(loss.data)
if self.reduce == 'mean':
self.check_forward_with_reduce(
float(loss_value), t_data, class_weight)
else:
self.check_forward_without_reduce(loss_value, t_data, class_weight)
def check_forward_with_reduce(self, loss_value, t_data, class_weight):
# Compute expected value
loss_expect = 0.0
count = 0
x = numpy.rollaxis(self.x, 1, self.x.ndim).reshape(
(self.t.size, self.x.shape[1]))
t = self.t.ravel()
for xi, ti in six.moves.zip(x, t):
if ti == -1:
continue
log_z = numpy.ufunc.reduce(numpy.logaddexp, xi)
if class_weight is None:
loss_expect -= (xi - log_z)[ti]
else:
loss_expect -= (xi - log_z)[ti] * class_weight[ti]
count += 1
if self.normalize:
if count == 0:
loss_expect = 0.0
else:
loss_expect /= count
else:
if len(t_data) == 0:
loss_expect = 0.0
else:
loss_expect /= len(t_data)
testing.assert_allclose(
loss_expect, loss_value, **self.check_forward_options)
def check_forward_without_reduce(self, loss_value, t_data, class_weight):
x = numpy.rollaxis(self.x, 1, self.x.ndim).reshape(
(self.t.size, self.x.shape[1]))
t = self.t.ravel()
l = loss_value.ravel()
for xi, ti, li in six.moves.zip(x, t, l):
if ti == -1:
continue
log_z = numpy.ufunc.reduce(numpy.logaddexp, xi)
if class_weight is None:
loss_expect = -(xi - log_z)[ti]
else:
loss_expect = -(xi - log_z)[ti] * class_weight[ti]
testing.assert_allclose(
loss_expect, li, **self.check_forward_options)
def test_forward_cpu(self):
self.check_forward(self.x, self.t, self.class_weight)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(
cuda.to_gpu(self.x), cuda.to_gpu(self.t),
None if not self.weight_apply else cuda.to_gpu(self.class_weight))
@attr.gpu
def test_forward_gpu_no_cudnn(self):
self.check_forward(
cuda.to_gpu(self.x), cuda.to_gpu(self.t),
None if not self.weight_apply else cuda.to_gpu(self.class_weight),
'never')
def check_backward(self, x_data, t_data, g_data, class_weight,
use_cudnn='always'):
with chainer.using_config('use_cudnn', use_cudnn):
def f(x, t):
return functions.softmax_cross_entropy(
x, t, cache_score=self.cache_score,
class_weight=class_weight, reduce=self.reduce)
gradient_check.check_backward(
f, (x_data, t_data), g_data, dtype=numpy.float64,
**self.check_backward_options)
def test_backward_cpu(self):
g_data = None
if self.reduce == 'no':
g_data = self.gy
self.check_backward(self.x, self.t, g_data, self.class_weight)
@attr.gpu
def test_backward_gpu(self):
g_data = None
if self.reduce == 'no':
g_data = cuda.to_gpu(self.gy)
weight = None
if not self.weight_apply:
weight = cuda.to_gpu(self.class_weight)
self.check_backward(
cuda.to_gpu(self.x), cuda.to_gpu(self.t), g_data, weight)
@attr.gpu
def test_backward_gpu_no_cudnn(self):
g_data = None
if self.reduce == 'no':
g_data = cuda.to_gpu(self.gy)
weight = None
if not self.weight_apply:
weight = cuda.to_gpu(self.class_weight)
self.check_backward(
cuda.to_gpu(self.x), cuda.to_gpu(self.t), g_data, weight, 'never')
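# For reference, the expected per-sample loss assembled in
# check_forward_with_reduce / check_forward_without_reduce above is the
# standard softmax cross entropy,
#     loss_i = log(sum_j exp(x_ij)) - x_i[t_i],
# with the log-sum-exp evaluated stably via numpy.logaddexp. A minimal sketch
# (hypothetical helper name, not part of the Chainer test suite):
def _reference_softmax_cross_entropy(xi, ti):
    log_z = numpy.ufunc.reduce(numpy.logaddexp, xi)
    return float(log_z - xi[ti])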
test_cases = testing.product({
    # test each combination of option flags
'reduce': ['mean', 'no'],
'cache_score': [True, False],
'normalize': [True, False],
'weight_apply': [True, False],
'shape_ignore': [(None, None),
((2, 3, 2, 2), (0, 1, 0))],
'dtype': [numpy.float32],
'label_dtype': [numpy.int32],
}) + testing.product({
# test floating dtypes
'reduce': ['mean', 'no'],
'cache_score': [False],
'normalize': [True],
'weight_apply': [True],
'shape_ignore': [(None, None),
((2, 3), (slice(None),)),
((2, 3, 2), (0,)),
((2, 3, 2, 2), (0, 1, 0))],
'dtype': [numpy.float16, numpy.float64],
'label_dtype': [numpy.int32],
}) + testing.product({
# test label dtypes
'reduce': ['mean', 'no'],
'cache_score': [False],
'normalize': [True],
'weight_apply': [True],
'shape_ignore': [(None, None),
((2, 3), (slice(None),)),
((2, 3, 2), (0,)),
((2, 3, 2, 2), (0, 1, 0))],
'dtype': [numpy.float32],
'label_dtype': [numpy.int8, numpy.int16, numpy.int64],
})
@testing.parameterize(*test_cases)
@testing.fix_random()
class TestSoftmaxCrossEntropyDisableDoubleBackprop(
SoftmaxCrossEntropyTestBase, unittest.TestCase):
enable_double_backprop = False
@testing.parameterize(*test_cases)
@testing.fix_random()
class TestSoftmaxCrossEntropyEnableDoubleBackprop(
SoftmaxCrossEntropyTestBase, unittest.TestCase):
enable_double_backprop = True
def check_double_backward(self, x_data, t_data, gy_data, ggx_data,
class_weight, use_cudnn='always'):
def f(x):
return functions.softmax_cross_entropy(
x, t_data, self.normalize, self.cache_score, class_weight,
reduce=self.reduce, enable_double_backprop=True)
with chainer.using_config('use_cudnn', use_cudnn):
gradient_check.check_double_backward(
f, x_data, gy_data, ggx_data, dtype=numpy.float64,
**self.check_double_backward_options)
def test_double_backward_cpu(self):
self.check_double_backward(
self.x, self.t, self.gy, self.ggx, self.class_weight)
@attr.gpu
def test_double_backward_gpu(self):
self.check_double_backward(
cuda.to_gpu(self.x), cuda.to_gpu(self.t),
cuda.to_gpu(self.gy), cuda.to_gpu(self.ggx),
None if not self.weight_apply else cuda.to_gpu(self.class_weight))
@attr.gpu
def test_double_backward_gpu_no_cudnn(self):
self.check_double_backward(
cuda.to_gpu(self.x), cuda.to_gpu(self.t),
cuda.to_gpu(self.gy), cuda.to_gpu(self.ggx),
None if not self.weight_apply else cuda.to_gpu(self.class_weight),
'never')
@testing.parameterize(*testing.product_dict(
[
{'t_value': -2, 'valid': False},
{'t_value': 3, 'valid': False},
{'t_value': -1, 'valid': True} # -1 is ignore_label
],
[
{'enable_double_backprop': True},
{'enable_double_backprop': False}
]
))
class TestSoftmaxCrossEntropyValueCheck(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (2, 2)).astype(numpy.float32)
# `0` is required to avoid NaN
self.t = numpy.array([self.t_value, 0], dtype=numpy.int32)
self.original_debug = chainer.is_debug()
chainer.set_debug(True)
def tearDown(self):
chainer.set_debug(self.original_debug)
def check_value_check(self, x_data, t_data, use_cudnn):
x = chainer.Variable(x_data)
t = chainer.Variable(t_data)
with chainer.using_config('use_cudnn', use_cudnn):
if self.valid:
# Check if it throws nothing
functions.softmax_cross_entropy(
x, t, enable_double_backprop=self.enable_double_backprop)
else:
with self.assertRaises(ValueError):
functions.softmax_cross_entropy(
x, t,
enable_double_backprop=self.enable_double_backprop)
def test_value_check_cpu(self):
self.check_value_check(self.x, self.t, 'never')
@attr.gpu
def test_value_check_gpu(self):
self.check_value_check(self.x, self.t, 'never')
@attr.gpu
def test_value_check_gpu_cudnn(self):
self.check_value_check(cuda.to_gpu(self.x), cuda.to_gpu(self.t),
'always')
@testing.parameterize(*testing.product({
'use_cudnn': ['always', 'auto', 'never'],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
@attr.cudnn
class TestSoftmaxCrossEntropyCudnnCall(unittest.TestCase):
def setUp(self):
self.x = cuda.cupy.random.uniform(-1, 1, (4, 3)).astype(self.dtype)
self.t = cuda.cupy.random.randint(0, 3, (4,)).astype(numpy.int32)
def forward(self):
x = chainer.Variable(self.x)
t = chainer.Variable(self.t)
return functions.softmax_cross_entropy(
x, t, enable_double_backprop=False)
def test_call_cudnn_forward(self):
with chainer.using_config('use_cudnn', self.use_cudnn):
with testing.patch('cupy.cudnn.softmax_forward') as func:
self.forward()
self.assertEqual(func.called,
chainer.should_use_cudnn('>=auto'))
# Note that SoftmaxCrossEntropy does not use cudnn on backward
@testing.parameterize(
{'enable_double_backprop': True},
{'enable_double_backprop': False},
)
class TestClassWeightAssertion(unittest.TestCase):
def setUp(self):
self.x = numpy.array([[0, 1], [2, 3]])
self.t = numpy.array([0, 1])
def test_ndim_assertion(self):
wrong_ndim_class_weight = numpy.array([[0, 0]], dtype='f')
with self.assertRaises(ValueError):
functions.softmax_cross_entropy(
self.x, self.t, class_weight=wrong_ndim_class_weight,
enable_double_backprop=self.enable_double_backprop)
def test_dtype_assertion(self):
wrong_dtype_class_weight = numpy.array([0, 0], dtype=numpy.int32)
with self.assertRaises(ValueError):
functions.softmax_cross_entropy(
self.x, self.t, class_weight=wrong_dtype_class_weight,
enable_double_backprop=self.enable_double_backprop)
def test_variable_assertion(self):
wrong_inst_class_weight = chainer.Variable(
numpy.array([0, 0], dtype='f'))
with self.assertRaises(ValueError):
functions.softmax_cross_entropy(
self.x, self.t, class_weight=wrong_inst_class_weight,
enable_double_backprop=self.enable_double_backprop)
@testing.parameterize(*testing.product({
'enable_double_backprop': [True, False],
}))
class TestSoftmaxCrossEntropyInvalidReduce(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (2, 3)).astype('f')
self.t = numpy.zeros((2,), 'i')
def check_invalid_reduce(self, x, t):
with self.assertRaises(ValueError):
functions.softmax_cross_entropy(
x, t,
reduce='unknown_reduce_type',
enable_double_backprop=self.enable_double_backprop)
def test_invalid_reduce_cpu(self):
self.check_invalid_reduce(self.x, self.t)
@attr.gpu
def test_invalid_reduce_gpu(self):
self.check_invalid_reduce(cuda.to_gpu(self.x), cuda.to_gpu(self.t))
@testing.parameterize(*testing.product({
'ignore_label': [-2, 9],
'reduce': ['mean', 'no'],
'enable_double_backprop': [False, True],
'class_weight': [None, numpy.ones((3,), dtype=numpy.float32)]})
)
class TestNonDefaultIgnoreLabel(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (2, 3)).astype(numpy.float32)
self.t = numpy.full((2,), self.ignore_label, dtype=numpy.int32)
if self.reduce == 'mean':
gy_shape = ()
else:
gy_shape = (2,)
self.gy =
|
numpy.random.uniform(-1, 1, gy_shape)
|
numpy.random.uniform
|
import numpy as np
import random
import sys
import time
import math
#Calculates cosine similarity between two vectors
def calculateCosSim(vec1,vec2):
return
|
np.dot(vec1,vec2)
|
numpy.dot
|
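#Editorial note: the snippet above is truncated at the np.dot call. For
#reference, a complete cosine-similarity helper (a sketch, not the dataset's
#ground-truth completion) would normalise the dot product by both vector norms:
import numpy as np

def calculate_cos_sim_full(vec1, vec2):
    #cosine similarity = dot(v1, v2) / (||v1|| * ||v2||)
    return np.dot(vec1, vec2) / (np.linalg.norm(vec1) * np.linalg.norm(vec2))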
import numpy as np
import cv2
# Define a canvas 600 wide by 400 high, initialized to white
canvas = np.zeros((400, 600, 3), dtype=np.uint8) + 255
# Draw a vertical black dividing line down the centre
cv2.line(canvas, (300, 0), (300, 399), (0, 0, 0), 2)
# Draw a horizontal dividing line at y = 150 across the right half
cv2.line(canvas, (300, 149), (599, 149), (0, 0, 0), 2)
# Draw a red circle in the lower-right of the left half
cv2.circle(canvas, (200, 300), 75, (0, 0, 255), 5)
# Draw a blue rectangle in the lower-left of the left half
cv2.rectangle(canvas, (20, 240), (100, 360), (255, 0, 0), thickness=3)
# Define two triangles and fill their interiors with green
triangles = np.array([
[(200, 240), (145, 333), (255, 333)],
[(60, 180), (20, 237), (100, 237)]])
cv2.fillPoly(canvas, triangles, (0, 255, 0))
# Draw a yellow five-pointed star
# Step 1: find the five vertices by rotating a point through multiples of an angle
phi = 4 * np.pi / 5
rotations = [[[np.cos(i * phi), -np.sin(i * phi)], [i * np.sin(phi),
|
np.cos(i * phi)
|
numpy.cos
|
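# Editorial note: the snippet above is truncated inside the rotation-matrix list.
# A hedged sketch of how the five star vertices could be computed (not the
# dataset's ground-truth continuation): rotate a starting point by multiples of
# phi = 4*pi/5, then scale and shift the points onto the canvas.
import numpy as np
phi = 4 * np.pi / 5
rot_mats = [np.array([[np.cos(i * phi), -np.sin(i * phi)],
                      [np.sin(i * phi),  np.cos(i * phi)]]) for i in range(5)]
pentagram = np.array([[m @ np.array([0.0, -1.0]) for m in rot_mats]])
pentagram = np.round(pentagram * 80 + np.array([160, 120])).astype(np.int32)
# cv2.polylines(canvas, [pentagram[0]], True, (0, 255, 255), 3)  # would draw it in yellow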
#!/usr/bin/env python
# coding: utf-8
# # Wasserstein Pareto Frontier Experiment on COMPAS Data Set
# ## Import Data
# The experiment uses the COMPAS data set as in "Optimized Pre-Processing for Discrimination Prevention" by Calmon et al. for comparison purposes: https://github.com/fair-preprocessing/nips2017/tree/master/compas/experiment_data2
# In[1]:
import numpy as np
import pandas as pd
import scipy
from scipy import stats
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.interpolate import interp1d
from tqdm import tqdm
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC, LinearSVC
from sklearn.naive_bayes import MultinomialNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import confusion_matrix, roc_auc_score, auc, classification_report, roc_curve
from sklearn.preprocessing import OrdinalEncoder
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from matplotlib import pyplot
from scipy.linalg import sqrtm
from matplotlib import gridspec
from matplotlib.patches import Rectangle
# import data
path =r'/Users/shizhouxu/Documents/LIBRARY/Python/Fair_L2_Supervised_Learning/experiment_data_compass/' # use your path
train_0 = pd.read_csv(path + "train_0.csv",index_col=None, header=0, usecols=range(1,6))
train_1 = pd.read_csv(path + "train_1.csv",index_col=None, header=0, usecols=range(1,6))
train_2 = pd.read_csv(path + "train_2.csv",index_col=None, header=0, usecols=range(1,6))
train_3 = pd.read_csv(path + "train_3.csv",index_col=None, header=0, usecols=range(1,6))
train_4 = pd.read_csv(path + "train_4.csv",index_col=None, header=0, usecols=range(1,6))
test_0 = pd.read_csv(path + "test_0.csv",index_col=None, header=0, usecols=range(1,6))
test_1 = pd.read_csv(path + "test_1.csv",index_col=None, header=0, usecols=range(1,6))
test_2 = pd.read_csv(path + "test_2.csv",index_col=None, header=0, usecols=range(1,6))
test_3 = pd.read_csv(path + "test_3.csv",index_col=None, header=0, usecols=range(1,6))
test_4 = pd.read_csv(path + "test_4.csv",index_col=None, header=0, usecols=range(1,6))
train_new_0 = pd.read_csv(path + "train_new_0.csv",index_col=None, header=0, usecols=range(1,6))
train_new_1 = pd.read_csv(path + "train_new_1.csv",index_col=None, header=0, usecols=range(1,6))
train_new_2 = pd.read_csv(path + "train_new_2.csv",index_col=None, header=0, usecols=range(1,6))
train_new_3 = pd.read_csv(path + "train_new_3.csv",index_col=None, header=0, usecols=range(1,6))
train_new_4 = pd.read_csv(path + "train_new_4.csv",index_col=None, header=0, usecols=range(1,6))
test_new_0 = pd.read_csv(path + "test_new_0.csv",index_col=None, header=0, usecols=range(1,6))
test_new_1 = pd.read_csv(path + "test_new_1.csv",index_col=None, header=0, usecols=range(1,6))
test_new_2 = pd.read_csv(path + "test_new_2.csv",index_col=None, header=0, usecols=range(1,6))
test_new_3 = pd.read_csv(path + "test_new_3.csv",index_col=None, header=0, usecols=range(1,6))
test_new_4 = pd.read_csv(path + "test_new_4.csv",index_col=None, header=0, usecols=range(1,6))
# all available data variables: features = ['race','age_cat','c_charge_degree','priors_count','is_recid']
features = ['race','age_cat','c_charge_degree','priors_count','is_recid']
# sensitive variable: Z_features = ['race']
Z_features = ['race']
# dependent variable: Y_features = ['is_recid']
Y_features = ['is_recid']
# independent variable: X_features = ['age_cat', 'c_charge_degree','priors_count']
X_features = ['age_cat', 'c_charge_degree','priors_count']
# combine the data by train/test category
TrainList=[train_0,train_1,train_2,train_3,train_4]
TestList=[test_0,test_1,test_2,test_3,test_4]
TrainNewList=[train_new_0,train_new_1,train_new_2,train_new_3,train_new_4]
TestNewList=[test_new_0,test_new_1,test_new_2,test_new_3,test_new_4]
# data set combined: df
ord_enc = OrdinalEncoder()
df = pd.concat([train_0,train_1,train_2,train_3,train_4,test_0,test_1,test_2,test_3,test_4])
# data set further excluding the sensitive variable: df_delete
df_delete = df.drop('race',axis = 1)
# sensitive variable Z: race
race = df['race']
# ## Compute the Wasserstein Pseudo-barycenter for X
# In[2]:
# independent variable: X
X = np.delete(np.array(pd.get_dummies(df[X_features])),[4],axis = 1)
# dependent variable: Y
Y = np.array(pd.get_dummies(df[Y_features]))
# mean of X and Y: X_mean, Y_mean
X_mean = np.mean(X,axis = 0)
Y_mean = np.mean(Y)
# covariance (matrix) of X and Y: X_cov, Y_cov
X_cov = np.cov(X.T)
Y_cov = np.cov(Y.T)
# marginal (conditional) variables by race group: X_A, Y_A, X_C, Y_C
X_A = X[race == 'African-American',:]
Y_A = Y[race == 'African-American']
X_C = X[race == 'Caucasian',:]
Y_C = Y[race == 'Caucasian']
# marginal means: X_A_mean, Y_A_mean, X_C_mean, Y_C_mean
X_A_mean = np.average(X_A, axis = 0)
Y_A_mean = np.average(Y_A)
X_C_mean = np.average(X_C, axis = 0)
Y_C_mean = np.average(Y_C)
# marginal covariances: X_A_cov, Y_A_cov, X_C_cov, Y_C_cov
X_A_cov = np.cov(X_A.T)
Y_A_cov = np.cov(Y_A.T)
X_C_cov = np.cov(X_C.T)
Y_C_cov = np.cov(Y_C.T)
# cross-covariance (matrix) between Y and X: yX_A_cov, yX_C_cov
yX_A_cov = np.cov(Y_A.T, X_A.T)[range(len(Y[0,:]),len(Y[0,:]) + len(X[0,:])),0]
yX_C_cov = np.cov(Y_C.T, X_C.T)[range(len(Y[0,:]),len(Y[0,:]) + len(X[0,:])),0]
# algorithm 1, step 1: iterative process for the independent barycenter covariance matrix with stop criterion error equal to 1e-8
sample_size = len(X[:,0])
X_bar = np.random.rand(len(X[0,:]),len(X[0,:])) # random initialization for the covariance
eps = 10 # initialization for the stop variable
while eps > 0.00000001:
X_new = ((len(X_A[:,0])/sample_size) * sqrtm(sqrtm(X_bar)@X_A_cov@sqrtm(X_bar))) + ((len(X_C[:,0])/sample_size) * sqrtm(sqrtm(X_bar)@X_C_cov@sqrtm(X_bar)))
eps = np.linalg.norm(X_bar - X_new)
X_bar = X_new
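# For clarity, the fixed-point update above can be written as a reusable helper
# (an editorial sketch with a hypothetical name; it reuses the numpy and
# scipy.linalg.sqrtm imports from the top of this script):
#     Sigma_{k+1} = sum_i w_i * sqrtm( sqrtm(Sigma_k) @ Sigma_i @ sqrtm(Sigma_k) )
def gaussian_barycenter_cov(covs, weights, tol=1e-8, max_iter=1000):
    Sigma = np.eye(covs[0].shape[0])  # arbitrary positive-definite initialization
    for _ in range(max_iter):
        root = sqrtm(Sigma)
        Sigma_new = sum(w * sqrtm(root @ C @ root) for w, C in zip(weights, covs))
        if np.linalg.norm(Sigma - Sigma_new) < tol:  # stop criterion
            return Sigma_new
        Sigma = Sigma_new
    return Sigma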
# algorithm 1, step 2: the corresponding Brenier maps for the marginals of X: T_X_A, T_X_C
T_X_A = np.linalg.inv(sqrtm(X_A_cov)) @ sqrtm( sqrtm(X_A_cov) @ X_bar @ sqrtm(X_A_cov) ) @ np.linalg.inv(sqrtm(X_A_cov))
T_X_C = np.linalg.inv(sqrtm(X_C_cov)) @ sqrtm( sqrtm(X_C_cov) @ X_bar @ sqrtm(X_C_cov) ) @ np.linalg.inv(sqrtm(X_C_cov))
# wasserstein pseudo-barycenter for X separated in train/test categories: X_TrainFairList, X_TestFairList
X_TrainFairList = []
X_TestFairList = []
for i in range(0,len(TrainList)):
train = np.delete(np.array(pd.get_dummies(TrainList[i][X_features])),[4],axis = 1)
test = np.delete(np.array(pd.get_dummies(TestList[i][X_features])),[4],axis = 1)
gender_train_i = np.array(TrainList[i][Z_features]).T[0,:]
gender_test_i = np.array(TestList[i][Z_features]).T[0,:]
train_new = np.random.rand(train.shape[0],train.shape[1])
test_new = np.random.rand(test.shape[0],test.shape[1])
train_new[gender_train_i == 'African-American',:] = (train[gender_train_i == 'African-American',:] - X_A_mean) @ T_X_A.T + X_mean
train_new[gender_train_i == 'Caucasian',:] = (train[gender_train_i == 'Caucasian',:] - X_C_mean) @ T_X_C.T + X_mean
test_new[gender_test_i == 'African-American',:] = (test[gender_test_i == 'African-American',:] - X_A_mean) @ T_X_A.T + X_mean
test_new[gender_test_i == 'Caucasian',:] = (test[gender_test_i == 'Caucasian',:] - X_C_mean) @ T_X_C.T + X_mean
X_TrainFairList.append(train_new)
X_TestFairList.append(test_new)
# ## Compute the Wasserstein Pseudo-barycenter for E(Y|X)
# In[3]:
# wasserstein pseudo-barycenter for X: X_fair
X_fair = np.concatenate([X_TrainFairList[0],X_TrainFairList[1],X_TrainFairList[2],X_TrainFairList[3],X_TrainFairList[4],X_TestFairList[0],X_TestFairList[1],X_TestFairList[2],X_TestFairList[3],X_TestFairList[4]])
# marginal (conditional) X_fair: X_fair_A, X_fair_C
X_fair_A = X_fair[race == 'African-American',:]
X_fair_C = X_fair[race == 'Caucasian',:]
# marginal means for X_fair: X_fair_A_mean, X_fair_C_mean
X_fair_A_mean = np.average(X_fair_A, axis = 0)
X_fair_C_mean = np.average(X_fair_C, axis = 0)
# marginal covariances for X_fair: X_fair_A_cov, X_fair_C_cov
X_fair_A_cov = np.cov(X_fair_A.T)
X_fair_C_cov = np.cov(X_fair_C.T)
# cross-covariance between Y and X_fair: yX_fair_A_cov, yX_fair_C_cov
yX_fair_A_cov = np.cov(Y_A.T, X_fair_A.T)[range(1,8),0]
yX_fair_C_cov = np.cov(Y_C.T, X_fair_C.T)[range(1,8),0]
# covariance of marginal E(Y|X) in the Gaussian case: yoX_A_cov, yoX_C_cov
# which is also the optimal linear estimation of covariance of E(Y|X) in general distribution case
yoX_A_cov = [email protected](X_fair_A_cov)@yX_fair_A_cov.T
yoX_C_cov = [email protected](X_fair_C_cov)@yX_fair_C_cov.T
# algorithm 2, step 1: iterative process for the dependent barycenter covariance with stop criterion error equal to 1e-20
Y_bar = np.random.rand()
eps = 10
while eps > 0.00000000000000000001:
Y_new = ((len(X_A[:,0])/sample_size) * np.sqrt(np.sqrt(Y_bar)*yoX_A_cov*np.sqrt(Y_bar))) + ((len(X_C[:,0])/sample_size) * np.sqrt(np.sqrt(Y_bar)*yoX_C_cov*np.sqrt(Y_bar)))
eps = Y_bar - Y_new
Y_bar = Y_new
# algorithm 2, step 2: the corresponding Brenier's map for marginals of E(y|X)
T_Y_A = (1/np.sqrt(yoX_A_cov)) * np.sqrt( np.sqrt(yoX_A_cov) * Y_bar * np.sqrt(yoX_A_cov) ) * (1/np.sqrt(yoX_A_cov))
T_Y_C = (1/np.sqrt(yoX_C_cov)) * np.sqrt( np.sqrt(yoX_C_cov) * Y_bar * np.sqrt(yoX_C_cov) ) * (1/np.sqrt(yoX_C_cov))
# wasserstein pseudo-barycenter for Y separated in train/test categories: Y_TrainFairList, Y_TestFairList
Y_TrainFairList = []
Y_TestFairList = []
for i in range(0,len(TrainList)):
train = np.array(pd.get_dummies(TrainList[i][Y_features]))
test = np.array(pd.get_dummies(TestList[i][Y_features]))
train_new = np.random.rand(len(train.T[0,:]))
test_new = np.random.rand(len(test.T[0,:]))
gender_train_i = np.array(TrainList[i][Z_features]).T[0,:]
gender_test_i = np.array(TestList[i][Z_features]).T[0,:]
train_new[gender_train_i == 'African-American'] = ((train[gender_train_i == 'African-American'] - Y_A_mean) * T_Y_A.T + Y_mean).T[0,:]
train_new[gender_train_i == 'Caucasian'] = ((train[gender_train_i == 'Caucasian'] - Y_C_mean) * T_Y_C.T + Y_mean).T[0,:]
test_new[gender_test_i == 'African-American'] = ((test[gender_test_i == 'African-American'] - Y_A_mean) * T_Y_A.T + Y_mean).T[0,:]
test_new[gender_test_i == 'Caucasian'] = ((test[gender_test_i == 'Caucasian'] - Y_C_mean) * T_Y_C.T + Y_mean).T[0,:]
Y_TrainFairList.append(train_new)
Y_TestFairList.append(test_new)
# Algorithm 2, step 4: reshape the dependent pseudo-barycenter to binary variable for logit regression
fair_value = np.unique(Y_TrainFairList[0])
Y_prob = (fair_value - np.min(fair_value))/(np.max(fair_value) - np.min(fair_value))
for j in range(0,len(Y_TrainFairList)):
for i in range(0,len(fair_value)):
Y_TrainFairList[j][Y_TrainFairList[j] == fair_value[i]] = np.random.binomial(size = len(np.where(Y_TrainFairList[j] == fair_value[i])[0]),n = 1,p = Y_prob[i])
Y_TestFairList[j][Y_TestFairList[j] == fair_value[i]] = np.random.binomial(size = len(np.where(Y_TestFairList[j] == fair_value[i])[0]),n = 1,p = Y_prob[i])
# In[4]:
# random forest test for the fair representation of data (barycenter pair)
RFModelsAUC=[]
RFTestPreds=[]
test_disc=[]
for i in range(0,len(TrainList)):
rf=RandomForestClassifier()
rf.fit(X_TrainFairList[i],Y_TrainFairList[i])
proba=rf.predict_proba(X_TestFairList[i])
ytrue=TestList[i][Y_features]
testauc=roc_auc_score(ytrue, proba[:, 1])
RFModelsAUC.append(testauc)
temp=TestList[i][Z_features+X_features+Y_features]
temp['pred']=proba[:,1]
mean = temp.groupby(Z_features)['pred'].mean()
v = mean.values
v = v.reshape(len(v),1)
ratio_df = pd.DataFrame(v/v.transpose(),index=mean.index,columns=mean.index )
ratio_df_arr=np.asarray(np.abs(1-ratio_df))
maxdisc=np.amax(ratio_df_arr)
test_disc.append(maxdisc)
RFres_Pseudobary = (RFModelsAUC, RFTestPreds)
RFDisc_Pseudobary = test_disc
RFModelsAUC, RFDisc_Pseudobary
# In[5]:
# logistic regression test for the fair representation of data (barycenter pair)
LRModelsAUC=[]
LRTestPreds=[]
test_disc = []
for i in range(0,len(TrainList)):
lr=LogisticRegression()
lr.fit(X_TrainFairList[i],Y_TrainFairList[i])
proba=lr.predict_proba(X_TestFairList[i])
ytrue=TestList[i][Y_features]
testauc=roc_auc_score(ytrue, proba[:, 1])
LRModelsAUC.append(testauc)
temp=TestList[i][Z_features+X_features+Y_features]
temp['pred']=proba[:,1]
mean = temp.groupby(Z_features)['pred'].mean()
v = mean.values
v = v.reshape(len(v),1)
ratio_df = pd.DataFrame(v/v.transpose(),index=mean.index,columns=mean.index )
ratio_df_arr=np.asarray(np.abs(1-ratio_df))
maxdisc=np.amax(ratio_df_arr)
test_disc.append(maxdisc)
LRres_Pseudobary = (LRModelsAUC, LRTestPreds)
LRDisc_Pseudobary = test_disc
LRModelsAUC, LRDisc_Pseudobary
# ## Estimate the geodesic path from the E(Y|X_z) to the barycenter of the marginal conditional expectations
# 1. Compute the geodesic paths from X to X^dash and from Y to Y^dash
# 2. Use a diagonal argument to estimate the geodesic path from the original E(Y|X) to E(Y^dash|X^dash) on both train and test data sets: X_train/test_path_list, Y_train/test_path_list
# In[6]:
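# McCann interpolation between the identity map and an affine Brenier map
# T(x) = (x - m_z) @ A.T + m gives, at time t in [0, 1],
#     x_t = (1 - t) * x + t * ((x - m_z) @ A.T + m),
# which is what the loops below evaluate for each race group on a grid of t.
# A minimal helper sketch (hypothetical name, not part of the original script):
def mccann_interpolate(x, A, m_z, m, t):
    return (1 - t) * x + t * ((x - m_z) @ A.T + m)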
# Algorithm 1, step 3: estimate of the independent variable (X) geodesic path using McCann interpolation
X_train_path_list = []
X_test_path_list = []
T = np.linspace(0,1,50) # discretize time variable T
Id = np.identity(7)
for i in range(0,len(TrainList)):
X_train_path = []
X_test_path = []
train = np.delete(np.array(pd.get_dummies(TrainList[i][X_features])),[4],axis = 1)
test = np.delete(np.array(pd.get_dummies(TestList[i][X_features])),[4],axis = 1)
gender_train_i = np.delete(np.array(pd.get_dummies(TrainList[i][Z_features])),[1],axis = 1).T[0,:]
gender_test_i = np.delete(np.array(pd.get_dummies(TestList[i][Z_features])),[1],axis = 1).T[0,:]
for t in range(0,len(T)):
train_new = np.random.rand(train.shape[0],train.shape[1])
test_new = np.random.rand(test.shape[0],test.shape[1])
tempt_train = train_new
tempt_test = test_new
tempt_train[gender_train_i == 1,:] = (1-T[t])*train[gender_train_i == 1,:] + T[t]*((train[gender_train_i == 1,:] - X_A_mean) @ T_X_A.T + X_mean)
tempt_train[gender_train_i == 0,:] = (1-T[t])*train[gender_train_i == 0,:] + T[t]*((train[gender_train_i == 0,:] - X_C_mean) @ T_X_C.T + X_mean)
tempt_test[gender_test_i == 1,:] = (1-T[t])*test[gender_test_i == 1,:] + T[t]*((test[gender_test_i == 1,:] - X_A_mean) @ T_X_A.T + X_mean)
tempt_test[gender_test_i == 0,:] = (1-T[t])*test[gender_test_i == 0,:] + T[t]*((test[gender_test_i == 0,:] - X_C_mean) @ T_X_C.T + X_mean)
        # Algorithm 1, step 4: merge the corresponding (wrt t) linear interpolation of the sensitive variable back into the McCann interpolation of the independent variable
X_train_path.append(np.concatenate((tempt_train, np.expand_dims(gender_train_i*(1-T[t]), axis=1)),axis = 1))
X_test_path.append(np.concatenate((tempt_test, np.expand_dims(gender_test_i*(1-T[t]), axis=1)),axis = 1))
X_train_path_list.append(X_train_path)
X_test_path_list.append(X_test_path)
# Algorithm 2, step 3: estimate of the dependent (Y) geodesic path using McCann interpolation
Y_train_path_list = []
Y_test_path_list = []
T = np.linspace(0,1,50)
for i in range(0,len(TrainList)):
Y_train_path = []
Y_test_path = []
train = np.array(pd.get_dummies(TrainList[i][Y_features]))
test = np.array(pd.get_dummies(TestList[i][Y_features]))
gender_train_i = np.array(TrainList[i][Z_features]).T[0,:]
gender_test_i = np.array(TestList[i][Z_features]).T[0,:]
for t in range(0,len(T)):
train_new = np.random.rand(len(train.T[0,:]))
test_new = np.random.rand(len(test.T[0,:]))
tempt_train = train_new
tempt_test = test_new
tempt_train[gender_train_i == 'African-American'] = ((1 - T[t] + T[t]*T_Y_A)*train[gender_train_i == 'African-American'] + T[t]*(Y_mean - T_Y_A*Y_A_mean)).T[0,:]
tempt_train[gender_train_i == 'Caucasian'] = ((1 - T[t] + T[t]*T_Y_C)*train[gender_train_i == 'Caucasian'] + T[t]*(Y_mean - T_Y_C*Y_C_mean)).T[0,:]
tempt_test[gender_test_i == 'African-American'] = ((1 - T[t] + T[t]*T_Y_A)*test[gender_test_i == 'African-American'] + T[t]*(Y_mean - T_Y_A*Y_A_mean)).T[0,:]
tempt_test[gender_test_i == 'Caucasian'] = ((1 - T[t] + T[t]*T_Y_C)*test[gender_test_i == 'Caucasian'] + T[t]*(Y_mean - T_Y_C*Y_C_mean)).T[0,:]
Y_train_path.append(tempt_train)
Y_test_path.append(tempt_test)
Y_train_path_list.append(Y_train_path)
Y_test_path_list.append(Y_test_path)
# Algorithm 2, step 4: reshape the dependent pseudo-barycenter to binary variable for logit regression
for t in range(0,len(T)):
for i in range(0,len(TrainList)):
fair_value = np.unique(Y_train_path_list[i][t])
Y_prob = (fair_value - np.min(fair_value))/(np.max(fair_value) -
|
np.min(fair_value)
|
numpy.min
|
from __future__ import print_function
###################################################################################################
# Libraries
###################################################################################################
import os
import numpy as np
from pysam import Samfile, Fastafile
from Bio import motifs
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from scipy.signal import savgol_filter
from scipy.stats import scoreatpercentile
from argparse import SUPPRESS
import pyx
# Internal
from rgt.Util import GenomeData, AuxiliaryFunctions
from rgt.HINT.signalProcessing import GenomicSignal
from rgt.GenomicRegionSet import GenomicRegionSet
from rgt.HINT.biasTable import BiasTable
def plotting_args(parser):
# Parameters Options
parser.add_argument("--organism", type=str, metavar="STRING", default="hg19",
help=("Organism considered on the analysis. Check our full documentation for all available "
"options. All default files such as genomes will be based on the chosen organism "
"and the data.config file."))
parser.add_argument("--reads-file", type=str, metavar="FILE", default=None)
parser.add_argument("--region-file", type=str, metavar="FILE", default=None)
parser.add_argument("--reads-file1", type=str, metavar="FILE", default=None)
parser.add_argument("--reads-file2", type=str, metavar="FILE", default=None)
parser.add_argument("--motif-file", type=str, metavar="FILE", default=None)
parser.add_argument("--bias-table", type=str, metavar="FILE1_F,FILE1_R", default=None)
parser.add_argument("--bias-table1", type=str, metavar="FILE1_F,FILE1_R", default=None)
parser.add_argument("--bias-table2", type=str, metavar="FILE1_F,FILE1_R", default=None)
parser.add_argument("--window-size", type=int, metavar="INT", default=400)
# Hidden Options
parser.add_argument("--initial-clip", type=int, metavar="INT", default=50, help=SUPPRESS)
parser.add_argument("--downstream-ext", type=int, metavar="INT", default=1, help=SUPPRESS)
parser.add_argument("--upstream-ext", type=int, metavar="INT", default=0, help=SUPPRESS)
parser.add_argument("--forward-shift", type=int, metavar="INT", default=5, help=SUPPRESS)
parser.add_argument("--reverse-shift", type=int, metavar="INT", default=-5, help=SUPPRESS)
parser.add_argument("--k-nb", type=int, metavar="INT", default=6, help=SUPPRESS)
parser.add_argument("--y-lim", type=float, metavar="FLOAT", default=0.3, help=SUPPRESS)
# Output Options
parser.add_argument("--output-location", type=str, metavar="PATH", default=os.getcwd(),
help="Path where the output bias table files will be written.")
parser.add_argument("--output-prefix", type=str, metavar="STRING", default=None,
help="The prefix for results files.")
# plot type
parser.add_argument("--seq-logo", default=False, action='store_true')
parser.add_argument("--bias-raw-bc-line", default=False, action='store_true')
parser.add_argument("--raw-bc-line", default=False, action='store_true')
parser.add_argument("--strand-line", default=False, action='store_true')
parser.add_argument("--unstrand-line", default=False, action='store_true')
parser.add_argument("--bias-line", default=False, action='store_true')
parser.add_argument("--atac-dnase-line", default=False, action='store_true')
parser.add_argument("--bias-raw-bc-strand-line2", default=False, action='store_true')
parser.add_argument("--fragment-raw-size-line", default=False, action='store_true')
parser.add_argument("--fragment-bc-size-line", default=False, action='store_true')
def plotting_run(args):
if args.seq_logo:
seq_logo(args)
if args.bias_raw_bc_line:
bias_raw_bc_strand_line(args)
if args.strand_line:
strand_line(args)
if args.unstrand_line:
unstrand_line(args)
if args.raw_bc_line:
raw_bc_line(args)
if args.bias_raw_bc_strand_line2:
bias_raw_bc_strand_line2(args)
if args.fragment_raw_size_line:
fragment_size_raw_line(args)
if args.fragment_bc_size_line:
fragment_size_bc_line(args)
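# An editorial usage sketch (hypothetical file names; not part of rgt-hint itself):
# build a parser with plotting_args, parse a typical command line and dispatch.
def _example_plotting_invocation():
    import argparse
    parser = argparse.ArgumentParser()
    plotting_args(parser)
    args = parser.parse_args(["--organism", "hg19",
                              "--reads-file", "reads.bam",
                              "--motif-file", "mpbs.bed",
                              "--bias-table", "bias_F.txt,bias_R.txt",
                              "--output-prefix", "example",
                              "--bias-raw-bc-line"])
    plotting_run(args)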
def seq_logo(args):
logo_fname = os.path.join(args.output_location, "{}.logo.eps".format(args.output_prefix))
pwm_file = os.path.join(args.output_location, "{}.pwm".format(args.output_prefix))
pwm_dict = dict(
[("A", [0.0] * args.window_size), ("C", [0.0] * args.window_size), ("G", [0.0] * args.window_size),
("T", [0.0] * args.window_size), ("N", [0.0] * args.window_size)])
genome_data = GenomeData(args.organism)
fasta_file = Fastafile(genome_data.get_genome())
bam = Samfile(args.reads_file, "rb")
regions = GenomicRegionSet("Peaks")
regions.read(args.region_file)
for region in regions:
for r in bam.fetch(region.chrom, region.initial, region.final):
if not r.is_reverse:
cut_site = r.pos - 1
p1 = cut_site - int(args.window_size / 2)
else:
cut_site = r.aend + 1
p1 = cut_site - int(args.window_size / 2)
p2 = p1 + args.window_size
# Fetching k-mer
currStr = str(fasta_file.fetch(region.chrom, p1, p2)).upper()
if r.is_reverse: continue
for i in range(0, len(currStr)):
pwm_dict[currStr[i]][i] += 1
with open(pwm_file, "w") as f:
for e in ["A", "C", "G", "T"]:
f.write(" ".join([str(int(c)) for c in pwm_dict[e]]) + "\n")
pwm = motifs.read(open(pwm_file), "pfm")
pwm.weblogo(logo_fname, format="eps", stack_width="large", stacks_per_line=str(args.window_size),
color_scheme="color_classic", unit_name="", show_errorbars=False, logo_title="",
show_xaxis=False, xaxis_label="", show_yaxis=False, yaxis_label="",
show_fineprint=False, show_ends=False, yaxis_scale=args.y_lim)
start = -(args.window_size / 2)
end = (args.window_size / 2) - 1
x = np.linspace(start, end, num=args.window_size).tolist()
fig = plt.figure(figsize=(8, 2))
ax = fig.add_subplot(111)
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_position(('outward', 15))
ax.tick_params(direction='out')
ax.xaxis.set_ticks(map(int, x))
x1 = map(int, x)
ax.set_xticklabels(map(str, x1), rotation=90)
ax.set_xlabel("Coordinates from Read Start", fontweight='bold')
ax.set_ylim([0, args.y_lim])
ax.yaxis.set_ticks([0, args.y_lim])
ax.set_yticklabels([str(0), str(args.y_lim)], rotation=90)
ax.set_ylabel("bits", rotation=90)
figure_name = os.path.join(args.output_location, "{}.line.eps".format(args.output_prefix))
fig.tight_layout()
fig.savefig(figure_name, format="eps", dpi=300)
# Creating canvas and printing eps / pdf with merged results
output_fname = os.path.join(args.output_location, "{}.eps".format(args.output_prefix))
c = pyx.canvas.canvas()
c.insert(pyx.epsfile.epsfile(0, 0, figure_name, scale=1.0))
c.insert(pyx.epsfile.epsfile(1.5, 1.5, logo_fname, width=18.8, height=3.5))
c.writeEPSfile(output_fname)
os.system("epstopdf " + output_fname)
os.remove(os.path.join(args.output_location, "{}.line.eps".format(args.output_prefix)))
os.remove(os.path.join(args.output_location, "{}.logo.eps".format(args.output_prefix)))
os.remove(os.path.join(args.output_location, "{}.eps".format(args.output_prefix)))
def bias_raw_bc_line(args):
signal = GenomicSignal(args.reads_file)
signal.load_sg_coefs(slope_window_size=9)
bias_table = BiasTable()
bias_table_list = args.bias_table.split(",")
table = bias_table.load_table(table_file_name_F=bias_table_list[0],
table_file_name_R=bias_table_list[1])
genome_data = GenomeData(args.organism)
fasta = Fastafile(genome_data.get_genome())
pwm_dict = dict([("A", [0.0] * args.window_size), ("C", [0.0] * args.window_size),
("G", [0.0] * args.window_size), ("T", [0.0] * args.window_size),
("N", [0.0] * args.window_size)])
num_sites = 0
mpbs_regions = GenomicRegionSet("Motif Predicted Binding Sites")
mpbs_regions.read(args.motif_file)
bam = Samfile(args.reads_file, "rb")
mean_signal_bias_f = np.zeros(args.window_size)
mean_signal_bias_r = np.zeros(args.window_size)
mean_signal_raw = np.zeros(args.window_size)
mean_signal_raw_f = np.zeros(args.window_size)
mean_signal_raw_r = np.zeros(args.window_size)
mean_signal_bc = np.zeros(args.window_size)
mean_signal_bc_f = np.zeros(args.window_size)
mean_signal_bc_r = np.zeros(args.window_size)
motif_len = 0
for region in mpbs_regions:
if str(region.name).split(":")[-1] == "Y":
# Extend by window_size
mid = (region.initial + region.final) / 2
p1 = mid - (args.window_size / 2)
p2 = mid + (args.window_size / 2)
motif_len = region.final - region.initial
signal_bias_f, signal_bias_r, raw, raw_f, raw_r, bc, bc_f, bc_r = \
signal.get_bias_raw_bc_signal(ref=region.chrom, start=p1, end=p2, bam=bam,
fasta=fasta, bias_table=table,
forward_shift=args.forward_shift,
reverse_shift=args.reverse_shift,
strand=True)
num_sites += 1
mean_signal_bias_f = np.add(mean_signal_bias_f, np.array(signal_bias_f))
mean_signal_bias_r = np.add(mean_signal_bias_r, np.array(signal_bias_r))
mean_signal_raw = np.add(mean_signal_raw, np.array(raw))
mean_signal_raw_f = np.add(mean_signal_raw_f, np.array(raw_f))
mean_signal_raw_r = np.add(mean_signal_raw_r, np.array(raw_r))
mean_signal_bc = np.add(mean_signal_bc, np.array(bc))
mean_signal_bc_f = np.add(mean_signal_bc_f, np.array(bc_f))
mean_signal_bc_r = np.add(mean_signal_bc_r, np.array(bc_r))
# Update pwm
aux_plus = 1
dna_seq = str(fasta.fetch(region.chrom, p1, p2)).upper()
if (region.final - region.initial) % 2 == 0:
aux_plus = 0
if region.orientation == "+":
for i in range(len(dna_seq)):
pwm_dict[dna_seq[i]][i] += 1
mean_signal_bias_f = mean_signal_bias_f / num_sites
mean_signal_bias_r = mean_signal_bias_r / num_sites
mean_signal_raw = mean_signal_raw / num_sites
mean_signal_bc = mean_signal_bc / num_sites
mean_signal_bc_f = mean_signal_bc_f / num_sites
mean_signal_bc_r = mean_signal_bc_r / num_sites
# Output the norm and slope signal
output_fname = os.path.join(args.output_location, "{}.txt".format(args.output_prefix))
f = open(output_fname, "w")
f.write("\t".join((map(str, mean_signal_bias_f))) + "\n")
f.write("\t".join((map(str, mean_signal_bias_r))) + "\n")
f.write("\t".join((map(str, mean_signal_raw))) + "\n")
f.write("\t".join((map(str, mean_signal_bc))) + "\n")
f.close()
# Output PWM and create logo
pwm_fname = os.path.join(args.output_location, "{}.pwm".format(args.output_prefix))
pwm_file = open(pwm_fname, "w")
for e in ["A", "C", "G", "T"]:
pwm_file.write(" ".join([str(int(f)) for f in pwm_dict[e]]) + "\n")
pwm_file.close()
logo_fname = os.path.join(args.output_location, "{}.logo.eps".format(args.output_prefix))
pwm = motifs.read(open(pwm_fname), "pfm")
pwm.weblogo(logo_fname, format="eps", stack_width="large", stacks_per_line=str(args.window_size),
color_scheme="color_classic", unit_name="", show_errorbars=False, logo_title="",
show_xaxis=False, xaxis_label="", show_yaxis=False, yaxis_label="",
show_fineprint=False, show_ends=False)
fig, (ax1, ax2, ax3) = plt.subplots(3, figsize=(8, 6))
start = -(args.window_size / 2)
end = (args.window_size / 2) - 1
x = np.linspace(start, end, num=args.window_size)
if motif_len % 2 == 0:
x1 = int(- (motif_len / 2))
x2 = int(motif_len / 2)
else:
x1 = int(-(motif_len / 2) - 1)
x2 = int((motif_len / 2) + 1)
############################################################
# bias signal per strand
fp_score = sum(mean_signal_raw[args.window_size / 2 + x1: args.window_size / 2 + x2])
shoulder_l = sum(mean_signal_raw[args.window_size / 2 + x1 - motif_len:args.window_size / 2 + x1])
shoulder_r = sum(mean_signal_raw[args.window_size / 2 + x2:args.window_size / 2 + x2 + motif_len])
sfr = (shoulder_l + shoulder_r) / (2 * fp_score)
min_ax1 = min(mean_signal_raw)
max_ax1 = max(mean_signal_raw)
ax1.plot(x, mean_signal_raw, color='blue', label='Uncorrected')
ax1.text(0.15, 0.9, 'n = {}'.format(num_sites), verticalalignment='bottom',
horizontalalignment='right', transform=ax1.transAxes, fontweight='bold')
ax1.text(0.35, 0.15, 'SFR = {}'.format(round(sfr, 2)), verticalalignment='bottom',
horizontalalignment='right', transform=ax1.transAxes, fontweight='bold')
ax1.xaxis.set_ticks_position('bottom')
ax1.yaxis.set_ticks_position('left')
ax1.spines['top'].set_visible(False)
ax1.spines['right'].set_visible(False)
ax1.spines['left'].set_position(('outward', 15))
ax1.spines['bottom'].set_position(('outward', 5))
ax1.tick_params(direction='out')
ax1.set_xticks([start, 0, end])
ax1.set_xticklabels([str(start), 0, str(end)])
ax1.set_yticks([min_ax1, max_ax1])
ax1.set_yticklabels([str(round(min_ax1, 2)), str(round(max_ax1, 2))], rotation=90)
ax1.set_title(args.output_prefix, fontweight='bold')
ax1.set_xlim(start, end)
ax1.set_ylim([min_ax1, max_ax1])
ax1.legend(loc="lower right", frameon=False)
####################################################################
#####################################################################
# Bias corrected, non-bias corrected (not strand specific)
fp_score = sum(mean_signal_bc[args.window_size / 2 + x1: args.window_size / 2 + x2])
shoulder_l = sum(mean_signal_bc[args.window_size / 2 + x1 - motif_len:args.window_size / 2 + x1])
shoulder_r = sum(mean_signal_bc[args.window_size / 2 + x2:args.window_size / 2 + x2 + motif_len])
sfr = (shoulder_l + shoulder_r) / (2 * fp_score)
min_ax2 = min(mean_signal_bc)
max_ax2 = max(mean_signal_bc)
ax2.plot(x, mean_signal_bc, color='red', label='Corrected')
ax2.text(0.35, 0.15, 'SFR = {}'.format(round(sfr, 2)), verticalalignment='bottom',
horizontalalignment='right', transform=ax2.transAxes, fontweight='bold')
ax2.xaxis.set_ticks_position('bottom')
ax2.yaxis.set_ticks_position('left')
ax2.spines['top'].set_visible(False)
ax2.spines['right'].set_visible(False)
ax2.spines['left'].set_position(('outward', 15))
ax2.tick_params(direction='out')
ax2.set_xticks([start, 0, end])
ax2.set_xticklabels([str(start), 0, str(end)])
ax2.set_yticks([min_ax2, max_ax2])
ax2.set_yticklabels([str(round(min_ax2, 2)), str(round(max_ax2, 2))], rotation=90)
ax2.set_xlim(start, end)
ax2.set_ylim([min_ax2, max_ax2])
ax2.legend(loc="lower right", frameon=False)
fp_score_f = sum(mean_signal_bc_f[args.window_size / 2 + x1: args.window_size / 2 + x2])
shoulder_l_f = sum(mean_signal_bc_f[args.window_size / 2 + x1 - motif_len:args.window_size / 2 + x1])
shoulder_r_f = sum(mean_signal_bc_f[args.window_size / 2 + x2:args.window_size / 2 + x2 + motif_len])
sfr_f = (shoulder_l_f + shoulder_r_f) / (2 * fp_score_f)
fp_score_r = sum(mean_signal_bc_r[args.window_size / 2 + x1: args.window_size / 2 + x2])
shoulder_l_r = sum(mean_signal_bc_r[args.window_size / 2 + x1 - motif_len:args.window_size / 2 + x1])
shoulder_r_r = sum(mean_signal_bc_r[args.window_size / 2 + x2:args.window_size / 2 + x2 + motif_len])
sfr_r = (shoulder_l_r + shoulder_r_r) / (2 * fp_score_r)
min_ax3 = min(min(mean_signal_bc_f), min(mean_signal_bc_r))
max_ax3 = max(max(mean_signal_bc_f), max(mean_signal_bc_r))
ax3.plot(x, mean_signal_bc_f, color='purple', label='Forward')
ax3.plot(x, mean_signal_bc_r, color='green', label='Reverse')
ax3.text(0.35, 0.15, 'SFR_f = {}'.format(round(sfr_f, 2)), verticalalignment='bottom',
horizontalalignment='right', transform=ax3.transAxes, fontweight='bold')
ax3.text(0.35, 0.05, 'SFR_r = {}'.format(round(sfr_r, 2)), verticalalignment='bottom',
horizontalalignment='right', transform=ax3.transAxes, fontweight='bold')
ax3.xaxis.set_ticks_position('bottom')
ax3.yaxis.set_ticks_position('left')
ax3.spines['top'].set_visible(False)
ax3.spines['right'].set_visible(False)
ax3.spines['left'].set_position(('outward', 15))
ax3.tick_params(direction='out')
ax3.set_xticks([start, 0, end])
ax3.set_xticklabels([str(start), 0, str(end)])
ax3.set_yticks([min_ax3, max_ax3])
ax3.set_yticklabels([str(round(min_ax3, 2)), str(round(max_ax3, 2))], rotation=90)
ax3.set_xlim(start, end)
ax3.set_ylim([min_ax3, max_ax3])
ax3.legend(loc="lower right", frameon=False)
ax3.spines['bottom'].set_position(('outward', 40))
ax1.axvline(x=x1, ymin=-0.3, ymax=1, c="black", lw=0.5, ls='dashed', zorder=0, clip_on=False)
ax1.axvline(x=x2, ymin=-0.3, ymax=1, c="black", lw=0.5, ls='dashed', zorder=0, clip_on=False)
ax2.axvline(x=x1, ymin=-0.5, ymax=1.2, c="black", lw=0.5, ls='dashed', zorder=0, clip_on=False)
ax2.axvline(x=x2, ymin=-0.5, ymax=1.2, c="black", lw=0.5, ls='dashed', zorder=0, clip_on=False)
###############################################################################
# merge the above figures
figure_name = os.path.join(args.output_location, "{}.line.eps".format(args.output_prefix))
fig.subplots_adjust(bottom=.2, hspace=.5)
fig.tight_layout()
fig.savefig(figure_name, format="eps", dpi=300)
# Creating canvas and printing eps / pdf with merged results
output_fname = os.path.join(args.output_location, "{}.eps".format(args.output_prefix))
c = pyx.canvas.canvas()
c.insert(pyx.epsfile.epsfile(0, 0, figure_name, scale=1.0))
c.insert(pyx.epsfile.epsfile(1.45, 0.89, logo_fname, width=18.3, height=1.75))
c.writeEPSfile(output_fname)
os.system("epstopdf " + figure_name)
os.system("epstopdf " + logo_fname)
os.system("epstopdf " + output_fname)
os.remove(pwm_fname)
os.remove(os.path.join(args.output_location, "{}.line.eps".format(args.output_prefix)))
os.remove(os.path.join(args.output_location, "{}.logo.eps".format(args.output_prefix)))
os.remove(os.path.join(args.output_location, "{}.line.pdf".format(args.output_prefix)))
os.remove(os.path.join(args.output_location, "{}.logo.pdf".format(args.output_prefix)))
os.remove(os.path.join(args.output_location, "{}.eps".format(args.output_prefix)))
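# Average raw vs. bias-corrected signal around motif-predicted binding sites
# flagged "Y", write the mean profiles to a text file, build a PWM/logo from
# the underlying sequence, and render both curves plus the logo as EPS/PDF.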
def raw_bc_line(args):
signal = GenomicSignal(args.reads_file)
signal.load_sg_coefs(slope_window_size=9)
bias_table = BiasTable()
bias_table_list = args.bias_table.split(",")
table = bias_table.load_table(table_file_name_F=bias_table_list[0],
table_file_name_R=bias_table_list[1])
genome_data = GenomeData(args.organism)
fasta = Fastafile(genome_data.get_genome())
pwm_dict = dict([("A", [0.0] * args.window_size), ("C", [0.0] * args.window_size),
("G", [0.0] * args.window_size), ("T", [0.0] * args.window_size),
("N", [0.0] * args.window_size)])
num_sites = 0
mpbs_regions = GenomicRegionSet("Motif Predicted Binding Sites")
mpbs_regions.read(args.motif_file)
bam = Samfile(args.reads_file, "rb")
mean_signal_raw = np.zeros(args.window_size)
mean_signal_bc = np.zeros(args.window_size)
for region in mpbs_regions:
if str(region.name).split(":")[-1] == "Y":
# Extend by window_size
mid = (region.initial + region.final) / 2
p1 = mid - (args.window_size / 2)
p2 = mid + (args.window_size / 2)
signal_bias_f, signal_bias_r, raw, raw_f, raw_r, bc, bc_f, bc_r = \
signal.get_bias_raw_bc_signal(ref=region.chrom, start=p1, end=p2, bam=bam,
fasta=fasta, bias_table=table,
forward_shift=args.forward_shift,
reverse_shift=args.reverse_shift,
strand=True)
num_sites += 1
mean_signal_raw = np.add(mean_signal_raw, np.array(raw))
mean_signal_bc = np.add(mean_signal_bc, np.array(bc))
# Update pwm
aux_plus = 1
dna_seq = str(fasta.fetch(region.chrom, p1, p2)).upper()
if (region.final - region.initial) % 2 == 0:
aux_plus = 0
if region.orientation == "+":
for i in range(len(dna_seq)):
pwm_dict[dna_seq[i]][i] += 1
mean_signal_raw = mean_signal_raw / num_sites
mean_signal_bc = mean_signal_bc / num_sites
# Output the norm and slope signal
output_fname = os.path.join(args.output_location, "{}.txt".format(args.output_prefix))
f = open(output_fname, "w")
f.write("\t".join((map(str, mean_signal_raw))) + "\n")
f.write("\t".join((map(str, mean_signal_bc))) + "\n")
f.close()
# Output PWM and create logo
pwm_fname = os.path.join(args.output_location, "{}.pwm".format(args.output_prefix))
pwm_file = open(pwm_fname, "w")
for e in ["A", "C", "G", "T"]:
pwm_file.write(" ".join([str(int(f)) for f in pwm_dict[e]]) + "\n")
pwm_file.close()
logo_fname = os.path.join(args.output_location, "{}.logo.eps".format(args.output_prefix))
pwm = motifs.read(open(pwm_fname), "pfm")
pwm.weblogo(logo_fname, format="eps", stack_width="large", stacks_per_line=str(args.window_size),
color_scheme="color_classic", unit_name="", show_errorbars=False, logo_title="",
show_xaxis=False, xaxis_label="", show_yaxis=False, yaxis_label="",
show_fineprint=False, show_ends=False)
fig = plt.figure(figsize=(8, 4))
ax = fig.add_subplot(111)
start = -(args.window_size / 2)
end = (args.window_size / 2) - 1
x = np.linspace(start, end, num=args.window_size)
############################################################
min_ = min(min(mean_signal_raw), min(mean_signal_bc))
max_ = max(max(mean_signal_raw), max(mean_signal_bc))
ax.plot(x, mean_signal_raw, color='red', label='Uncorrected')
ax.plot(x, mean_signal_bc, color='blue', label='Corrected')
ax.text(0.15, 0.9, 'n = {}'.format(num_sites), verticalalignment='bottom',
horizontalalignment='right', transform=ax.transAxes, fontweight='bold')
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_position(('outward', 15))
ax.spines['bottom'].set_position(('outward', 5))
ax.tick_params(direction='out')
ax.set_xticks([start, 0, end])
ax.set_xticklabels([str(start), 0, str(end)])
ax.set_yticks([min_, max_])
ax.set_yticklabels([str(round(min_, 2)), str(round(max_, 2))], rotation=90)
ax.set_title(args.output_prefix, fontweight='bold')
ax.set_xlim(start, end)
ax.set_ylim(min_, max_)
ax.legend(loc="lower right", frameon=False)
ax.spines['bottom'].set_position(('outward', 40))
###############################################################################
# merge the above figures
figure_name = os.path.join(args.output_location, "{}.line.eps".format(args.output_prefix))
fig.subplots_adjust(bottom=.2, hspace=.5)
fig.tight_layout()
fig.savefig(figure_name, format="eps", dpi=300)
# Creating canvas and printing eps / pdf with merged results
output_fname = os.path.join(args.output_location, "{}.eps".format(args.output_prefix))
c = pyx.canvas.canvas()
c.insert(pyx.epsfile.epsfile(0, 0, figure_name, scale=1.0))
c.insert(pyx.epsfile.epsfile(1.45, 0.89, logo_fname, width=18.3, height=1.75))
c.writeEPSfile(output_fname)
os.system("epstopdf " + figure_name)
os.system("epstopdf " + logo_fname)
os.system("epstopdf " + output_fname)
os.remove(pwm_fname)
os.remove(os.path.join(args.output_location, "{}.line.eps".format(args.output_prefix)))
os.remove(os.path.join(args.output_location, "{}.logo.eps".format(args.output_prefix)))
os.remove(os.path.join(args.output_location, "{}.line.pdf".format(args.output_prefix)))
os.remove(os.path.join(args.output_location, "{}.logo.pdf".format(args.output_prefix)))
os.remove(os.path.join(args.output_location, "{}.eps".format(args.output_prefix)))
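# Like raw_bc_line, but additionally averages the estimated sequence bias per
# strand: panel 1 shows forward/reverse bias, panel 2 shows uncorrected vs.
# bias-corrected signal, merged with the sequence logo into EPS/PDF output.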
def bias_raw_bc_strand_line(args):
signal = GenomicSignal(args.reads_file)
signal.load_sg_coefs(slope_window_size=9)
bias_table = BiasTable()
bias_table_list = args.bias_table.split(",")
table = bias_table.load_table(table_file_name_F=bias_table_list[0],
table_file_name_R=bias_table_list[1])
genome_data = GenomeData(args.organism)
fasta = Fastafile(genome_data.get_genome())
pwm_dict = dict([("A", [0.0] * args.window_size), ("C", [0.0] * args.window_size),
("G", [0.0] * args.window_size), ("T", [0.0] * args.window_size),
("N", [0.0] * args.window_size)])
num_sites = 0
mpbs_regions = GenomicRegionSet("Motif Predicted Binding Sites")
mpbs_regions.read(args.motif_file)
bam = Samfile(args.reads_file, "rb")
mean_signal_bias_f = np.zeros(args.window_size)
mean_signal_bias_r = np.zeros(args.window_size)
mean_signal_raw = np.zeros(args.window_size)
mean_signal_bc = np.zeros(args.window_size)
for region in mpbs_regions:
if str(region.name).split(":")[-1] == "Y":
mid = (region.initial + region.final) / 2
p1 = mid - (args.window_size / 2)
p2 = mid + (args.window_size / 2)
signal_bias_f, signal_bias_r, signal_raw, signal_bc = \
signal.get_bias_raw_bc_signal(ref=region.chrom, start=p1, end=p2, bam=bam,
fasta=fasta, bias_table=table,
forward_shift=args.forward_shift, reverse_shift=args.reverse_shift)
num_sites += 1
mean_signal_bias_f = np.add(mean_signal_bias_f, np.array(signal_bias_f))
mean_signal_bias_r = np.add(mean_signal_bias_r, np.array(signal_bias_r))
mean_signal_raw = np.add(mean_signal_raw, np.array(signal_raw))
mean_signal_bc = np.add(mean_signal_bc, np.array(signal_bc))
# Update pwm
aux_plus = 1
dna_seq = str(fasta.fetch(region.chrom, p1, p2)).upper()
if (region.final - region.initial) % 2 == 0:
aux_plus = 0
dna_seq_rev = AuxiliaryFunctions.revcomp(str(fasta.fetch(region.chrom,
p1 + aux_plus, p2 + aux_plus)).upper())
if region.orientation == "+":
for i in range(0, len(dna_seq)):
pwm_dict[dna_seq[i]][i] += 1
mean_signal_bias_f = mean_signal_bias_f / num_sites
mean_signal_bias_r = mean_signal_bias_r / num_sites
mean_signal_raw = mean_signal_raw / num_sites
mean_signal_bc = mean_signal_bc / num_sites
# Output the norm and slope signal
output_fname = os.path.join(args.output_location, "{}.txt".format(args.output_prefix))
f = open(output_fname, "w")
f.write("\t".join((map(str, mean_signal_bias_f))) + "\n")
f.write("\t".join((map(str, mean_signal_bias_r))) + "\n")
f.write("\t".join((map(str, mean_signal_raw))) + "\n")
f.write("\t".join((map(str, mean_signal_bc))) + "\n")
f.close()
# Output PWM and create logo
pwm_fname = os.path.join(args.output_location, "{}.pwm".format(args.output_prefix))
pwm_file = open(pwm_fname, "w")
for e in ["A", "C", "G", "T"]:
pwm_file.write(" ".join([str(int(f)) for f in pwm_dict[e]]) + "\n")
pwm_file.close()
logo_fname = os.path.join(args.output_location, "{}.logo.eps".format(args.output_prefix))
pwm = motifs.read(open(pwm_fname), "pfm")
pwm.weblogo(logo_fname, format="eps", stack_width="large", stacks_per_line=str(args.window_size),
color_scheme="color_classic", unit_name="", show_errorbars=False, logo_title="",
show_xaxis=False, xaxis_label="", show_yaxis=False, yaxis_label="",
show_fineprint=False, show_ends=False)
fig, (ax1, ax2) = plt.subplots(2, figsize=(8, 4))
start = -(args.window_size / 2)
end = (args.window_size / 2) - 1
x = np.linspace(start, end, num=args.window_size)
############################################################
# bias signal per strand
min_ = min(min(mean_signal_bias_f), min(mean_signal_bias_r))
max_ = max(max(mean_signal_bias_f), max(mean_signal_bias_r))
ax1.plot(x, mean_signal_bias_f, color='purple', label='Forward')
ax1.plot(x, mean_signal_bias_r, color='green', label='Reverse')
ax1.text(0.15, 0.9, 'n = {}'.format(num_sites), verticalalignment='bottom',
horizontalalignment='right', transform=ax1.transAxes, fontweight='bold')
ax1.xaxis.set_ticks_position('bottom')
ax1.yaxis.set_ticks_position('left')
ax1.spines['top'].set_visible(False)
ax1.spines['right'].set_visible(False)
ax1.spines['left'].set_position(('outward', 15))
ax1.spines['bottom'].set_position(('outward', 5))
ax1.tick_params(direction='out')
ax1.set_xticks([start, 0, end])
ax1.set_xticklabels([str(start), 0, str(end)])
ax1.set_yticks([min_, max_])
ax1.set_yticklabels([str(round(min_, 2)), str(round(max_, 2))], rotation=90)
ax1.set_title(args.output_prefix, fontweight='bold')
ax1.set_xlim(start, end)
ax1.set_ylim([min_, max_])
ax1.legend(loc="upper right", frameon=False)
####################################################################
#####################################################################
# Bias corrected, non-bias corrected (not strand specific)
min_ = min(min(mean_signal_raw), min(mean_signal_bc))
max_ = max(max(mean_signal_raw), max(mean_signal_bc))
ax2.plot(x, mean_signal_raw, color='blue', label='Uncorrected')
ax2.plot(x, mean_signal_bc, color='red', label='Corrected')
ax2.xaxis.set_ticks_position('bottom')
ax2.yaxis.set_ticks_position('left')
ax2.spines['top'].set_visible(False)
ax2.spines['right'].set_visible(False)
ax2.spines['left'].set_position(('outward', 15))
ax2.tick_params(direction='out')
ax2.set_xticks([start, 0, end])
ax2.set_xticklabels([str(start), 0, str(end)])
ax2.set_yticks([min_, max_])
ax2.set_yticklabels([str(round(min_, 2)), str(round(max_, 2))], rotation=90)
ax2.set_xlim(start, end)
ax2.set_ylim([min_, max_])
ax2.legend(loc="upper right", frameon=False)
ax2.spines['bottom'].set_position(('outward', 40))
###############################################################################
# merge the above figures
figure_name = os.path.join(args.output_location, "{}.line.eps".format(args.output_prefix))
fig.subplots_adjust(bottom=.2, hspace=.5)
fig.tight_layout()
fig.savefig(figure_name, format="eps", dpi=300)
# Creating canvas and printing eps / pdf with merged results
output_fname = os.path.join(args.output_location, "{}.eps".format(args.output_prefix))
c = pyx.canvas.canvas()
c.insert(pyx.epsfile.epsfile(0, 0, figure_name, scale=1.0))
c.insert(pyx.epsfile.epsfile(1.51, 0.89, logo_fname, width=18.3, height=1.75))
c.writeEPSfile(output_fname)
os.system("epstopdf " + figure_name)
os.system("epstopdf " + logo_fname)
os.system("epstopdf " + output_fname)
os.remove(pwm_fname)
os.remove(os.path.join(args.output_location, "{}.line.eps".format(args.output_prefix)))
os.remove(os.path.join(args.output_location, "{}.logo.eps".format(args.output_prefix)))
os.remove(os.path.join(args.output_location, "{}.line.pdf".format(args.output_prefix)))
os.remove(os.path.join(args.output_location, "{}.logo.pdf".format(args.output_prefix)))
os.remove(os.path.join(args.output_location, "{}.eps".format(args.output_prefix)))
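# Variant of bias_raw_bc_strand_line that also tracks strand-specific raw and
# bias-corrected profiles (all eight tracks are written to the text output);
# only the per-strand bias and the raw-vs-corrected panels are plotted.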
def bias_raw_bc_strand_line2(args):
signal = GenomicSignal(args.reads_file)
signal.load_sg_coefs(slope_window_size=9)
bias_table = BiasTable()
bias_table_list = args.bias_table.split(",")
table = bias_table.load_table(table_file_name_F=bias_table_list[0],
table_file_name_R=bias_table_list[1])
genome_data = GenomeData(args.organism)
fasta = Fastafile(genome_data.get_genome())
pwm_dict = dict([("A", [0.0] * args.window_size), ("C", [0.0] * args.window_size),
("G", [0.0] * args.window_size), ("T", [0.0] * args.window_size),
("N", [0.0] * args.window_size)])
num_sites = 0
mpbs_regions = GenomicRegionSet("Motif Predicted Binding Sites")
mpbs_regions.read(args.motif_file)
bam = Samfile(args.reads_file, "rb")
mean_signal_bias_f = np.zeros(args.window_size)
mean_signal_bias_r = np.zeros(args.window_size)
mean_signal_raw = np.zeros(args.window_size)
mean_signal_raw_f = np.zeros(args.window_size)
mean_signal_raw_r = np.zeros(args.window_size)
mean_signal_bc = np.zeros(args.window_size)
mean_signal_bc_f = np.zeros(args.window_size)
mean_signal_bc_r = np.zeros(args.window_size)
for region in mpbs_regions:
if str(region.name).split(":")[-1] == "Y":
mid = (region.initial + region.final) / 2
p1 = mid - (args.window_size / 2)
p2 = mid + (args.window_size / 2)
signal_bias_f, signal_bias_r, signal_raw, signal_raw_f, signal_raw_r, signal_bc, signal_bc_f, signal_bc_r = \
signal.get_bias_raw_bc_signal(ref=region.chrom, start=p1, end=p2, bam=bam,
fasta=fasta, bias_table=table,
forward_shift=args.forward_shift,
reverse_shift=args.reverse_shift,
strand=True)
num_sites += 1
mean_signal_bias_f = np.add(mean_signal_bias_f, np.array(signal_bias_f))
mean_signal_bias_r = np.add(mean_signal_bias_r, np.array(signal_bias_r))
mean_signal_raw = np.add(mean_signal_raw, np.array(signal_raw))
mean_signal_raw_f = np.add(mean_signal_raw_f, np.array(signal_raw_f))
mean_signal_raw_r = np.add(mean_signal_raw_r, np.array(signal_raw_r))
mean_signal_bc = np.add(mean_signal_bc, np.array(signal_bc))
mean_signal_bc_f = np.add(mean_signal_bc_f, np.array(signal_bc_f))
mean_signal_bc_r = np.add(mean_signal_bc_r, np.array(signal_bc_r))
# Update pwm
aux_plus = 1
dna_seq = str(fasta.fetch(region.chrom, p1, p2)).upper()
if (region.final - region.initial) % 2 == 0:
aux_plus = 0
dna_seq_rev = AuxiliaryFunctions.revcomp(str(fasta.fetch(region.chrom,
p1 + aux_plus, p2 + aux_plus)).upper())
if region.orientation == "+":
for i in range(0, len(dna_seq)):
pwm_dict[dna_seq[i]][i] += 1
mean_signal_bias_f = mean_signal_bias_f / num_sites
mean_signal_bias_r = mean_signal_bias_r / num_sites
mean_signal_raw = mean_signal_raw / num_sites
mean_signal_raw_f = mean_signal_raw_f / num_sites
mean_signal_raw_r = mean_signal_raw_r / num_sites
mean_signal_bc = mean_signal_bc / num_sites
mean_signal_bc_f = mean_signal_bc_f / num_sites
mean_signal_bc_r = mean_signal_bc_r / num_sites
# mean_signal_raw = rescaling(mean_signal_raw)
# mean_signal_bc = rescaling(mean_signal_bc)
# Output the norm and slope signal
output_fname = os.path.join(args.output_location, "{}.txt".format(args.output_prefix))
f = open(output_fname, "w")
f.write("\t".join((map(str, mean_signal_bias_f))) + "\n")
f.write("\t".join((map(str, mean_signal_bias_r))) + "\n")
f.write("\t".join((map(str, mean_signal_raw))) + "\n")
f.write("\t".join((map(str, mean_signal_raw_f))) + "\n")
f.write("\t".join((map(str, mean_signal_raw_r))) + "\n")
f.write("\t".join((map(str, mean_signal_bc))) + "\n")
f.write("\t".join((map(str, mean_signal_bc_f))) + "\n")
f.write("\t".join((map(str, mean_signal_bc_r))) + "\n")
f.close()
# Output PWM and create logo
pwm_fname = os.path.join(args.output_location, "{}.pwm".format(args.output_prefix))
pwm_file = open(pwm_fname, "w")
for e in ["A", "C", "G", "T"]:
pwm_file.write(" ".join([str(int(f)) for f in pwm_dict[e]]) + "\n")
pwm_file.close()
logo_fname = os.path.join(args.output_location, "{}.logo.eps".format(args.output_prefix))
pwm = motifs.read(open(pwm_fname), "pfm")
pwm.weblogo(logo_fname, format="eps", stack_width="large", stacks_per_line=str(args.window_size),
color_scheme="color_classic", unit_name="", show_errorbars=False, logo_title="",
show_xaxis=False, xaxis_label="", show_yaxis=False, yaxis_label="",
show_fineprint=False, show_ends=False)
# fig, (ax1, ax2, ax3, ax4) = plt.subplots(4, figsize=(8, 8))
fig, (ax1, ax4) = plt.subplots(2, figsize=(8, 4))
start = -(args.window_size / 2)
end = (args.window_size / 2) - 1
x = np.linspace(start, end, num=args.window_size)
############################################################
# bias signal per strand
min_ = min(min(mean_signal_bias_f), min(mean_signal_bias_r))
max_ = max(max(mean_signal_bias_f), max(mean_signal_bias_r))
ax1.plot(x, mean_signal_bias_f, color='purple', label='Forward')
ax1.plot(x, mean_signal_bias_r, color='green', label='Reverse')
ax1.text(0.15, 0.9, 'n = {}'.format(num_sites), verticalalignment='bottom',
horizontalalignment='right', transform=ax1.transAxes, fontweight='bold')
ax1.xaxis.set_ticks_position('bottom')
ax1.yaxis.set_ticks_position('left')
ax1.spines['top'].set_visible(False)
ax1.spines['right'].set_visible(False)
ax1.spines['left'].set_position(('outward', 15))
ax1.spines['bottom'].set_position(('outward', 5))
ax1.tick_params(direction='out')
ax1.set_xticks([start, 0, end])
ax1.set_xticklabels([str(start), 0, str(end)])
ax1.set_yticks([min_, max_])
ax1.set_yticklabels([str(round(min_, 2)), str(round(max_, 2))], rotation=90)
ax1.set_title(args.output_prefix, fontweight='bold')
ax1.set_xlim(start, end)
ax1.set_ylim([min_, max_])
ax1.legend(loc="upper right", frameon=False)
####################################################################
#####################################################################
# Bias corrected, non-bias corrected (not strand specific)
# min_ = min(min(mean_signal_raw_f), min(mean_signal_raw_r))
# max_ = max(max(mean_signal_raw_f), max(mean_signal_raw_r))
# ax2.plot(x, mean_signal_raw_f, color='red', label='Forward')
# ax2.plot(x, mean_signal_raw_r, color='green', label='Reverse')
# ax2.xaxis.set_ticks_position('bottom')
# ax2.yaxis.set_ticks_position('left')
# ax2.spines['top'].set_visible(False)
# ax2.spines['right'].set_visible(False)
# ax2.spines['left'].set_position(('outward', 15))
# ax2.tick_params(direction='out')
# ax2.set_xticks([start, -1, 0, 1, end])
# ax2.set_xticklabels([str(start), -1, 0,1, str(end)])
# ax2.set_yticks([min_, max_])
# ax2.set_yticklabels([str(round(min_, 2)), str(round(max_, 2))], rotation=90)
# ax2.set_xlim(start, end)
# ax2.set_ylim([min_, max_])
# ax2.legend(loc="upper right", frameon=False)
#####################################################################
# Bias corrected and strand specific
# min_ = min(min(mean_signal_bc_f), min(mean_signal_bc_r))
# max_ = max(max(mean_signal_bc_f), max(mean_signal_bc_r))
# ax3.plot(x, mean_signal_bc_f, color='red', label='Forward')
# ax3.plot(x, mean_signal_bc_r, color='green', label='Reverse')
# ax3.xaxis.set_ticks_position('bottom')
# ax3.yaxis.set_ticks_position('left')
# ax3.spines['top'].set_visible(False)
# ax3.spines['right'].set_visible(False)
# ax3.spines['left'].set_position(('outward', 15))
# ax3.tick_params(direction='out')
# ax3.set_xticks([start, 0, end])
# ax3.set_xticklabels([str(start), 0, str(end)])
# ax3.set_yticks([min_, max_])
# ax3.set_yticklabels([str(round(min_, 2)), str(round(max_, 2))], rotation=90)
# ax3.set_xlim(start, end)
# ax3.set_ylim([min_, max_])
# ax3.legend(loc="upper right", frameon=False)
#####################################################################
# Bias corrected, non-bias corrected (not strand specific)
min_ = min(min(mean_signal_raw), min(mean_signal_bc))
max_ = max(max(mean_signal_raw), max(mean_signal_bc))
ax4.plot(x, mean_signal_raw, color='blue', label='Uncorrected')
ax4.plot(x, mean_signal_bc, color='red', label='Corrected')
ax4.xaxis.set_ticks_position('bottom')
ax4.yaxis.set_ticks_position('left')
ax4.spines['top'].set_visible(False)
ax4.spines['right'].set_visible(False)
ax4.spines['left'].set_position(('outward', 15))
ax4.tick_params(direction='out')
ax4.set_xticks([start, 0, end])
ax4.set_xticklabels([str(start), 0, str(end)])
ax4.set_yticks([min_, max_])
ax4.set_yticklabels([str(round(min_, 2)), str(round(max_, 2))], rotation=90)
ax4.set_xlim(start, end)
ax4.set_ylim([min_, max_])
ax4.legend(loc="upper right", frameon=False)
ax4.spines['bottom'].set_position(('outward', 40))
###############################################################################
# merge the above figures
figure_name = os.path.join(args.output_location, "{}.line.eps".format(args.output_prefix))
fig.subplots_adjust(bottom=.2, hspace=.5)
fig.tight_layout()
fig.savefig(figure_name, format="eps", dpi=300)
# Creating canvas and printing eps / pdf with merged results
output_fname = os.path.join(args.output_location, "{}.eps".format(args.output_prefix))
c = pyx.canvas.canvas()
c.insert(pyx.epsfile.epsfile(0, 0, figure_name, scale=1.0))
c.insert(pyx.epsfile.epsfile(1.45, 0.89, logo_fname, width=18.3, height=1.75))
c.writeEPSfile(output_fname)
os.system("epstopdf " + figure_name)
os.system("epstopdf " + logo_fname)
os.system("epstopdf " + output_fname)
os.remove(pwm_fname)
os.remove(os.path.join(args.output_location, "{}.line.eps".format(args.output_prefix)))
os.remove(os.path.join(args.output_location, "{}.logo.eps".format(args.output_prefix)))
os.remove(os.path.join(args.output_location, "{}.line.pdf".format(args.output_prefix)))
os.remove(os.path.join(args.output_location, "{}.logo.pdf".format(args.output_prefix)))
os.remove(os.path.join(args.output_location, "{}.eps".format(args.output_prefix)))
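# Average forward/reverse cut profiles around "Y"-flagged motif sites, using
# bias correction when a bias table is supplied; normalized and slope tracks
# go to the text output while the per-strand profiles are plotted with the logo.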
def strand_line(args):
genomic_signal = GenomicSignal(args.reads_file)
genomic_signal.load_sg_coefs(slope_window_size=9)
table = None
if args.bias_table is not None:
bias_table = BiasTable()
bias_table_list = args.bias_table.split(",")
table = bias_table.load_table(table_file_name_F=bias_table_list[0],
table_file_name_R=bias_table_list[1])
genome_data = GenomeData(args.organism)
fasta = Fastafile(genome_data.get_genome())
num_sites = 0
mpbs_regions = GenomicRegionSet("Motif Predicted Binding Sites")
mpbs_regions.read(args.motif_file)
bam = Samfile(args.reads_file, "rb")
mean_signal_f = np.zeros(args.window_size)
mean_signal_r = np.zeros(args.window_size)
pwm_dict = None
for region in mpbs_regions:
if str(region.name).split(":")[-1] == "Y":
            # Center a window of args.window_size bp on the motif midpoint
mid = (region.initial + region.final) / 2
p1 = mid - (args.window_size / 2)
p2 = mid + (args.window_size / 2)
if args.bias_table is not None:
signal_f, signal_r = genomic_signal.get_bc_signal_by_fragment_length(ref=region.chrom, start=p1,
end=p2, bam=bam, fasta=fasta,
bias_table=table,
forward_shift=args.forward_shift,
reverse_shift=args.reverse_shift)
else:
signal_f, signal_r = genomic_signal.get_raw_signal_by_fragment_length(ref=region.chrom, start=p1,
end=p2,
bam=bam,
forward_shift=args.forward_shift,
reverse_shift=args.reverse_shift)
num_sites += 1
mean_signal_f = np.add(mean_signal_f, signal_f)
mean_signal_r = np.add(mean_signal_r, signal_r)
# Update pwm
if pwm_dict is None:
pwm_dict = dict([("A", [0.0] * (p2 - p1)), ("C", [0.0] * (p2 - p1)),
("G", [0.0] * (p2 - p1)), ("T", [0.0] * (p2 - p1)),
("N", [0.0] * (p2 - p1))])
aux_plus = 1
dna_seq = str(fasta.fetch(region.chrom, p1, p2)).upper()
if (region.final - region.initial) % 2 == 0:
aux_plus = 0
dna_seq_rev = AuxiliaryFunctions.revcomp(str(fasta.fetch(region.chrom,
p1 + aux_plus, p2 + aux_plus)).upper())
if region.orientation == "+":
for i in range(0, len(dna_seq)):
pwm_dict[dna_seq[i]][i] += 1
elif region.orientation == "-":
for i in range(0, len(dna_seq_rev)):
pwm_dict[dna_seq_rev[i]][i] += 1
mean_norm_signal_f = genomic_signal.boyle_norm(mean_signal_f)
perc = scoreatpercentile(mean_norm_signal_f, 98)
std = np.std(mean_norm_signal_f)
mean_norm_signal_f = genomic_signal.hon_norm_atac(mean_norm_signal_f, perc, std)
mean_norm_signal_r = genomic_signal.boyle_norm(mean_signal_r)
perc = scoreatpercentile(mean_norm_signal_r, 98)
std = np.std(mean_norm_signal_r)
mean_norm_signal_r = genomic_signal.hon_norm_atac(mean_norm_signal_r, perc, std)
mean_slope_signal_f = genomic_signal.slope(mean_norm_signal_f, genomic_signal.sg_coefs)
mean_slope_signal_r = genomic_signal.slope(mean_norm_signal_r, genomic_signal.sg_coefs)
# Output the norm and slope signal
output_fname = os.path.join(args.output_location, "{}.txt".format(args.output_prefix))
f = open(output_fname, "w")
f.write("\t".join((map(str, mean_norm_signal_f))) + "\n")
f.write("\t".join((map(str, mean_slope_signal_f))) + "\n")
f.write("\t".join((map(str, mean_norm_signal_r))) + "\n")
f.write("\t".join((map(str, mean_slope_signal_r))) + "\n")
f.close()
# Output PWM and create logo
pwm_fname = os.path.join(args.output_location, "{}.pwm".format(args.output_prefix))
pwm_file = open(pwm_fname, "w")
for e in ["A", "C", "G", "T"]:
pwm_file.write(" ".join([str(int(f)) for f in pwm_dict[e]]) + "\n")
pwm_file.close()
logo_fname = os.path.join(args.output_location, "{}.logo.eps".format(args.output_prefix))
pwm = motifs.read(open(pwm_fname), "pfm")
pwm.weblogo(logo_fname, format="eps", stack_width="large", stacks_per_line=str(args.window_size),
color_scheme="color_classic", unit_name="", show_errorbars=False, logo_title="",
show_xaxis=False, xaxis_label="", show_yaxis=False, yaxis_label="",
show_fineprint=False, show_ends=False)
start = -(args.window_size / 2)
end = (args.window_size / 2) - 1
x = np.linspace(start, end, num=args.window_size)
fig = plt.figure(figsize=(8, 4))
ax = fig.add_subplot(111)
min_signal = min(min(mean_signal_f), min(mean_signal_r))
max_signal = max(max(mean_signal_f), max(mean_signal_r))
ax.plot(x, mean_signal_f, color='red', label='Forward')
ax.plot(x, mean_signal_r, color='green', label='Reverse')
ax.set_title(args.output_prefix, fontweight='bold')
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_position(('outward', 15))
ax.tick_params(direction='out')
ax.set_xticks([start, 0, end])
ax.set_xticklabels([str(start), 0, str(end)])
ax.set_yticks([min_signal, max_signal])
ax.set_yticklabels([str(round(min_signal, 2)), str(round(max_signal, 2))], rotation=90)
ax.set_xlim(start, end)
ax.set_ylim([min_signal, max_signal])
ax.legend(loc="upper right", frameon=False)
ax.spines['bottom'].set_position(('outward', 40))
figure_name = os.path.join(args.output_location, "{}.line.eps".format(args.output_prefix))
fig.subplots_adjust(bottom=.2, hspace=.5)
fig.tight_layout()
fig.savefig(figure_name, format="eps", dpi=300)
# Creating canvas and printing eps / pdf with merged results
output_fname = os.path.join(args.output_location, "{}.eps".format(args.output_prefix))
c = pyx.canvas.canvas()
c.insert(pyx.epsfile.epsfile(0, 0, figure_name, scale=1.0))
c.insert(pyx.epsfile.epsfile(1.37, 0.89, logo_fname, width=18.5, height=1.75))
c.writeEPSfile(output_fname)
os.system("epstopdf " + figure_name)
os.system("epstopdf " + logo_fname)
os.system("epstopdf " + output_fname)
# os.remove(pwm_fname)
os.remove(os.path.join(args.output_location, "{}.line.eps".format(args.output_prefix)))
os.remove(os.path.join(args.output_location, "{}.logo.eps".format(args.output_prefix)))
os.remove(os.path.join(args.output_location, "{}.line.pdf".format(args.output_prefix)))
os.remove(os.path.join(args.output_location, "{}.logo.pdf".format(args.output_prefix)))
os.remove(os.path.join(args.output_location, "{}.eps".format(args.output_prefix)))
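# Unstranded version: one (optionally bias-corrected) signal per motif region,
# flipped for "-" orientation and written per region to the text output; the
# mean profile is plotted together with the sequence logo.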
def unstrand_line(args):
genomic_signal = GenomicSignal(args.reads_file)
genomic_signal.load_sg_coefs(slope_window_size=9)
table = None
if args.bias_table is not None:
bias_table = BiasTable()
bias_table_list = args.bias_table.split(",")
table = bias_table.load_table(table_file_name_F=bias_table_list[0], table_file_name_R=bias_table_list[1])
genome_data = GenomeData(args.organism)
fasta = Fastafile(genome_data.get_genome())
num_sites = 0
mpbs_regions = GenomicRegionSet("Motif Predicted Binding Sites")
mpbs_regions.read(args.motif_file)
bam = Samfile(args.reads_file, "rb")
mean_signal = np.zeros(args.window_size)
pwm_dict = None
output_fname = os.path.join(args.output_location, "{}.txt".format(args.output_prefix))
with open(output_fname, "w") as output_f:
for region in mpbs_regions:
mid = (region.initial + region.final) / 2
p1 = mid - (args.window_size / 2)
p2 = mid + (args.window_size / 2)
if args.bias_table is not None:
signal = genomic_signal.get_bc_signal_by_fragment_length(ref=region.chrom, start=p1, end=p2,
bam=bam, fasta=fasta,
bias_table=table,
forward_shift=args.forward_shift,
reverse_shift=args.reverse_shift,
strand=False)
else:
signal = genomic_signal.get_raw_signal_by_fragment_length(ref=region.chrom, start=p1, end=p2,
bam=bam,
forward_shift=args.forward_shift,
reverse_shift=args.reverse_shift,
strand=False)
if region.orientation == "-":
signal = np.flip(signal)
name = "{}_{}_{}".format(region.chrom, str(region.initial), str(region.final))
output_f.write(name + "\t" + "\t".join(map(str, map(int, signal))) + "\n")
num_sites += 1
mean_signal = np.add(mean_signal, signal)
# Update pwm
if pwm_dict is None:
pwm_dict = dict([("A", [0.0] * (p2 - p1)), ("C", [0.0] * (p2 - p1)),
("G", [0.0] * (p2 - p1)), ("T", [0.0] * (p2 - p1)),
("N", [0.0] * (p2 - p1))])
aux_plus = 1
dna_seq = str(fasta.fetch(region.chrom, p1, p2)).upper()
if (region.final - region.initial) % 2 == 0:
aux_plus = 0
dna_seq_rev = AuxiliaryFunctions.revcomp(
str(fasta.fetch(region.chrom, p1 + aux_plus, p2 + aux_plus)).upper())
if region.orientation == "+":
for i in range(0, len(dna_seq)):
pwm_dict[dna_seq[i]][i] += 1
elif region.orientation == "-":
for i in range(0, len(dna_seq_rev)):
pwm_dict[dna_seq_rev[i]][i] += 1
mean_signal = mean_signal / num_sites
# Output PWM and create logo
pwm_fname = os.path.join(args.output_location, "{}.pwm".format(args.output_prefix))
pwm_file = open(pwm_fname, "w")
for e in ["A", "C", "G", "T"]:
pwm_file.write(" ".join([str(int(f)) for f in pwm_dict[e]]) + "\n")
pwm_file.close()
logo_fname = os.path.join(args.output_location, "{}.logo.eps".format(args.output_prefix))
pwm = motifs.read(open(pwm_fname), "pfm")
pwm.weblogo(logo_fname, format="eps", stack_width="large", stacks_per_line=str(args.window_size),
color_scheme="color_classic", unit_name="", show_errorbars=False, logo_title="",
show_xaxis=False, xaxis_label="", show_yaxis=False, yaxis_label="",
show_fineprint=False, show_ends=False)
start = -(args.window_size / 2)
end = (args.window_size / 2) - 1
x = np.linspace(start, end, num=args.window_size)
fig = plt.figure(figsize=(8, 4))
ax = fig.add_subplot(111)
min_signal = min(mean_signal)
max_signal = max(mean_signal)
ax.plot(x, mean_signal, color='red')
ax.set_title(args.output_prefix, fontweight='bold')
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_position(('outward', 15))
ax.tick_params(direction='out')
ax.set_xticks([start, 0, end])
ax.set_xticklabels([str(start), 0, str(end)])
ax.set_yticks([min_signal, max_signal])
ax.set_yticklabels([str(round(min_signal, 2)), str(round(max_signal, 2))], rotation=90)
ax.set_xlim(start, end)
ax.set_ylim([min_signal, max_signal])
ax.legend(loc="upper right", frameon=False)
ax.spines['bottom'].set_position(('outward', 40))
figure_name = os.path.join(args.output_location, "{}.line.eps".format(args.output_prefix))
fig.subplots_adjust(bottom=.2, hspace=.5)
fig.tight_layout()
fig.savefig(figure_name, format="eps", dpi=300)
# Creating canvas and printing eps / pdf with merged results
output_fname = os.path.join(args.output_location, "{}.eps".format(args.output_prefix))
c = pyx.canvas.canvas()
c.insert(pyx.epsfile.epsfile(0, 0, figure_name, scale=1.0))
c.insert(pyx.epsfile.epsfile(1.31, 0.89, logo_fname, width=18.5, height=1.75))
c.writeEPSfile(output_fname)
os.system("epstopdf " + figure_name)
os.system("epstopdf " + logo_fname)
os.system("epstopdf " + output_fname)
os.remove(pwm_fname)
os.remove(os.path.join(args.output_location, "{}.line.eps".format(args.output_prefix)))
os.remove(os.path.join(args.output_location, "{}.logo.eps".format(args.output_prefix)))
os.remove(os.path.join(args.output_location, "{}.line.pdf".format(args.output_prefix)))
# os.remove(os.path.join(args.output_location, "{}.logo.pdf".format(args.output_prefix)))
os.remove(os.path.join(args.output_location, "{}.eps".format(args.output_prefix)))
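# Raw cut counts around motif sites split by fragment (template) length into
# all / <=145 / 146-307 / >307 bp bins per strand; the bins are written to
# text, linker positions are estimated, and the four bins are plotted as panels.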
def fragment_size_raw_line(args):
mpbs_regions = GenomicRegionSet("Motif Predicted Binding Sites")
mpbs_regions.read(args.motif_file)
bam = Samfile(args.reads_file, "rb")
signal_f_max_145 = np.zeros(args.window_size)
signal_r_max_145 = np.zeros(args.window_size)
signal_f_146_307 = np.zeros(args.window_size)
signal_r_146_307 = np.zeros(args.window_size)
signal_f_min_307 = np.zeros(args.window_size)
signal_r_min_307 = np.zeros(args.window_size)
signal_f = np.zeros(args.window_size)
signal_r = np.zeros(args.window_size)
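    # Fragment-length bins: <=145 bp, 146-307 bp and >307 bp (roughly
    # nucleosome-free, mono-nucleosome and longer fragments).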
for region in mpbs_regions:
if str(region.name).split(":")[-1] == "Y":
            # Center a window of args.window_size bp on the motif midpoint
mid = (region.initial + region.final) / 2
p1 = mid - (args.window_size / 2)
p2 = mid + (args.window_size / 2)
# Fetch raw signal
for read in bam.fetch(region.chrom, p1, p2):
# All reads
if not read.is_reverse:
cut_site = read.pos + args.forward_shift
if p1 <= cut_site < p2:
signal_f[cut_site - p1] += 1.0
else:
cut_site = read.aend + args.reverse_shift - 1
if p1 <= cut_site < p2:
signal_r[cut_site - p1] += 1.0
# length <= 145
if abs(read.template_length) <= 145:
if not read.is_reverse:
cut_site = read.pos + args.forward_shift
if p1 <= cut_site < p2:
signal_f_max_145[cut_site - p1] += 1.0
else:
cut_site = read.aend + args.reverse_shift - 1
if p1 <= cut_site < p2:
signal_r_max_145[cut_site - p1] += 1.0
# length > 145 and <= 307
if 145 < abs(read.template_length) <= 307:
if not read.is_reverse:
cut_site = read.pos + args.forward_shift
if p1 <= cut_site < p2:
signal_f_146_307[cut_site - p1] += 1.0
else:
cut_site = read.aend + args.reverse_shift - 1
if p1 <= cut_site < p2:
signal_r_146_307[cut_site - p1] += 1.0
# length > 307
if abs(read.template_length) > 307:
if not read.is_reverse:
cut_site = read.pos + args.forward_shift
if p1 <= cut_site < p2:
signal_f_min_307[cut_site - p1] += 1.0
else:
cut_site = read.aend + args.reverse_shift - 1
if p1 <= cut_site < p2:
signal_r_min_307[cut_site - p1] += 1.0
# Output the norm and slope signal
output_fname = os.path.join(args.output_location, "{}.txt".format(args.output_prefix))
f = open(output_fname, "w")
f.write("\t".join((map(str, signal_f))) + "\n")
f.write("\t".join((map(str, signal_r))) + "\n")
f.write("\t".join((map(str, signal_f_max_145))) + "\n")
f.write("\t".join((map(str, signal_r_max_145))) + "\n")
f.write("\t".join((map(str, signal_f_146_307))) + "\n")
f.write("\t".join((map(str, signal_r_146_307))) + "\n")
f.write("\t".join((map(str, signal_f_min_307))) + "\n")
f.write("\t".join((map(str, signal_r_min_307))) + "\n")
f.close()
# find out the linker position
pos_f_1, pos_r_1, pos_f_2, pos_r_2 = get_linkers_position(signal_f_146_307,
signal_r_146_307,
signal_f_min_307,
signal_r_min_307)
p1 = (pos_f_1 - pos_f_2) / 2 + pos_f_2
p2 = p1 + 180
p3 = args.window_size - p2
p4 = args.window_size - p1
fig, (ax1, ax2, ax3, ax4) = plt.subplots(4, 1, figsize=(8, 8))
start = -(args.window_size / 2)
end = (args.window_size / 2) - 1
x = np.linspace(start, end, num=args.window_size)
x_ticks = [start, p1 - 500, p2 - 500, 0, p3 - 500, p4 - 500, end]
update_axes_for_fragment_size_line(ax1, x, x_ticks, start, end, signal_f, signal_r, p1, p2,
p3, p4)
update_axes_for_fragment_size_line(ax2, x, x_ticks, start, end, signal_f_max_145, signal_r_max_145, p1, p2,
p3, p4)
update_axes_for_fragment_size_line(ax3, x, x_ticks, start, end, signal_f_146_307, signal_r_146_307, p1, p2,
p3, p4)
update_axes_for_fragment_size_line(ax4, x, x_ticks, start, end, signal_f_min_307, signal_r_min_307, p1, p2,
p3, p4)
figure_name = os.path.join(args.output_location, "{}.pdf".format(args.output_prefix))
fig.subplots_adjust(bottom=.2, hspace=.5)
fig.tight_layout()
fig.savefig(figure_name, format="pdf", dpi=300)
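# Same binning as fragment_size_raw_line, but built from bias-corrected signal
# obtained via get_bc_signal_by_fragment_length with min/max fragment-length
# cutoffs instead of raw cut counts.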
def fragment_size_bc_line(args):
mpbs_regions = GenomicRegionSet("Motif Predicted Binding Sites")
mpbs_regions.read(args.motif_file)
genomic_signal = GenomicSignal(args.reads_file)
genomic_signal.load_sg_coefs(11)
bam = Samfile(args.reads_file, "rb")
genome_data = GenomeData(args.organism)
fasta = Fastafile(genome_data.get_genome())
bias_table = BiasTable()
bias_table_list = args.bias_table.split(",")
table = bias_table.load_table(table_file_name_F=bias_table_list[0],
table_file_name_R=bias_table_list[1])
signal_f_max_145 = np.zeros(args.window_size)
signal_r_max_145 = np.zeros(args.window_size)
signal_f_146_307 = np.zeros(args.window_size)
signal_r_146_307 = np.zeros(args.window_size)
signal_f_min_307 = np.zeros(args.window_size)
signal_r_min_307 = np.zeros(args.window_size)
signal_f = np.zeros(args.window_size)
signal_r = np.zeros(args.window_size)
for region in mpbs_regions:
if str(region.name).split(":")[-1] == "Y":
mid = (region.initial + region.final) / 2
p1 = mid - (args.window_size / 2)
p2 = mid + (args.window_size / 2)
# All reads
signal_bc_f, signal_bc_r = \
genomic_signal.get_bc_signal_by_fragment_length(ref=region.chrom, start=p1, end=p2,
bam=bam, fasta=fasta,
bias_table=table,
forward_shift=args.forward_shift,
reverse_shift=args.reverse_shift,
min_length=None, max_length=None,
strand=True)
# length <= 145
signal_bc_max_145_f, signal_bc_max_145_r = \
genomic_signal.get_bc_signal_by_fragment_length(ref=region.chrom, start=p1, end=p2,
bam=bam, fasta=fasta,
bias_table=table,
forward_shift=args.forward_shift,
reverse_shift=args.reverse_shift,
min_length=None, max_length=145,
strand=True)
# length > 145 and <= 307
signal_bc_146_307_f, signal_bc_146_307_r = \
genomic_signal.get_bc_signal_by_fragment_length(ref=region.chrom, start=p1, end=p2,
bam=bam, fasta=fasta,
bias_table=table,
forward_shift=args.forward_shift,
reverse_shift=args.reverse_shift,
min_length=145, max_length=307,
strand=True)
# length > 307
signal_bc_min_307_f, signal_bc_min_307_r = \
genomic_signal.get_bc_signal_by_fragment_length(ref=region.chrom, start=p1, end=p2,
bam=bam, fasta=fasta,
bias_table=table,
forward_shift=args.forward_shift,
reverse_shift=args.reverse_shift,
min_length=307, max_length=None,
strand=True)
signal_f = np.add(signal_f, np.array(signal_bc_f))
signal_r = np.add(signal_r, np.array(signal_bc_r))
signal_f_max_145 = np.add(signal_f_max_145, np.array(signal_bc_max_145_f))
signal_r_max_145 = np.add(signal_r_max_145, np.array(signal_bc_max_145_r))
signal_f_146_307 = np.add(signal_f_146_307, np.array(signal_bc_146_307_f))
signal_r_146_307 = np.add(signal_r_146_307, np.array(signal_bc_146_307_r))
signal_f_min_307 = np.add(signal_f_min_307, np.array(signal_bc_min_307_f))
signal_r_min_307 = np.add(signal_r_min_307, np.array(signal_bc_min_307_r))
# Output the norm and slope signal
output_fname = os.path.join(args.output_location, "{}.txt".format(args.output_prefix))
f = open(output_fname, "w")
f.write("\t".join((map(str, signal_f))) + "\n")
f.write("\t".join((map(str, signal_r))) + "\n")
f.write("\t".join((map(str, signal_f_max_145))) + "\n")
f.write("\t".join((map(str, signal_r_max_145))) + "\n")
f.write("\t".join((map(str, signal_f_146_307))) + "\n")
f.write("\t".join((map(str, signal_r_146_307))) + "\n")
f.write("\t".join((map(str, signal_f_min_307))) + "\n")
f.write("\t".join((map(str, signal_r_min_307))) + "\n")
f.close()
# find out the linker position
pos_f_1, pos_r_1, pos_f_2, pos_r_2 = get_linkers_position(signal_f_146_307,
signal_r_146_307,
signal_f_min_307,
signal_r_min_307)
p1 = (pos_f_1 - pos_f_2) / 2 + pos_f_2
p2 = p1 + 180
p3 = args.window_size - p2
p4 = args.window_size - p1
fig, (ax1, ax2, ax3, ax4) = plt.subplots(4, 1, figsize=(8, 8))
start = -(args.window_size / 2)
end = (args.window_size / 2) - 1
x = np.linspace(start, end, num=args.window_size)
x_ticks = [start, p1 - 500, p2 - 500, 0, p3 - 500, p4 - 500, end]
update_axes_for_fragment_size_line(ax1, x, x_ticks, start, end, signal_f, signal_r, p1, p2, p3, p4)
update_axes_for_fragment_size_line(ax2, x, x_ticks, start, end, signal_f_max_145, signal_r_max_145, p1, p2,
p3, p4)
update_axes_for_fragment_size_line(ax3, x, x_ticks, start, end, signal_f_146_307, signal_r_146_307, p1, p2,
p3, p4)
update_axes_for_fragment_size_line(ax4, x, x_ticks, start, end, signal_f_min_307, signal_r_min_307, p1, p2,
p3, p4)
figure_name = os.path.join(args.output_location, "{}.pdf".format(args.output_prefix))
fig.subplots_adjust(bottom=.2, hspace=.5)
fig.tight_layout()
fig.savefig(figure_name, format="pdf", dpi=300)
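# Style one fragment-size panel: plot the forward/reverse profiles, set ticks
# and limits, and annotate the fraction of each strand's signal falling into
# the six segments delimited by the estimated linker positions p1..p4.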
def update_axes_for_fragment_size_line(ax, x, x_ticks, start, end, signal_f, signal_r, p1, p2, p3, p4):
max_signal = max(max(signal_f), max(signal_r))
min_signal = min(min(signal_f), min(signal_r))
ax.plot(x, signal_f, color='red', label='Forward')
ax.plot(x, signal_r, color='green', label='Reverse')
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_position(('outward', 15))
ax.tick_params(direction='out')
ax.set_xticks(x_ticks)
ax.set_xticklabels(map(str, x_ticks))
ax.set_xlim(start, end)
ax.set_yticks([min_signal, max_signal])
ax.set_yticklabels([str(int(min_signal)), str(int(max_signal))], rotation=90)
ax.set_ylim([min_signal, max_signal])
ax.legend().set_visible(False)
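    # Per-strand signal fractions within the segments [0,p1), [p1,p2),
    # [p2,500), [500,p3), [p3,p4) and [p4,end); the 500/1000 constants
    # appear to assume a 1000 bp window.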
    f_1, r_1 = sum(signal_f[:p1]) / sum(signal_f), sum(signal_r[:p1]) / sum(signal_r)
    f_2, r_2 = sum(signal_f[p1:p2]) / sum(signal_f), sum(signal_r[p1:p2]) / sum(signal_r)
    f_3, r_3 = sum(signal_f[p2:500]) / sum(signal_f), sum(signal_r[p2:500]) / sum(signal_r)
    f_4, r_4 = sum(signal_f[500:p3]) / sum(signal_f), sum(signal_r[500:p3]) / sum(signal_r)
    f_5, r_5 = sum(signal_f[p3:p4]) / sum(signal_f), sum(signal_r[p3:p4]) / sum(signal_r)
    f_6, r_6 = sum(signal_f[p4:]) / sum(signal_f), sum(signal_r[p4:]) / sum(signal_r)
text_x_1 = ((p1 - 0) / 2.0 + 0) / 1000
text_x_2 = ((p2 - p1) / 2.0 + p1) / 1000
text_x_3 = ((500 - p2) / 2.0 + p2) / 1000
text_x_4 = ((p3 - 500) / 2.0 + 500) / 1000
text_x_5 = ((p4 - p3) / 2.0 + p3) / 1000
text_x_6 = ((1000 - p4) / 2.0 + p4) / 1000
ax.text(text_x_1, 1.0, str(round(f_1, 2)), verticalalignment='center', color='red',
horizontalalignment='center', transform=ax.transAxes, fontsize=12)
ax.text(text_x_1, 0.9, str(round(r_1, 2)), verticalalignment='center', color='green',
horizontalalignment='center', transform=ax.transAxes, fontsize=12)
ax.text(text_x_2, 1.0, str(round(f_2, 2)), verticalalignment='center', color='red',
horizontalalignment='center', transform=ax.transAxes, fontsize=12)
ax.text(text_x_2, 0.9, str(round(r_2, 2)), verticalalignment='center', color='green',
horizontalalignment='center', transform=ax.transAxes, fontsize=12)
ax.text(text_x_3, 1.0, str(round(f_3, 2)), verticalalignment='center', color='red',
horizontalalignment='center', transform=ax.transAxes, fontsize=12)
ax.text(text_x_3, 0.9, str(round(r_3, 2)), verticalalignment='center', color='green',
horizontalalignment='center', transform=ax.transAxes, fontsize=12)
ax.text(text_x_4, 1.0, str(round(f_4, 2)), verticalalignment='center', color='red',
horizontalalignment='center', transform=ax.transAxes, fontsize=12)
ax.text(text_x_4, 0.9, str(round(r_4, 2)), verticalalignment='center', color='green',
horizontalalignment='center', transform=ax.transAxes, fontsize=12)
ax.text(text_x_5, 1.0, str(round(f_5, 2)), verticalalignment='center', color='red',
horizontalalignment='center', transform=ax.transAxes, fontsize=12)
ax.text(text_x_5, 0.9, str(round(r_5, 2)), verticalalignment='center', color='green',
horizontalalignment='center', transform=ax.transAxes, fontsize=12)
ax.text(text_x_6, 1.0, str(round(f_6, 2)), verticalalignment='center', color='red',
horizontalalignment='center', transform=ax.transAxes, fontsize=12)
ax.text(text_x_6, 0.9, str(round(r_6, 2)), verticalalignment='center', color='green',
horizontalalignment='center', transform=ax.transAxes, fontsize=12)
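# Locate putative linker positions by Savitzky-Golay smoothing the
# mono-nucleosome and longer-fragment profiles and taking the strongest peak
# on each side of the motif (the 400/600 offsets appear to assume a 1000 bp window).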
def get_linkers_position(signal_f_146_307, signal_r_146_307, signal_f_min_307, signal_r_min_307):
smooth_signal_f_146_307 = savgol_filter(signal_f_146_307, window_length=51, polyorder=2)
smooth_signal_r_146_307 = savgol_filter(signal_r_146_307, window_length=51, polyorder=2)
smooth_signal_f_min_307 = savgol_filter(signal_f_min_307, window_length=51, polyorder=2)
smooth_signal_r_min_307 = savgol_filter(signal_r_min_307, window_length=51, polyorder=2)
position_f_1 = np.argmax(smooth_signal_f_146_307[:400])
position_f_2 = np.argmax(smooth_signal_f_min_307[:position_f_1])
position_r_1 = np.argmax(smooth_signal_r_146_307[600:]) + 600
position_r_2 = np.argmax(smooth_signal_r_min_307[position_r_1:]) + position_r_1
return position_f_1, position_r_1, position_f_2, position_r_2
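# Min-max rescale a vector to the [0, 1] range.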
def rescaling(vector):
maxN = max(vector)
minN = min(vector)
return [(e - minN) / (maxN - minN) for e in vector]
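# Object-oriented counterpart of the functions above: bundles the input files
# and plotting parameters and exposes additional line-plot variants as methods.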
class Plot:
def __init__(self, organism, reads_file, motif_file, window_size,
downstream_ext, upstream_ext, forward_shift, reverse_shift,
initial_clip, bias_table, k_nb, output_loc, output_prefix):
self.organism = organism
self.reads_file = reads_file
self.motif_file = motif_file
self.window_size = window_size
self.downstream_ext = downstream_ext
self.upstream_ext = upstream_ext
self.forward_shift = forward_shift
self.reverse_shift = reverse_shift
self.initial_clip = initial_clip
self.bias_table = bias_table
self.k_nb = k_nb
self.output_loc = output_loc
self.output_prefix = output_prefix
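    # Compare two bias tables: average raw signal plus two bias-corrected
    # profiles (one per table), rescaled to [0, 1], written to text and
    # plotted in two panels together with the sequence logo.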
def line3(self, bias_table1, bias_table2):
signal = GenomicSignal(self.reads_file)
signal.load_sg_coefs(slope_window_size=9)
bias_table = BiasTable()
bias_table_list = bias_table1.split(",")
table1 = bias_table.load_table(table_file_name_F=bias_table_list[0],
table_file_name_R=bias_table_list[1])
bias_table = BiasTable()
bias_table_list = bias_table2.split(",")
table2 = bias_table.load_table(table_file_name_F=bias_table_list[0],
table_file_name_R=bias_table_list[1])
genome_data = GenomeData(self.organism)
fasta = Fastafile(genome_data.get_genome())
pwm_dict = dict([("A", [0.0] * self.window_size), ("C", [0.0] * self.window_size),
("G", [0.0] * self.window_size), ("T", [0.0] * self.window_size),
("N", [0.0] * self.window_size)])
num_sites = 0
mpbs_regions = GenomicRegionSet("Motif Predicted Binding Sites")
mpbs_regions.read(self.motif_file)
bam = Samfile(self.reads_file, "rb")
mean_signal_raw = np.zeros(self.window_size)
mean_signal_bc1 = np.zeros(self.window_size)
mean_signal_bc2 = np.zeros(self.window_size)
for region in mpbs_regions:
if str(region.name).split(":")[-1] == "Y":
# Extend by window_size
mid = (region.initial + region.final) / 2
p1 = mid - (self.window_size / 2)
p2 = mid + (self.window_size / 2)
signal_raw, signal_bc1 = \
self.get_signal3(ref=region.chrom, start=p1, end=p2, bam=bam, fasta=fasta, bias_table=table1)
signal_raw, signal_bc2 = \
self.get_signal3(ref=region.chrom, start=p1, end=p2, bam=bam, fasta=fasta, bias_table=table2)
num_sites += 1
mean_signal_raw = np.add(mean_signal_raw, np.array(signal_raw))
mean_signal_bc1 = np.add(mean_signal_bc1, np.array(signal_bc1))
mean_signal_bc2 = np.add(mean_signal_bc2, np.array(signal_bc2))
# Update pwm
aux_plus = 1
dna_seq = str(fasta.fetch(region.chrom, p1, p2)).upper()
if (region.final - region.initial) % 2 == 0:
aux_plus = 0
dna_seq_rev = AuxiliaryFunctions.revcomp(
str(fasta.fetch(region.chrom, p1 + aux_plus, p2 + aux_plus)).upper())
if region.orientation == "+":
for i in range(0, len(dna_seq)):
pwm_dict[dna_seq[i]][i] += 1
elif region.orientation == "-":
for i in range(0, len(dna_seq_rev)):
pwm_dict[dna_seq_rev[i]][i] += 1
mean_signal_raw = mean_signal_raw / num_sites
mean_signal_bc1 = mean_signal_bc1 / num_sites
mean_signal_bc2 = mean_signal_bc2 / num_sites
mean_signal_raw = self.rescaling(mean_signal_raw)
mean_signal_bc1 = self.rescaling(mean_signal_bc1)
mean_signal_bc2 = self.rescaling(mean_signal_bc2)
# Output the norm and slope signal
output_fname = os.path.join(self.output_loc, "{}.txt".format(self.output_prefix))
f = open(output_fname, "w")
f.write("\t".join((map(str, mean_signal_raw))) + "\n")
f.write("\t".join((map(str, mean_signal_bc1))) + "\n")
f.write("\t".join((map(str, mean_signal_bc2))) + "\n")
f.close()
# Output PWM and create logo
pwm_fname = os.path.join(self.output_loc, "{}.pwm".format(self.output_prefix))
pwm_file = open(pwm_fname, "w")
for e in ["A", "C", "G", "T"]:
pwm_file.write(" ".join([str(int(f)) for f in pwm_dict[e]]) + "\n")
pwm_file.close()
logo_fname = os.path.join(self.output_loc, "{}.logo.eps".format(self.output_prefix))
pwm = motifs.read(open(pwm_fname), "pfm")
pwm.weblogo(logo_fname, format="eps", stack_width="large", stacks_per_line=str(self.window_size),
color_scheme="color_classic", unit_name="", show_errorbars=False, logo_title="",
show_xaxis=False, xaxis_label="", show_yaxis=False, yaxis_label="",
show_fineprint=False, show_ends=False)
fig, (ax1, ax2) = plt.subplots(2)
start = -(self.window_size / 2)
end = (self.window_size / 2) - 1
x = np.linspace(start, end, num=self.window_size)
############################################################
# bias signal per strand
min_ = min(mean_signal_raw)
max_ = max(mean_signal_raw)
ax1.plot(x, mean_signal_raw, color='red')
ax1.xaxis.set_ticks_position('bottom')
ax1.yaxis.set_ticks_position('left')
ax1.spines['top'].set_visible(False)
ax1.spines['right'].set_visible(False)
ax1.spines['left'].set_position(('outward', 15))
ax1.spines['bottom'].set_position(('outward', 5))
ax1.tick_params(direction='out')
ax1.set_xticks([start, 0, end])
ax1.set_xticklabels([str(start), 0, str(end)])
ax1.set_yticks([min_, max_])
ax1.set_yticklabels([str(round(min_, 2)), str(round(max_, 2))], rotation=90)
ax1.set_title(self.output_prefix, fontweight='bold')
ax1.set_xlim(start, end)
ax1.set_ylim([min_, max_])
ax1.legend(loc="upper right", frameon=False)
ax1.set_ylabel("Raw Signal", rotation=90, fontweight='bold')
####################################################################
#####################################################################
# Bias corrected, non-bias corrected (not strand specific)
min_ = min(min(mean_signal_bc1), min(mean_signal_bc2))
max_ = max(max(mean_signal_bc1), max(mean_signal_bc2))
ax2.plot(x, mean_signal_bc1, color='red', label='VOM_8_NonNaked')
ax2.plot(x, mean_signal_bc2, color='green', label='KMER_6_NonNaked')
ax2.xaxis.set_ticks_position('bottom')
ax2.yaxis.set_ticks_position('left')
ax2.spines['top'].set_visible(False)
ax2.spines['right'].set_visible(False)
ax2.spines['left'].set_position(('outward', 15))
ax2.tick_params(direction='out')
ax2.set_xticks([start, 0, end])
ax2.set_xticklabels([str(start), 0, str(end)])
ax2.set_yticks([min_, max_])
ax2.set_yticklabels([str(round(min_, 2)), str(round(max_, 2))], rotation=90)
ax2.set_xlim(start, end)
ax2.set_ylim([min_, max_])
ax2.legend(loc="lower right", frameon=False)
ax2.spines['bottom'].set_position(('outward', 40))
ax2.set_xlabel("Coordinates from Motif Center", fontweight='bold')
ax2.set_ylabel("Bias Corrected Signal", rotation=90, fontweight='bold')
###################################################################################
###############################################################################
# merge the above figures
figure_name = os.path.join(self.output_loc, "{}.line.eps".format(self.output_prefix))
fig.subplots_adjust(bottom=.2, hspace=.5)
fig.tight_layout()
fig.savefig(figure_name, format="eps", dpi=300)
# Creating canvas and printing eps / pdf with merged results
output_fname = os.path.join(self.output_loc, "{}.eps".format(self.output_prefix))
c = pyx.canvas.canvas()
c.insert(pyx.epsfile.epsfile(0, 0, figure_name, scale=1.0))
c.insert(pyx.epsfile.epsfile(2.0, 1.39, logo_fname, width=13.8, height=1.75))
c.writeEPSfile(output_fname)
os.system("epstopdf " + figure_name)
os.system("epstopdf " + logo_fname)
os.system("epstopdf " + output_fname)
os.remove(pwm_fname)
os.remove(os.path.join(self.output_loc, "{}.line.eps".format(self.output_prefix)))
os.remove(os.path.join(self.output_loc, "{}.logo.eps".format(self.output_prefix)))
os.remove(os.path.join(self.output_loc, "{}.line.pdf".format(self.output_prefix)))
os.remove(os.path.join(self.output_loc, "{}.logo.pdf".format(self.output_prefix)))
os.remove(os.path.join(self.output_loc, "{}.eps".format(self.output_prefix)))
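    # Raw strand-specific cut counts around "Y"-flagged motif sites (no bias
    # correction): accumulate forward/reverse cut positions, write them to
    # text, and plot both strands together with the sequence logo.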
def line4(self):
genome_data = GenomeData(self.organism)
fasta = Fastafile(genome_data.get_genome())
mpbs_regions = GenomicRegionSet("Motif Predicted Binding Sites")
mpbs_regions.read(self.motif_file)
bam = Samfile(self.reads_file, "rb")
pwm_dict = None
signal_raw_f = None
signal_raw_r = None
num_sites = 0
for region in mpbs_regions:
if str(region.name).split(":")[-1] == "Y":
                # Center a window of self.window_size bp on the motif midpoint
mid = (region.initial + region.final) / 2
p1 = mid - (self.window_size / 2)
p2 = mid + (self.window_size / 2)
# p1 = region.initial - (self.window_size / 2)
# p2 = region.final + (self.window_size / 2)
size = p2 - p1
if pwm_dict is None:
pwm_dict = dict([("A", [0.0] * size), ("C", [0.0] * size),
("G", [0.0] * size), ("T", [0.0] * size),
("N", [0.0] * size)])
if signal_raw_f is None:
signal_raw_f = np.zeros(size)
if signal_raw_r is None:
signal_raw_r = np.zeros(size)
# Fetch raw signal
for read in bam.fetch(region.chrom, p1, p2):
if not read.is_reverse:
cut_site = read.pos + self.forward_shift
if p1 <= cut_site < p2:
signal_raw_f[cut_site - p1] += 1.0
else:
cut_site = read.aend + self.reverse_shift - 1
if p1 <= cut_site < p2:
signal_raw_r[cut_site - p1] += 1.0
num_sites += 1
# Update pwm
aux_plus = 1
dna_seq = str(fasta.fetch(region.chrom, p1, p2)).upper()
if (region.final - region.initial) % 2 == 0:
aux_plus = 0
dna_seq_rev = AuxiliaryFunctions.revcomp(str(fasta.fetch(region.chrom,
p1 + aux_plus, p2 + aux_plus)).upper())
if region.orientation == "+":
for i in range(0, len(dna_seq)):
pwm_dict[dna_seq[i]][i] += 1
elif region.orientation == "-":
for i in range(0, len(dna_seq_rev)):
pwm_dict[dna_seq_rev[i]][i] += 1
# mean_signal_raw_f = self.rescaling(signal_raw_f)
# mean_signal_raw_r = self.rescaling(signal_raw_r)
mean_signal_raw_f = signal_raw_f
mean_signal_raw_r = signal_raw_r
# Output the norm and slope signal
output_fname = os.path.join(self.output_loc, "{}.txt".format(self.output_prefix))
f = open(output_fname, "w")
f.write("\t".join((map(str, mean_signal_raw_f))) + "\n")
f.write("\t".join((map(str, mean_signal_raw_r))) + "\n")
f.close()
# Output PWM and create logo
pwm_fname = os.path.join(self.output_loc, "{}.pwm".format(self.output_prefix))
pwm_file = open(pwm_fname, "w")
for e in ["A", "C", "G", "T"]:
pwm_file.write(" ".join([str(int(f)) for f in pwm_dict[e]]) + "\n")
pwm_file.close()
logo_fname = os.path.join(self.output_loc, "{}.logo.eps".format(self.output_prefix))
pwm = motifs.read(open(pwm_fname), "pfm")
pwm.weblogo(logo_fname, format="eps", stack_width="large", stacks_per_line=str(size),
color_scheme="color_classic", unit_name="", show_errorbars=False, logo_title="",
show_xaxis=False, xaxis_label="", show_yaxis=False, yaxis_label="",
show_fineprint=False, show_ends=False)
start = -(size / 2)
end = (size / 2) - 1
x = np.linspace(start, end, num=size)
fig = plt.figure(figsize=(8, 4))
ax2 = fig.add_subplot(111)
min_signal = min(min(mean_signal_raw_f), min(mean_signal_raw_r))
max_signal = max(max(mean_signal_raw_f), max(mean_signal_raw_r))
ax2.plot(x, mean_signal_raw_f, color='red', label='Forward')
ax2.plot(x, mean_signal_raw_r, color='green', label='Reverse')
ax2.xaxis.set_ticks_position('bottom')
ax2.yaxis.set_ticks_position('left')
ax2.spines['top'].set_visible(False)
ax2.spines['right'].set_visible(False)
ax2.spines['left'].set_position(('outward', 15))
ax2.tick_params(direction='out')
ax2.set_title(self.output_prefix, fontweight='bold')
ax2.set_xticks([start, 0, end])
ax2.set_xticklabels([str(start), 0, str(end)])
ax2.set_yticks([min_signal, max_signal])
ax2.set_yticklabels([str(round(min_signal, 2)), str(round(max_signal, 2))], rotation=90)
ax2.set_xlim(start, end)
ax2.set_ylim([min_signal, max_signal])
ax2.legend(loc="upper right", frameon=False)
ax2.spines['bottom'].set_position(('outward', 40))
# ax2.set_xlabel("Coordinates from Motif Center", fontweight='bold')
# ax2.set_ylabel("Average Signal", rotation=90, fontweight='bold')
figure_name = os.path.join(self.output_loc, "{}.line.eps".format(self.output_prefix))
fig.subplots_adjust(bottom=.2, hspace=.5)
fig.tight_layout()
fig.savefig(figure_name, format="eps", dpi=300)
# Creating canvas and printing eps / pdf with merged results
output_fname = os.path.join(self.output_loc, "{}.eps".format(self.output_prefix))
c = pyx.canvas.canvas()
c.insert(pyx.epsfile.epsfile(0, 0, figure_name, scale=1.0))
c.insert(pyx.epsfile.epsfile(1.48, 0.92, logo_fname, width=18.5, height=1.75))
c.writeEPSfile(output_fname)
os.system("epstopdf " + figure_name)
os.system("epstopdf " + logo_fname)
os.system("epstopdf " + output_fname)
os.remove(pwm_fname)
os.remove(os.path.join(self.output_loc, "{}.line.eps".format(self.output_prefix)))
os.remove(os.path.join(self.output_loc, "{}.logo.eps".format(self.output_prefix)))
os.remove(os.path.join(self.output_loc, "{}.line.pdf".format(self.output_prefix)))
os.remove(os.path.join(self.output_loc, "{}.logo.pdf".format(self.output_prefix)))
os.remove(os.path.join(self.output_loc, "{}.eps".format(self.output_prefix)))
def atac_dnase_bc_line(self, reads_file1, reads_file2, bias_table1, bias_table2):
genome_data = GenomeData(self.organism)
fasta = Fastafile(genome_data.get_genome())
pwm_dict = dict([("A", [0.0] * self.window_size), ("C", [0.0] * self.window_size),
("G", [0.0] * self.window_size), ("T", [0.0] * self.window_size),
("N", [0.0] * self.window_size)])
bias_table = BiasTable()
bias_table_list = bias_table1.split(",")
table1 = bias_table.load_table(table_file_name_F=bias_table_list[0],
table_file_name_R=bias_table_list[1])
bias_table_list = bias_table2.split(",")
table2 = bias_table.load_table(table_file_name_F=bias_table_list[0],
table_file_name_R=bias_table_list[1])
mpbs_regions = GenomicRegionSet("Motif Predicted Binding Sites")
mpbs_regions.read(self.motif_file)
bam_atac = Samfile(reads_file1, "rb")
bam_dnase = Samfile(reads_file2, "rb")
mean_signal_atac = np.zeros(self.window_size)
mean_signal_dnase = np.zeros(self.window_size)
num_sites = 0
for region in mpbs_regions:
if str(region.name).split(":")[-1] == "Y":
                # Center the analysis window (self.window_size bp) on the motif midpoint
mid = (region.initial + region.final) / 2
p1 = mid - (self.window_size / 2)
p2 = mid + (self.window_size / 2)
# Fetch raw signal
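                # The ATAC-seq reads are shifted (+5/-4 here) to compensate for the
                # Tn5 insertion offset, while the DNase-seq reads are used without a
                # shift; each signal is corrected with its own bias table.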
signal_atac = self.get_bc_signal(ref=region.chrom, start=p1, end=p2, bam=bam_atac,
fasta=fasta, bias_table=table1,
forward_shift=5, reverse_shift=-4)
signal_dnase = self.get_bc_signal(ref=region.chrom, start=p1, end=p2, bam=bam_dnase,
fasta=fasta, bias_table=table2,
forward_shift=0, reverse_shift=0)
num_sites += 1
mean_signal_atac = np.add(mean_signal_atac, np.array(signal_atac))
mean_signal_dnase = np.add(mean_signal_dnase, np.array(signal_dnase))
# Update pwm
aux_plus = 1
dna_seq = str(fasta.fetch(region.chrom, p1, p2)).upper()
if (region.final - region.initial) % 2 == 0:
aux_plus = 0
dna_seq_rev = AuxiliaryFunctions.revcomp(str(fasta.fetch(region.chrom,
p1 + aux_plus, p2 + aux_plus)).upper())
if region.orientation == "+":
for i in range(0, len(dna_seq)):
pwm_dict[dna_seq[i]][i] += 1
elif region.orientation == "-":
for i in range(0, len(dna_seq_rev)):
pwm_dict[dna_seq_rev[i]][i] += 1
mean_signal_atac = self.rescaling(mean_signal_atac)
mean_signal_dnase = self.rescaling(mean_signal_dnase)
        # Write the bias-corrected, rescaled ATAC-seq and DNase-seq signals to a text file
output_fname = os.path.join(self.output_loc, "{}.txt".format(self.output_prefix))
f = open(output_fname, "w")
f.write("\t".join((map(str, mean_signal_atac))) + "\n")
f.write("\t".join((map(str, mean_signal_dnase))) + "\n")
f.close()
# Output PWM and create logo
pwm_fname = os.path.join(self.output_loc, "{}.pwm".format(self.output_prefix))
pwm_file = open(pwm_fname, "w")
for e in ["A", "C", "G", "T"]:
pwm_file.write(" ".join([str(int(f)) for f in pwm_dict[e]]) + "\n")
pwm_file.close()
logo_fname = os.path.join(self.output_loc, "{}.logo.eps".format(self.output_prefix))
pwm = motifs.read(open(pwm_fname), "pfm")
pwm.weblogo(logo_fname, format="eps", stack_width="large", stacks_per_line=str(self.window_size),
color_scheme="color_classic", unit_name="", show_errorbars=False, logo_title="",
show_xaxis=False, xaxis_label="", show_yaxis=False, yaxis_label="",
show_fineprint=False, show_ends=False)
start = -(self.window_size / 2)
end = (self.window_size / 2) - 1
x = np.linspace(start, end, num=self.window_size)
fig = plt.figure(figsize=(8, 4))
ax2 = fig.add_subplot(111)
min_signal = min(min(mean_signal_atac), min(mean_signal_dnase))
max_signal = max(max(mean_signal_atac), max(mean_signal_dnase))
ax2.plot(x, mean_signal_atac, color='red', label='ATAC-seq')
ax2.plot(x, mean_signal_dnase, color='green', label='DNase-seq')
ax2.xaxis.set_ticks_position('bottom')
ax2.yaxis.set_ticks_position('left')
ax2.spines['top'].set_visible(False)
ax2.spines['right'].set_visible(False)
ax2.spines['left'].set_position(('outward', 15))
ax2.tick_params(direction='out')
ax2.set_title(self.output_prefix, fontweight='bold')
ax2.set_xticks([start, 0, end])
ax2.set_xticklabels([str(start), 0, str(end)])
ax2.set_yticks([min_signal, max_signal])
ax2.set_yticklabels([str(round(min_signal, 2)), str(round(max_signal, 2))], rotation=90)
ax2.set_xlim(start, end)
ax2.set_ylim([min_signal, max_signal])
ax2.legend(loc="upper right", frameon=False)
# ax2.legend(loc="lower right", frameon=False)
ax2.spines['bottom'].set_position(('outward', 40))
figure_name = os.path.join(self.output_loc, "{}.line.eps".format(self.output_prefix))
fig.subplots_adjust(bottom=.2, hspace=.5)
fig.tight_layout()
fig.savefig(figure_name, format="eps", dpi=300)
# Creating canvas and printing eps / pdf with merged results
output_fname = os.path.join(self.output_loc, "{}.eps".format(self.output_prefix))
c = pyx.canvas.canvas()
c.insert(pyx.epsfile.epsfile(0, 0, figure_name, scale=1.0))
c.insert(pyx.epsfile.epsfile(1.51, 0.89, logo_fname, width=18.3, height=1.75))
c.writeEPSfile(output_fname)
os.system("epstopdf " + figure_name)
os.system("epstopdf " + logo_fname)
os.system("epstopdf " + output_fname)
os.remove(pwm_fname)
os.remove(os.path.join(self.output_loc, "{}.line.eps".format(self.output_prefix)))
os.remove(os.path.join(self.output_loc, "{}.logo.eps".format(self.output_prefix)))
os.remove(os.path.join(self.output_loc, "{}.line.pdf".format(self.output_prefix)))
os.remove(os.path.join(self.output_loc, "{}.logo.pdf".format(self.output_prefix)))
os.remove(os.path.join(self.output_loc, "{}.eps".format(self.output_prefix)))
def get_bc_signal(self, ref, start, end, bam, fasta, bias_table, forward_shift, reverse_shift):
# Parameters
window = 50
defaultKmerValue = 1.0
# Initialization
fBiasDict = bias_table[0]
rBiasDict = bias_table[1]
k_nb = len(fBiasDict.keys()[0])
p1 = start
p2 = end
p1_w = p1 - (window / 2)
p2_w = p2 + (window / 2)
p1_wk = p1_w - int(k_nb / 2.)
p2_wk = p2_w + int(k_nb / 2.)
currStr = str(fasta.fetch(ref, p1_wk, p2_wk - 1)).upper()
currRevComp = AuxiliaryFunctions.revcomp(str(fasta.fetch(ref, p1_wk + 1, p2_wk)).upper())
# Iterating on sequence to create the bias signal
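        # For every position in the extended window, look up the bias of the k-mer
        # centred on it: the forward bias from the + strand sequence and the
        # reverse bias from the reverse complement. k-mers missing from the table
        # (e.g. containing N) fall back to defaultKmerValue.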
signal_bias_f = []
signal_bias_r = []
for i in range(int(k_nb / 2.), len(currStr) - int(k_nb / 2) + 1):
fseq = currStr[i - int(k_nb / 2.):i + int(k_nb / 2.)]
rseq = currRevComp[len(currStr) - int(k_nb / 2.) - i:len(currStr) + int(k_nb / 2.) - i]
try:
signal_bias_f.append(fBiasDict[fseq])
except Exception:
signal_bias_f.append(defaultKmerValue)
try:
signal_bias_r.append(rBiasDict[rseq])
except Exception:
signal_bias_r.append(defaultKmerValue)
# Raw counts
signal_raw_f = [0.0] * (p2_w - p1_w)
signal_raw_r = [0.0] * (p2_w - p1_w)
for read in bam.fetch(ref, p1_w, p2_w):
if not read.is_reverse:
cut_site = read.pos + self.forward_shift
if p1_w <= cut_site < p2_w:
signal_raw_f[cut_site - p1_w] += 1.0
else:
cut_site = read.aend + self.reverse_shift - 1
if p1_w <= cut_site < p2_w:
signal_raw_r[cut_site - p1_w] += 1.0
        # Sliding-window sums of the raw cut counts (window bp wide, one value per position)
Nf = []
Nr = []
fSum = sum(signal_raw_f[:window])
rSum = sum(signal_raw_r[:window])
fLast = signal_raw_f[0]
rLast = signal_raw_r[0]
for i in range((window / 2), len(signal_raw_f) - (window / 2)):
Nf.append(fSum)
Nr.append(rSum)
fSum -= fLast
fSum += signal_raw_f[i + (window / 2)]
fLast = signal_raw_f[i - (window / 2) + 1]
rSum -= rLast
rSum += signal_raw_r[i + (window / 2)]
rLast = signal_raw_r[i - (window / 2) + 1]
        # Bias correction: within each sliding window, redistribute the observed
        # read counts (Nf, Nr) according to the normalised k-mer bias, i.e.
        # nhat = N * bias_at_position / sum_of_bias_in_window, summed over strands
fSum = sum(signal_bias_f[:window])
rSum = sum(signal_bias_r[:window])
fLast = signal_bias_f[0]
rLast = signal_bias_r[0]
signal_bc = []
for i in range((window / 2), len(signal_bias_f) - (window / 2)):
nhatf = Nf[i - (window / 2)] * (signal_bias_f[i] / fSum)
nhatr = Nr[i - (window / 2)] * (signal_bias_r[i] / rSum)
signal_bc.append(nhatf + nhatr)
fSum -= fLast
fSum += signal_bias_f[i + (window / 2)]
fLast = signal_bias_f[i - (window / 2) + 1]
rSum -= rLast
rSum += signal_bias_r[i + (window / 2)]
rLast = signal_bias_r[i - (window / 2) + 1]
return signal_bc
def atac_dnase_raw_line(self, reads_file1, reads_file2):
genome_data = GenomeData(self.organism)
fasta = Fastafile(genome_data.get_genome())
pwm_dict = dict([("A", [0.0] * self.window_size), ("C", [0.0] * self.window_size),
("G", [0.0] * self.window_size), ("T", [0.0] * self.window_size),
("N", [0.0] * self.window_size)])
mpbs_regions = GenomicRegionSet("Motif Predicted Binding Sites")
mpbs_regions.read(self.motif_file)
bam_atac = Samfile(reads_file1, "rb")
bam_dnase = Samfile(reads_file2, "rb")
mean_signal_atac = np.zeros(self.window_size)
mean_signal_dnase = np.zeros(self.window_size)
num_sites = 0
for region in mpbs_regions:
if str(region.name).split(":")[-1] == "Y":
                # Center the analysis window (self.window_size bp) on the motif midpoint
mid = (region.initial + region.final) / 2
p1 = mid - (self.window_size / 2)
p2 = mid + (self.window_size / 2)
# Fetch raw signal
for read in bam_atac.fetch(region.chrom, p1, p2):
if not read.is_reverse:
cut_site = read.pos + self.forward_shift
if p1 <= cut_site < p2:
mean_signal_atac[cut_site - p1] += 1.0
else:
cut_site = read.aend + self.reverse_shift - 1
if p1 <= cut_site < p2:
mean_signal_atac[cut_site - p1] += 1.0
# Fetch raw signal
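                # DNase-seq cut sites are taken directly from the read 5' ends,
                # i.e. without the shift applied to the ATAC-seq reads above.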
for read in bam_dnase.fetch(region.chrom, p1, p2):
if not read.is_reverse:
cut_site = read.pos
if p1 <= cut_site < p2:
mean_signal_dnase[cut_site - p1] += 1.0
else:
cut_site = read.aend - 1
if p1 <= cut_site < p2:
mean_signal_dnase[cut_site - p1] += 1.0
num_sites += 1
# Update pwm
aux_plus = 1
dna_seq = str(fasta.fetch(region.chrom, p1, p2)).upper()
if (region.final - region.initial) % 2 == 0:
aux_plus = 0
dna_seq_rev = AuxiliaryFunctions.revcomp(str(fasta.fetch(region.chrom,
p1 + aux_plus, p2 + aux_plus)).upper())
if region.orientation == "+":
for i in range(0, len(dna_seq)):
pwm_dict[dna_seq[i]][i] += 1
elif region.orientation == "-":
for i in range(0, len(dna_seq_rev)):
pwm_dict[dna_seq_rev[i]][i] += 1
mean_signal_atac = self.rescaling(mean_signal_atac)
mean_signal_dnase = self.rescaling(mean_signal_dnase)
        # Write the rescaled raw ATAC-seq and DNase-seq signals to a text file
output_fname = os.path.join(self.output_loc, "{}.txt".format(self.output_prefix))
f = open(output_fname, "w")
f.write("\t".join((map(str, mean_signal_atac))) + "\n")
f.write("\t".join((map(str, mean_signal_dnase))) + "\n")
f.close()
# Output PWM and create logo
pwm_fname = os.path.join(self.output_loc, "{}.pwm".format(self.output_prefix))
pwm_file = open(pwm_fname, "w")
for e in ["A", "C", "G", "T"]:
pwm_file.write(" ".join([str(int(f)) for f in pwm_dict[e]]) + "\n")
pwm_file.close()
logo_fname = os.path.join(self.output_loc, "{}.logo.eps".format(self.output_prefix))
pwm = motifs.read(open(pwm_fname), "pfm")
pwm.weblogo(logo_fname, format="eps", stack_width="large", stacks_per_line=str(self.window_size),
color_scheme="color_classic", unit_name="", show_errorbars=False, logo_title="",
show_xaxis=False, xaxis_label="", show_yaxis=False, yaxis_label="",
show_fineprint=False, show_ends=False)
start = -(self.window_size / 2)
end = (self.window_size / 2) - 1
x = np.linspace(start, end, num=self.window_size)
fig = plt.figure(figsize=(8, 4))
ax2 = fig.add_subplot(111)
min_signal = min(min(mean_signal_atac), min(mean_signal_dnase))
max_signal = max(max(mean_signal_atac), max(mean_signal_dnase))
ax2.plot(x, mean_signal_atac, color='red', label='ATAC-seq')
ax2.plot(x, mean_signal_dnase, color='green', label='DNase-seq')
ax2.xaxis.set_ticks_position('bottom')
ax2.yaxis.set_ticks_position('left')
ax2.spines['top'].set_visible(False)
ax2.spines['right'].set_visible(False)
ax2.spines['left'].set_position(('outward', 15))
ax2.tick_params(direction='out')
ax2.set_title(self.output_prefix, fontweight='bold')
ax2.set_xticks([start, 0, end])
ax2.set_xticklabels([str(start), 0, str(end)])
ax2.set_yticks([min_signal, max_signal])
ax2.set_yticklabels([str(round(min_signal, 2)), str(round(max_signal, 2))], rotation=90)
ax2.set_xlim(start, end)
ax2.set_ylim([min_signal, max_signal])
ax2.legend(loc="upper right", frameon=False)
ax2.spines['bottom'].set_position(('outward', 40))
figure_name = os.path.join(self.output_loc, "{}.line.eps".format(self.output_prefix))
fig.subplots_adjust(bottom=.2, hspace=.5)
fig.tight_layout()
fig.savefig(figure_name, format="eps", dpi=300)
# Creating canvas and printing eps / pdf with merged results
output_fname = os.path.join(self.output_loc, "{}.eps".format(self.output_prefix))
c = pyx.canvas.canvas()
c.insert(pyx.epsfile.epsfile(0, 0, figure_name, scale=1.0))
c.insert(pyx.epsfile.epsfile(1.51, 0.89, logo_fname, width=18.3, height=1.75))
c.writeEPSfile(output_fname)
os.system("epstopdf " + figure_name)
os.system("epstopdf " + logo_fname)
os.system("epstopdf " + output_fname)
os.remove(pwm_fname)
os.remove(os.path.join(self.output_loc, "{}.line.eps".format(self.output_prefix)))
os.remove(os.path.join(self.output_loc, "{}.logo.eps".format(self.output_prefix)))
os.remove(os.path.join(self.output_loc, "{}.line.pdf".format(self.output_prefix)))
os.remove(os.path.join(self.output_loc, "{}.logo.pdf".format(self.output_prefix)))
os.remove(os.path.join(self.output_loc, "{}.eps".format(self.output_prefix)))
def fragment_size_raw_line(self):
mpbs_regions = GenomicRegionSet("Motif Predicted Binding Sites")
mpbs_regions.read(self.motif_file)
bam = Samfile(self.reads_file, "rb")
signal_f_max_145 = np.zeros(self.window_size)
signal_r_max_145 = np.zeros(self.window_size)
signal_f_146_307 = np.zeros(self.window_size)
signal_r_146_307 = np.zeros(self.window_size)
signal_f_min_307 = np.zeros(self.window_size)
signal_r_min_307 = np.zeros(self.window_size)
signal_f = np.zeros(self.window_size)
signal_r = np.zeros(self.window_size)
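        # Reads are binned by fragment (template) length; the thresholds are
        # commonly interpreted as nucleosome-free (<= 145 bp), mono-nucleosome
        # sized (146-307 bp) and longer, multi-nucleosome fragments (> 307 bp).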
for region in mpbs_regions:
if str(region.name).split(":")[-1] == "Y":
                # Center the analysis window (self.window_size bp) on the motif midpoint
mid = (region.initial + region.final) / 2
p1 = mid - (self.window_size / 2)
p2 = mid + (self.window_size / 2)
# Fetch raw signal
for read in bam.fetch(region.chrom, p1, p2):
# All reads
if not read.is_reverse:
cut_site = read.pos + self.forward_shift
if p1 <= cut_site < p2:
signal_f[cut_site - p1] += 1.0
else:
cut_site = read.aend + self.reverse_shift - 1
if p1 <= cut_site < p2:
signal_r[cut_site - p1] += 1.0
# length <= 145
if abs(read.template_length) <= 145:
if not read.is_reverse:
cut_site = read.pos + self.forward_shift
if p1 <= cut_site < p2:
signal_f_max_145[cut_site - p1] += 1.0
else:
cut_site = read.aend + self.reverse_shift - 1
if p1 <= cut_site < p2:
signal_r_max_145[cut_site - p1] += 1.0
# length > 145 and <= 307
if 145 < abs(read.template_length) <= 307:
if not read.is_reverse:
cut_site = read.pos + self.forward_shift
if p1 <= cut_site < p2:
signal_f_146_307[cut_site - p1] += 1.0
else:
cut_site = read.aend + self.reverse_shift - 1
if p1 <= cut_site < p2:
signal_r_146_307[cut_site - p1] += 1.0
# length > 307
if abs(read.template_length) > 307:
if not read.is_reverse:
cut_site = read.pos + self.forward_shift
if p1 <= cut_site < p2:
signal_f_min_307[cut_site - p1] += 1.0
else:
cut_site = read.aend + self.reverse_shift - 1
if p1 <= cut_site < p2:
signal_r_min_307[cut_site - p1] += 1.0
        # Write the strand-specific signals for each fragment-length class to a text file
output_fname = os.path.join(self.output_loc, "{}.txt".format(self.output_prefix))
f = open(output_fname, "w")
f.write("\t".join((map(str, signal_f))) + "\n")
f.write("\t".join((map(str, signal_r))) + "\n")
f.write("\t".join((map(str, signal_f_max_145))) + "\n")
f.write("\t".join((map(str, signal_r_max_145))) + "\n")
f.write("\t".join((map(str, signal_f_146_307))) + "\n")
f.write("\t".join((map(str, signal_r_146_307))) + "\n")
f.write("\t".join((map(str, signal_f_min_307))) + "\n")
f.write("\t".join((map(str, signal_r_min_307))) + "\n")
f.close()
        # Estimate the linker positions from the nucleosomal fragment signals
pos_f_1, pos_r_1, pos_f_2, pos_r_2 = self.get_linkers_position(signal_f_146_307,
signal_r_146_307,
signal_f_min_307,
signal_r_min_307)
p1 = (pos_f_1 - pos_f_2) / 2 + pos_f_2
p2 = p1 + 180
p3 = self.window_size - p2
p4 = self.window_size - p1
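        # p1..p4 delimit a 180 bp window on each side of the motif, mirrored
        # around the centre and anchored on the detected linker position; they
        # are passed to update_axes_for_fragment_size_line to annotate each subplot.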
fig, (ax1, ax2, ax3, ax4) = plt.subplots(4, 1, figsize=(8, 8))
start = -(self.window_size / 2)
end = (self.window_size / 2) - 1
x = np.linspace(start, end, num=self.window_size)
x_ticks = [start, p1 - 500, p2 - 500, 0, p3 - 500, p4 - 500, end]
self.update_axes_for_fragment_size_line(ax1, x, x_ticks, start, end, signal_f, signal_r, p1, p2,
p3, p4)
self.update_axes_for_fragment_size_line(ax2, x, x_ticks, start, end, signal_f_max_145, signal_r_max_145, p1, p2,
p3, p4)
self.update_axes_for_fragment_size_line(ax3, x, x_ticks, start, end, signal_f_146_307, signal_r_146_307, p1, p2,
p3, p4)
self.update_axes_for_fragment_size_line(ax4, x, x_ticks, start, end, signal_f_min_307, signal_r_min_307, p1, p2,
p3, p4)
figure_name = os.path.join(self.output_loc, "{}.pdf".format(self.output_prefix))
fig.subplots_adjust(bottom=.2, hspace=.5)
fig.tight_layout()
fig.savefig(figure_name, format="pdf", dpi=300)
def fragment_size_bc_line(self, bias_table_files):
mpbs_regions = GenomicRegionSet("Motif Predicted Binding Sites")
mpbs_regions.read(self.motif_file)
genomic_signal = GenomicSignal(self.reads_file)
genomic_signal.load_sg_coefs(11)
bam = Samfile(self.reads_file, "rb")
genome_data = GenomeData(self.organism)
fasta = Fastafile(genome_data.get_genome())
bias_table = BiasTable()
bias_table_list = bias_table_files.split(",")
table = bias_table.load_table(table_file_name_F=bias_table_list[0],
table_file_name_R=bias_table_list[1])
signal_f_max_145 = np.zeros(self.window_size)
signal_r_max_145 = np.zeros(self.window_size)
signal_f_146_307 = np.zeros(self.window_size)
signal_r_146_307 = np.zeros(self.window_size)
signal_f_min_307 = np.zeros(self.window_size)
signal_r_min_307 = np.zeros(self.window_size)
signal_f = np.zeros(self.window_size)
signal_r = np.zeros(self.window_size)
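        # Same fragment-length classes as in fragment_size_raw_line (<= 145,
        # 146-307 and > 307 bp), but here each class is bias-corrected via
        # get_bc_signal_by_fragment_length.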
for region in mpbs_regions:
if str(region.name).split(":")[-1] == "Y":
mid = (region.initial + region.final) / 2
p1 = mid - (self.window_size / 2)
p2 = mid + (self.window_size / 2)
# All reads
signal_bc_f, signal_bc_r = \
genomic_signal.get_bc_signal_by_fragment_length(ref=region.chrom, start=p1, end=p2,
bam=bam, fasta=fasta,
bias_table=table,
forward_shift=self.forward_shift,
reverse_shift=self.reverse_shift,
min_length=None, max_length=None,
strand=True)
# length <= 145
signal_bc_max_145_f, signal_bc_max_145_r = \
genomic_signal.get_bc_signal_by_fragment_length(ref=region.chrom, start=p1, end=p2,
bam=bam, fasta=fasta,
bias_table=table,
forward_shift=self.forward_shift,
reverse_shift=self.reverse_shift,
min_length=None, max_length=145,
strand=True)
# length > 145 and <= 307
signal_bc_146_307_f, signal_bc_146_307_r = \
genomic_signal.get_bc_signal_by_fragment_length(ref=region.chrom, start=p1, end=p2,
bam=bam, fasta=fasta,
bias_table=table,
forward_shift=self.forward_shift,
reverse_shift=self.reverse_shift,
min_length=145, max_length=307,
strand=True)
# length > 307
signal_bc_min_307_f, signal_bc_min_307_r = \
genomic_signal.get_bc_signal_by_fragment_length(ref=region.chrom, start=p1, end=p2,
bam=bam, fasta=fasta,
bias_table=table,
forward_shift=self.forward_shift,
reverse_shift=self.reverse_shift,
min_length=307, max_length=None,
strand=True)
                signal_f = np.add(signal_f, np.array(signal_bc_f))
# -*- coding: utf-8 -*-
"""
weighted2D.py
Contains functions to perform a weighted least-squares linear fit when there
are errors in both the x and y directions.
Reference: B. Reed "Linear least-squares fits with errors in both
coordinates. II: Comments on parameter variances", Am. J. Phys, 60, 1992
"""
def mfunc(m, x_in, y_in):
    # mfunc: the function minimized to find the best-fit slope
import numpy as np
# Separate x and y from their weights
x = x_in[:,0]
y = y_in[:,0]
Wxi = x_in[:,1]
Wyi = y_in[:,1]
# Calculate weight for each data point
Wi = Wxi*Wyi/(m**2*Wyi+Wxi) # Eq 8
# Weighted means and deviations from weighted means
xbar = np.sum(Wi*x)/np.sum(Wi) # Eq 11
ybar = np.sum(Wi*y)/np.sum(Wi) # Eq 12
U = x-xbar # Eq 9
V = y-ybar # Eq 10
# Minimization function (eq 19 from paper)
g = (m**2*np.sum((Wi**2)*U*V/Wxi) + m*np.sum((Wi**2)*((U**2)/Wyi -
(V**2)/Wxi)) - np.sum((Wi**2)*U*V/Wyi))
g = g**2
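    # Squaring makes the root of Eq 19 a minimum, so a generic minimizer such as
    # scipy.optimize.fmin (imported in wls2d below) can locate the best-fit slope.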
return g
def wls2d(x, y, delx, dely):
"""
WLS2D Calculates the weighted least squares fit to a straight line when
there are errors in both the x and y directions.
Reference: B. Reed "Linear least-squares fits with errors in both
coordinates. II: Comments on parameter variances", Am. J. Phys, 60, 1992
    fitparams = wls2d(x, y, delx, dely)
INPUTS
x vector of independent data points
y vector of dependent data points
delx vector of uncertainties/errors in x points
dely vector of uncertainties/errors in y points
OUTPUT
fitparams vector of fit parameters
fitparams[0] best fit slope
fitparams[1] best fit y intercept
fitparams[2] uncertainty in slope
fitparams[3] uncertainty in y-intercept
    Note: equation numbers refer to Reed's paper
"""
import numpy as np
from numpy.matlib import repmat
from scipy.optimize import fmin
N = len(x)
# Calculate weights and weighted means
Wxi = 1/(delx**2)
Wyi = 1/(dely**2)
# Force vectors to be column vectors
x.shape = (N,1)
y.shape = (N,1)
Wxi.shape = (N,1)
Wyi.shape = (N,1)
# Add weights as second columns to x and y
    xWxi = np.append(x, Wxi, axis=1)