prompt | completion | api
---|---|---
strings, 19 to 879k chars | strings, 3 to 53.8k chars | strings, 8 to 59 chars
import numpy as np
import math
import shutil
import os
import glob
import random
import sys
import stat
import extract_objects_places_features
import pandas as pd
from save_inDir import *
import argparse
from shutil import copyfile
sys.path.insert(0, '../classifiers/')
from people_reID.test_extraction import extract_reID_features
#extracts and saves the features for later analysis
def extracting_features_set(aug,dataset, gpu_ids, finetuned=False):
filename = '../dataset/'
name_final_folder = dataset+aug
classe = ['positive','negative']
finetuned_weights = '../out_files/checkpoints/finetuned'
if not os.path.isdir(finetuned_weights):
os.mkdir(finetuned_weights)
dir_features = '../out_files/features'
pd.set_option('display.max_colwidth', None)
finetuned_weights = os.path.join(finetuned_weights, name_final_folder)
if not os.path.isdir(finetuned_weights):
os.mkdir(finetuned_weights)
feature_extractor_places = extract_objects_places_features.loadNetworkPlaces(1,finetuned_weights+'best_weights_places.h5',finetuned)
feature_extractor_imagenet = extract_objects_places_features.loadNetworkImagenet(1,finetuned_weights+'best_weights_imagenet.h5',finetuned)
#------------------
#----------------
#extract features objects, places and people -- train/val/test
for n in ['train','val','test']:
out_features = os.path.join(dir_features, name_final_folder)
if not os.path.isdir(out_features):
os.mkdir(out_features)
if n == 'test': # the test split has no augmentation
name = os.path.join(filename, dataset,n)
else:
name = os.path.join(filename, dataset,n+aug)
df_paths = pd.read_csv(name+'.csv', header=0)
labels = np.array(df_paths.label)
negative_labels = np.ones(len(labels)) - labels
labels = list(map(bool,labels))
negative_labels = list(map(bool,negative_labels))
#----------------
#extract features objects and places
generatorPlaces= extract_objects_places_features.generateData_fromFile(224,df_paths)
names = generatorPlaces.filenames
generatorImagenet = extract_objects_places_features.generateData_fromFile(299,df_paths)
features_imagenet, features_places = extract_objects_places_features.getFeatures(generatorImagenet,generatorPlaces,feature_extractor_imagenet,feature_extractor_places)
np.save(out_features+'/positive_'+n+'_names'+aug+'.npy', np.array(names)[labels])
np.save(out_features+'/negative_'+n+'_names'+aug+'.npy', np.array(names)[negative_labels])
np.save(out_features+'/positive_'+n+'_imagenet'+aug+'.npy', np.array(features_imagenet)[labels])
np.save(out_features+'/negative_'+n+'_imagenet'+aug+'.npy', np.array(features_imagenet)[negative_labels])
np.save(out_features+'/positive_'+n+'_places'+aug+'.npy', np.array(features_places)[labels])
np.save(out_features+'/negative_'+n+'_places'+aug+'.npy', np.array(features_places)[negative_labels])
#----------------
#extract features from PCB
features_people, names = extract_reID_features('../classifiers/people_reID/opts.yaml','../classifiers/people_reID/model/net_last.pth', n, dataset,df_paths,'path','label', out_features, gpu_ids)
np.save(out_features+'/positive_'+n+'_names_people'+aug+'.npy', np.array(names)[labels])
np.save(out_features+'/negative_'+n+'_names_people'+aug+'.npy', np.array(names)[negative_labels])
import numpy as np
import scipy as sp
from scipy import sparse
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from scipy import signal
from random import gauss
#import hdf5storage
import h5py
import timeit
import time
#from sympy.solvers.solveset import nonlinsolve
#from sympy.core.symbol import symbols
#from sympy import exp
from scipy import stats
import os
from .analysis import *
opj = os.path.join
#written for python 3.6
####################################################################################################
####################################################################################################
####GRAPH SIMULATIONS
####################################################################################################
####################################################################################################
####################################################################################################
#testing any time-evolution propagator defined by a kernel on the graph
####################################################################################################
def graph_propagator_test(u_0, Time, Delta_t, kernel_param, Graph_Kernel, a=1, b=1, c=1, sigma_noise=0,
one_dim=False, syn=0, gridsize=1000, h=0.01, GF_domain=False, eigvals=None, eigvecs=None,
Visual=False, SaveActivity=False, Filepath=' ', NSim=0):
if one_dim==True:
s,U = one_dim_Laplacian_eigenvalues(gridsize, h, syn, vecs=True)
else:
s=eigvals
U=eigvecs
#note that s is a vector of eigenvalues, not the diagonal matrix of eigenvalues
#s_matrix=sp.sparse.diags(s).toarray()
if Graph_Kernel!='Damped Wave':
kernel_gf = GraphKernel(s,kernel_param, type=Graph_Kernel)
if GF_domain == False:
kernel_matrix=sp.sparse.diags(kernel_gf).toarray()
Laplacian_based_propagator = np.dot(U, np.dot(kernel_matrix, U.T))
else:
kernel_gf, kernel_gf_prime=GraphKernel(s,kernel_param, type=Graph_Kernel, a=a, b=b, c=c, prime=True)
if GF_domain == False:
Laplacian_based_propagator = np.dot(U, np.dot(sp.sparse.diags(kernel_gf).toarray(), U.T))
Laplacian_based_propagator_prime = np.dot(U, np.dot(sp.sparse.diags(kernel_gf_prime).toarray(), U.T))
Timesteps = int(round(Time/Delta_t))
u_Delta_t = np.zeros_like(u_0)
u_prime = np.zeros_like(u_0)
if SaveActivity==True or GF_domain == True:
u_total = np.zeros((len(u_0),Timesteps))
if Visual==True and GF_domain == False:
plt.ion()
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlim(0, len(u_0))
#ax.set_ylim(0, 11)
#line2, = ax.plot(np.arange(len(I_0)), I_0, 'b-')
#line1, = ax.plot(np.arange(len(E_0)), E_0, 'r-')
ax.plot(u_0, 'b-')
fig.canvas.draw()
for i in range(Timesteps):
if sigma_noise!=0:
Noise = sigma_noise * np.array([gauss(0.0, 1.0) for k in range(len(u_0))])
else:
Noise = 0
if SaveActivity==True or GF_domain == True:
u_total[:,i]=np.copy(u_0)
if i%10 == 0:
print(i)
#impulse response
# if i==500:
# E_Delta_t[600:650]=0.9*np.ones(50)
# I_Delta_t[580:620]=0.9*np.ones(40)
if Graph_Kernel!='Damped Wave':
if GF_domain == False:
u_Delta_t = np.dot(Laplacian_based_propagator,u_0)+np.sqrt(Delta_t)*Noise
else:
u_Delta_t = kernel_gf * u_0 + np.sqrt(Delta_t)*Noise
else:
if GF_domain == False:
u_Delta_t = np.dot(Laplacian_based_propagator,u_0)+np.dot(Laplacian_based_propagator_prime,u_prime)+np.sqrt(Delta_t)*Noise
u_prime=(u_Delta_t-u_0)/kernel_param
else:
u_Delta_t = kernel_gf * u_0 + kernel_gf_prime * u_prime + np.sqrt(Delta_t)*Noise
u_prime=(u_Delta_t-u_0)/kernel_param
if Visual==True and i%5 == 0 and GF_domain == False:
time.sleep(0.03)
ax.clear()
ax.set_xlim(0, len(u_0))
#ax.set_ylim(-0.05,0.1)
#line2.set_ydata(I_Delta_t)
#line1.set_ydata(E_Delta_t)
ax.plot(u_Delta_t, 'b-')
fig.canvas.draw()
fig.canvas.flush_events()
u_0 = np.copy(u_Delta_t)
if SaveActivity==True:
if Filepath==' ':
if one_dim==True:
filepath = 'G:/Macbook Stuff/Simulation Results/1D '+Graph_Kernel+' Kernel Test t=%.f/'%(kernel_param)
else:
filepath = 'G:/Macbook Stuff/Simulation Results/'+Graph_Kernel+' Kernel Test t=%.f/'%(kernel_param)
else:
filepath=Filepath
if not os.path.exists(filepath):
os.makedirs(filepath)
with h5py.File(filepath+"%d# Sim Activity.h5"%(NSim), 'a') as hf:
if "Activity" not in list(hf.keys()):
hf.create_dataset("Activity", data=u_total)
else:
print("Warning: overwriting results of a previous simulation.")
del hf["Activity"]
hf.create_dataset("Activity", data=u_total)
if GF_domain == False:
return u_Delta_t
else:
return u_total
####################################################################################################
####################################################################################################
##################################################################
##GRAPH STOCHASTIC WILSON-COWAN MODEL
##################################################################
#compute the four diffusion operators beforehand
def graph_WCM_propagators(alpha_EE=1, alpha_IE=1, alpha_EI=1, alpha_II=1,
sigma_EE=10, sigma_IE=10, sigma_EI=10, sigma_II=10, D=1,
Graph_Kernel='Gaussian', one_dim=False, syn=0, gridsize=1000, h=0.01,
eigvals=None, eigvecs=None):
t_EE = (0.5*sigma_EE**2)/D
t_IE = (0.5*sigma_IE**2)/D
t_EI = (0.5*sigma_EI**2)/D
t_II = (0.5*sigma_II**2)/D
ForceParallel=True
if one_dim==True:
s,U = one_dim_Laplacian_eigenvalues(gridsize, h, syn, vecs=True)
V=U.T
else:
s=eigvals
U=eigvecs
V=eigvecs.T
if ForceParallel==True:
diag_prop_EE = alpha_EE * GraphKernel(s, t_EE, Graph_Kernel)
diag_prop_IE = alpha_IE * GraphKernel(s, t_IE, Graph_Kernel)
diag_prop_EI = alpha_EI * GraphKernel(s, t_EI, Graph_Kernel)
diag_prop_II = alpha_II * GraphKernel(s, t_II, Graph_Kernel)
mask_EE = np.flatnonzero(diag_prop_EE)
mask_IE = np.flatnonzero(diag_prop_IE)
mask_EI = np.flatnonzero(diag_prop_EI)
mask_II = np.flatnonzero(diag_prop_II)
EE_skip = diag_prop_EE[mask_EE]
IE_skip = diag_prop_IE[mask_IE]
EI_skip = diag_prop_EI[mask_EI]
II_skip = diag_prop_II[mask_II]
prop_EEV = EE_skip[:,None] * V[mask_EE,:]
prop_IEV = IE_skip[:,None] * V[mask_IE,:]
prop_EIV = EI_skip[:,None] * V[mask_EI,:]
prop_IIV = II_skip[:,None] * V[mask_II,:]
propagator_EE = transpose_parallel_dot(V[mask_EE,:], prop_EEV) #np.dot(U, np.dot(s_exp_matrix_EE,V))
propagator_IE = transpose_parallel_dot(V[mask_IE,:], prop_IEV)
propagator_EI = transpose_parallel_dot(V[mask_EI,:], prop_EIV)
propagator_II = transpose_parallel_dot(V[mask_II,:], prop_IIV)
else:
diag_prop_EE = sp.sparse.diags(alpha_EE * GraphKernel(s, t_EE, Graph_Kernel)).toarray()
diag_prop_IE = sp.sparse.diags(alpha_IE * GraphKernel(s, t_IE, Graph_Kernel)).toarray()
diag_prop_EI = sp.sparse.diags(alpha_EI * GraphKernel(s, t_EI, Graph_Kernel)).toarray()
diag_prop_II = sp.sparse.diags(alpha_II * GraphKernel(s, t_II, Graph_Kernel)).toarray()
propagator_EE = np.dot(U, np.dot(diag_prop_EE,V))
propagator_IE = np.dot(U, np.dot(diag_prop_IE,V))
propagator_EI = np.dot(U, np.dot(diag_prop_EI,V))
propagator_II = np.dot(U, np.dot(diag_prop_II,V))
return propagator_EE.astype('float64'), propagator_IE.astype('float64'), propagator_EI.astype('float64'), propagator_II.astype('float64')
#@jit(nopython=True, parallel=True)
def transpose_parallel_dot(A, B):
return np.dot(A.T, B)
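# Illustrative sanity check (a sketch, not part of the original module): the masked
# spectral reconstruction used in graph_WCM_propagators above -- keeping only the
# nonzero kernel values k and forming V[mask].T @ (k[mask, None] * V[mask]) -- equals
# the full product U @ diag(k) @ U.T when U is orthonormal, because the dropped modes
# contribute nothing. A random symmetric matrix stands in for a graph Laplacian here.
def _example_masked_spectral_reconstruction(n=50):
    rng = np.random.default_rng(0)
    A = rng.standard_normal((n, n))
    L = (A + A.T) / 2                      # symmetric, so eigh gives orthonormal U
    s, U = np.linalg.eigh(L)
    V = U.T
    k = np.exp(-0.5 * np.abs(s))           # any diagonal kernel of the eigenvalues
    k[k < np.median(k)] = 0.0              # zero out half the modes, as the mask would
    mask = np.flatnonzero(k)
    masked = transpose_parallel_dot(V[mask, :], k[mask][:, None] * V[mask, :])
    full = np.dot(U, np.dot(np.diag(k), V))
    return np.allclose(masked, full)       # True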
#@jit(nopython=True, parallel=True)
def GWCM_Loop(E_0, I_0, Delta_t,
propagator_EE, propagator_IE, propagator_EI, propagator_II,
d_e, d_i, P, Q, tau_e, tau_i, Noise_E, Noise_I):
time_E = Delta_t/tau_e
time_I = Delta_t/tau_i
#print(I_0.dtype)
E_Delta_t = E_0 + time_E*(-d_e*E_0 + 1/(1+np.exp(-np.dot(propagator_EE,np.float64(E_0)) + np.dot(propagator_IE,np.float64(I_0)) - P)))+ Noise_E*np.sqrt(Delta_t)/tau_e
I_Delta_t = I_0 + time_I*(-d_i*I_0 + 1/(1+np.exp(-np.dot(propagator_EI,np.float64(E_0)) + np.dot(propagator_II,np.float64(I_0)) - Q)))+ Noise_I*np.sqrt(Delta_t)/tau_i
#print(E_Delta_t.shape)
return E_Delta_t, I_Delta_t
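# Minimal sketch (not part of the original module): a single explicit-Euler step of
# the stochastic Wilson-Cowan update on a toy two-node system, with identity matrices
# standing in for the graph propagators. All numbers are arbitrary placeholders chosen
# only to exercise GWCM_Loop; they are not calibrated parameter values.
def _example_gwcm_single_step():
    E_0 = np.array([0.1, 0.2])
    I_0 = np.array([0.1, 0.1])
    eye = np.eye(2)
    E_1, I_1 = GWCM_Loop(E_0, I_0, Delta_t=0.01,
                         propagator_EE=eye, propagator_IE=eye,
                         propagator_EI=eye, propagator_II=eye,
                         d_e=1.0, d_i=1.0, P=0.0, Q=0.0,
                         tau_e=1.0, tau_i=1.0,
                         Noise_E=np.zeros(2), Noise_I=np.zeros(2))
    return E_1, I_1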
#Wilson Cowan model
def Graph_Wilson_Cowan_Model(Ess, Iss, Time, Delta_t,
alpha_EE=1, alpha_IE=1, alpha_EI=1, alpha_II=1,
sigma_EE=10, sigma_IE=10, sigma_EI=10, sigma_II=10, D=1,
d_e=1, d_i=1, P=0, Q=0, tau_e=1, tau_i=1, sigma_noise_e=1, sigma_noise_i=1,
Graph_Kernel='Gaussian', one_dim=False, syn=0, gridsize=1000, h=0.01, eigvals=None, eigvecs=None,
Visual=False, SaveActivity=False, Filepath=' ', NSim=0):
propagator_EE, propagator_IE, propagator_EI, propagator_II = graph_WCM_propagators(
alpha_EE, alpha_IE, alpha_EI, alpha_II,
sigma_EE, sigma_IE, sigma_EI, sigma_II, D,
Graph_Kernel, one_dim, syn, gridsize, h, eigvals,eigvecs)
if one_dim==True:
E_0=Ess*np.ones(gridsize, dtype='float64')
I_0=Iss*np.ones(gridsize, dtype='float64')
else:
E_0=Ess*np.ones(len(eigvals), dtype='float64')
I_0=Iss*np.ones(len(eigvals), dtype='float64')
E_Delta_t = np.zeros_like(E_0)
I_Delta_t = np.zeros_like(I_0)
Timesteps = int(round(Time/Delta_t))
E_total = np.zeros((len(E_0),Timesteps-1000), dtype='float32')
if Visual==True:
plt.ion()
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlim(0, len(E_0))
ax.set_ylim(0, 1)
#line2, = ax.plot(np.arange(len(I_0)), I_0, 'b-')
#line1, = ax.plot(np.arange(len(E_0)), E_0, 'r-')
ax.plot(I_0, 'b-')
ax.plot(E_0, 'r-')
fig.canvas.draw()
numerical_SS = True
if numerical_SS == True:
Ess_numerical=[]
Iss_numerical=[]
for i in range(Timesteps):
if sigma_noise_e!=0 or sigma_noise_i!=0:
Noise_E = (sigma_noise_e * np.array([gauss(0.0, 1.0) for k in range(len(E_0))])).astype('float64')
Noise_I = (sigma_noise_i * np.array([gauss(0.0, 1.0) for k in range(len(I_0))])).astype('float64')
else:
Noise_E = 0
Noise_I = 0
#it turns out that the alphas ARE important, i.e. at least manually, I can't get oscillations if I set them all to one
#simply manipulating the sigmas doesn't appear to be enough for oscillations; analysis or systematic numerics would settle this
#impulse response
# if i==500:
# E_Delta_t[600:650]=0.9*np.ones(50)
# I_Delta_t[580:620]=0.9*np.ones(40)
E_Delta_t, I_Delta_t = GWCM_Loop(E_0, I_0, Delta_t,
propagator_EE, propagator_IE, propagator_EI, propagator_II,
d_e, d_i, P, Q, tau_e, tau_i, Noise_E, Noise_I)
if i>=1000:
E_total[:,i-1000]=np.copy(E_Delta_t).astype('float32')
if numerical_SS == True:
Ess_numerical.append(np.mean(E_Delta_t))
Iss_numerical.append(np.mean(I_Delta_t))
if i%10 == 0:
print(i)
if Visual==True:
ax.clear()
ax.set_ylim(Ess-sigma_noise_e, Ess+sigma_noise_e)
#line2.set_ydata(I_Delta_t)
#line1.set_ydata(E_Delta_t)
ax.plot(I_Delta_t, 'b-')
ax.plot(E_Delta_t, 'r-')
fig.canvas.draw()
fig.canvas.flush_events()
E_0 = np.copy(E_Delta_t)
I_0 = np.copy(I_Delta_t)
#print(E_0.shape)
#print(str(E_0[10])+" "+str(I_0[20]))
if SaveActivity==True:
if Filepath==' ':
filepath = 'G:/Macbook Stuff/Results/'+Graph_Kernel+' Kernel/aEE=%.3f aIE=%.3f aEI=%.3f aII=%.3f dE=%.3f dI=%.3f ' %(alpha_EE,alpha_IE,alpha_EI,alpha_II,d_e,d_i)
filepath += 'P=%.3f Q=%.3f sEE=%.3f sIE=%.3f sEI=%.3f sII=%.3f D=%.3f tE=%.3f tI=%.3f/'%(P,Q,sigma_EE,sigma_IE,sigma_EI,sigma_II,D,tau_e,tau_i)
else:
filepath=Filepath
if not os.path.exists(filepath):
os.makedirs(filepath)
#make DAT files with sim-only parameters (delta t, time, etc)
with h5py.File(filepath+"Activity E0=%.5f Sim #%d.h5"%(Ess, NSim), 'a') as hf:
if "Activity" not in list(hf.keys()):
hf.create_dataset("Activity", data=E_total)
else:
print("Warning: overwriting results of a previous simulation.")
del hf["Activity"]
hf.create_dataset("Activity", data=E_total)
if numerical_SS==True:
print(np.mean(np.array(Ess_numerical)))
print(np.mean(np.array(Iss_numerical)))
return E_total
#################################################################################
#
# LINEARIZED MODEL
#
##################################################################################
def Linearized_GLDomain_Wilson_Cowan_Model(Ess, Iss, Time, Delta_t,
alpha_EE=1, alpha_IE=1, alpha_EI=1, alpha_II=1,
sigma_EE=10, sigma_IE=10, sigma_EI=10, sigma_II=10, D=1,
d_e=1, d_i=1, P=0, Q=0, tau_e=1, tau_i=1, sigma_noise_e=1, sigma_noise_i=1,
Graph_Kernel='Gaussian', one_dim=False, syn=0, gridsize=1000, h=0.01, eigvals=None, eigvecs=None,
Visual=False, SaveActivity=False, Filepath=' ', NSim=0):
t_EE = (0.5*sigma_EE**2)/D
t_IE = (0.5*sigma_IE**2)/D
t_EI = (0.5*sigma_EI**2)/D
t_II = (0.5*sigma_II**2)/D
a = d_e*Ess*(1-d_e*Ess)
b = d_i*Iss*(1-d_i*Iss)
#eigenvectors are used for plotting purposes only.
if one_dim==True:
s, U = one_dim_Laplacian_eigenvalues(gridsize, h, syn, vecs=True)
else:
s=eigvals
U=eigvecs
#fluctuations about the steady state
beta_E_0 = np.zeros(len(s), dtype='float64')
beta_I_0 = np.zeros(len(s), dtype='float64')
prop_EE = (alpha_EE * GraphKernel(s, t_EE, Graph_Kernel)).astype('float64')
prop_IE = (alpha_IE * GraphKernel(s, t_IE, Graph_Kernel)).astype('float64')
prop_EI = (alpha_EI * GraphKernel(s, t_EI, Graph_Kernel)).astype('float64')
prop_II = (alpha_II * GraphKernel(s, t_II, Graph_Kernel)).astype('float64')
beta_E_Delta_t = np.zeros_like(beta_E_0)
beta_I_Delta_t = np.zeros_like(beta_I_0)
Timesteps = int(round(Time/Delta_t))
time_E = Delta_t/tau_e
time_I = Delta_t/tau_i
beta_E_total = np.zeros((len(beta_E_0),Timesteps-1000), dtype='float32')
if Visual==True:
plt.ion()
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlim(0, len(beta_E_0))
#ax.set_ylim(0, 1)
#line2, = ax.plot(np.arange(len(I_0)), I_0, 'b-')
#line1, = ax.plot(np.arange(len(E_0)), E_0, 'r-')
ax.plot(np.dot(U, beta_I_0), 'b-')
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from itertools import product
from pathlib import Path
import numpy as np
import tensorflow as tf
from dotenv import load_dotenv
from annotation.direction import (Direction, get_eight_directions,
get_opposite_direction)
from annotation.piece import Piece
from ..ou import BlackOuEffectLayer
__author__ = 'Yasuhiro'
__date__ = '2018/3/10'
class TestOuEffect(tf.test.TestCase):
@classmethod
def setUpClass(cls):
dotenv_path = Path(__file__).parents[3] / '.env'
load_dotenv(str(dotenv_path))
cls.data_format = os.environ.get('DATA_FORMAT')
cls.use_cudnn = bool(os.environ.get('USE_CUDNN'))
def test_ou_effect_without_long_check(self):
shape = (1, 1, 9, 9) if self.data_format == 'NCHW' else (1, 9, 9, 1)
board = np.empty(shape, dtype=np.int32)
# set up an arbitrary mask
white_effect_mask = np.zeros(81, dtype=bool)
white_effect_mask[::2] = True
white_effect_mask = np.reshape(white_effect_mask, shape)
white_long_check = {direction: False
for direction in get_eight_directions()}
ph = tf.placeholder(tf.int32, shape=shape)
ou_effect = BlackOuEffectLayer(
data_format=self.data_format, use_cudnn=self.use_cudnn
)(ph, white_effect_mask, white_long_check)
# check that there are no extra directions
self.assertEqual(8, len(ou_effect))
# squeeze the dimensions to make the outputs easier to use
ou_effect = {key: tf.squeeze(value)
for key, value in ou_effect.items()}
# the mask has already been passed to the layer, so it is safe to modify it here
# squeeze the dimensions for easier access
white_effect_mask = np.squeeze(white_effect_mask)
with self.test_session() as sess:
for i, j in product(range(9), repeat=2):
# place a piece at (i, j)
board[:] = Piece.EMPTY
if self.data_format == 'NCHW':
board[0, 0, i, j] = Piece.BLACK_OU
else:
board[0, i, j, 0] = Piece.BLACK_OU
effect = sess.run(ou_effect, feed_dict={ph: board})
for key, value in effect.items():
x, y = self.get_square(i=i, j=j, direction=key)
with self.subTest(i=i, j=j, direction=key):
if (x in range(9) and y in range(9) and
not white_effect_mask[x, y]):
self.assertTrue(value[x, y])
value[x, y] = False
self.assertFalse(np.all(value))
# Filename: HII-CHCm-IR_v1.1.py
import string
import numpy as np
import sys
#sys.stderr = open('errorlog.txt', 'w')
#Function for interpolation of grids
def interpolate(grid,z,zmin,zmax,n):
ncol = 15
vec = []
for col in range(ncol):
inter = 0
no_inter = 0
for row in range(0,len(grid)):
if grid[row,z] < zmin or grid[row,z] > zmax: continue
if z == 2: x = 0; y = 1
if z == 1: x = 0; y = 2
if z == 0: x = 1; y = 2
if row == (len(grid)-1):
vec.append(grid[row,col])
no_inter = no_inter + 1
elif grid[row,x] < grid[row+1,x] or grid[row,y] < grid[row+1,y] :
vec.append(grid[row,col])
no_inter = no_inter + 1
else:
inter = inter + 1
for index in range(0,n):
i = grid[row,col]+(index)*(grid[row+1,col]-grid[row,col])/n
vec.append(i)
out = np.transpose(np.reshape(vec,(-1,n*inter+no_inter)))
return out
print (' ---------------------------------------------------------------------')
print ('This is HII-CHI-mistry IR v. 1.1')
print (' See Fernandez-Ontiveros et al (2020) for details')
print (' Insert the name of your input text file with the following columns:')
print (' HI 4.05m, HI 7.46m, [SIV] 10.5m, HI 12.4m, [NeII] 12.8m, [NeIII] 15.5m, [SIII] 18.7m, [SIII] 33.7m, [OIII] 52m, [NIII] 57m, [OIII] 88m and [NII] 122m')
print ('with their corresponding errors in adjacent columns')
print ('with 0 for missing information.')
print ('---------------------------------------------------------------------')
# Input file reading
if len(sys.argv) == 1:
if int(sys.version[0]) < 3:
input00 = raw_input('Insert input file name:')
else:
input00 = input('Insert input file name:')
else:
input00 = str(sys.argv[1])
try:
input0 = np.loadtxt(input00)
if (input0.ndim == 1 and input0.shape[0] != 24) or (input0.ndim > 1 and input0.shape[1] != 24):
print ('The input file does not have 24 columns. Please check')
sys.exit()
print ('The input file is:'+input00)
except:
print ('Input file error: It does not exist or has wrong format')
sys.exit()
print ('')
output = []
# Iterations for Montecarlo error derivation
if len(sys.argv) < 3:
n = 25
else:
n = int(sys.argv[2])
print ('The number of iterations for MonteCarlo simulation is: ',n)
print ('')
# Reading of models grids. These can be changed
print ('')
question = True
while question:
if int(sys.version[0]) < 3:
inter = raw_input('Choose models [0] No interpolated [1] Interpolated: ')
else:
inter = input('Choose models [0] No interpolated [1] Interpolated: ')
if inter == '0' or inter == '1': question = False
print ('')
sed = 1
inter = int(inter)
if inter == 0 and sed==1:
sed_type = 'POPSTAR, age = 1 Myr, Chabrier IMF. No interpolation'
grid1 = np.loadtxt('C17_popstar_v1.1_ir.dat')
grid2 = np.loadtxt('C17_popstar_logU_adapted_emp_v1.1_ir.dat')
grid3 = np.loadtxt('C17_popstar_logU-NO_adapted_emp_v1.1_ir.dat')
print ('No interpolation for the POPSTAR models is going to be used.')
print ('The grid has a resolution of 0.1dex for O/H and 0.125dex for N/O')
print ('')
res_NO = 0.125
elif inter == 1 and sed==1:
sed_type = 'POPSTAR, age = 1 Myr, Chabrier IMF. interpolation'
grid1 = np.loadtxt('C17_popstar_v1.1_ir.dat')
grid2 = np.loadtxt('C17_popstar_logU_adapted_emp_v1.1_ir.dat')
grid3 = np.loadtxt('C17_popstar_logU-NO_adapted_emp_v1.1_ir.dat')
print ('Interpolation for the POPSTAR models is going to be used.')
print ('The grid has a resolution of 0.01dex for O/H and 0.0125dex for N/O')
print ('')
res_NO = 0.125
res_NO = 0.125
# Input file reading
if input0.shape == (24,):
input1 = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,input0[0],input0[1],input0[2],input0[3],input0[4],input0[5],input0[6],input0[7],input0[8],input0[9],input0[10],input0[11],input0[12],input0[13],input0[14],input0[15],input0[16],input0[17],input0[18],input0[19],input0[20],input0[21],input0[22],input0[23]]
input = np.reshape(input1,(2,24))
else:
input = input0
print ('Reading grids ....')
print ('')
print ('')
print ('----------------------------------------------------------------')
print ('(%) Grid 12+log(O/H) log(N/O) log(U)')
print ('-----------------------------------------------------------------')
# Beginning of loop of calculation
count = 0
for tab in input:
count = count + 1
OH_mc = []
NO_mc = []
logU_mc = []
OHe_mc = []
NOe_mc = []
logUe_mc = []
output.append(tab[0])
output.append(tab[1])
output.append(tab[2])
output.append(tab[3])
output.append(tab[4])
output.append(tab[5])
output.append(tab[6])
output.append(tab[7])
output.append(tab[8])
output.append(tab[9])
output.append(tab[10])
output.append(tab[11])
output.append(tab[12])
output.append(tab[13])
output.append(tab[14])
output.append(tab[15])
output.append(tab[16])
output.append(tab[17])
output.append(tab[18])
output.append(tab[19])
output.append(tab[20])
output.append(tab[21])
output.append(tab[22])
output.append(tab[23])
# Selection of grid
if tab[18] > 0:
grid = grid2
grid_type = 2
output.append(2)
else:
grid = grid3
grid_type = 3
output.append(3)
# Calculation of N/O
if tab[18] == 0 and (tab[16] == 0 or tab[20] == 0):
NOff = -10
eNOff = 0
else:
for monte in range(0,n,1):
NO_p = 0
den_NO = 0
NO_e = 0
den_NO_e = 0
tol_max = 1e2
if tab[16] == 0:
OIII_52_obs = 0
else:
OIII_52_obs = np.random.normal(tab[16],tab[17]+1e-5)
if OIII_52_obs <= 0: OIII_52_obs = 0
if tab[18] == 0:
NIII_57_obs = 0
else:
NIII_57_obs = np.random.normal(tab[18],tab[19]+1e-5)
if NIII_57_obs <= 0: NIII_57_obs = 0
if tab[20] == 0:
OIII_88_obs = 0
else:
OIII_88_obs = np.random.normal(tab[20],tab[21]+1e-5)
if OIII_88_obs <= 0: OIII_88_obs = 0
if OIII_52_obs == 0:
N3O3a_obs = -10
else:
N3O3a_obs = np.log10(NIII_57_obs / OIII_52_obs)
if OIII_88_obs == 0:
N3O3b_obs = -10
else:
N3O3b_obs = np.log10(NIII_57_obs / OIII_88_obs)
CHI_N3O3a = 0
CHI_N3O3b = 0
CHI_NO = 0
for index in grid:
if N3O3a_obs == -10:
CHI_N3O3a = 0
elif index[11] == 0 or index[12] == 0:
CHI_N3O3a = tol_max
else:
CHI_N3O3a = (np.log10(index[12]/index[11])- N3O3a_obs)**2/np.log10(index[12]/index[11])
if N3O3b_obs == -10:
CHI_N3O3b = 0
elif index[13] == 0 or index[12] == 0:
CHI_N3O3b = tol_max
else:
CHI_N3O3b = (np.log10(index[12]/index[13])- N3O3b_obs)**2/np.log10(index[12]/index[13])
CHI_NO = (CHI_N3O3a**2 + CHI_N3O3b**2 )**0.5
NO_p = index[1] / (CHI_NO) + NO_p
den_NO = 1 / (CHI_NO) + den_NO
NO = NO_p / den_NO
# Calculation of N/O error
CHI_N3O3a = 0
CHI_N3O3b = 0
CHI_NO = 0
for index in grid:
if N3O3a_obs == -10:
CHI_N3O3a = 0
elif index[11] == 0 or index[12] == 0:
CHI_N3O3a = tol_max
else:
CHI_N3O3a = (np.log10(index[12]/index[11])- N3O3a_obs)**2/np.log10(index[12]/index[11])
if N3O3b_obs == -10:
CHI_N3O3b = 0
elif index[13] == 0 or index[12] == 0:
CHI_N3O3b = tol_max
else:
CHI_N3O3b = (np.log10(index[12]/index[13])- N3O3b_obs)**2/np.log10(index[12]/index[13])
CHI_NO = (CHI_N3O3a**2 + CHI_N3O3b**2 )**0.5
NO_e = (index[1] - NO)**2 / (CHI_NO) + NO_e
den_NO_e = 1 / (CHI_NO) + den_NO_e
eNO = NO_e / den_NO_e
#Iterations for the interpolation mode
if inter == 0 or NO == -10:
NOf = NO
elif inter == 1:
igrid = grid[np.lexsort((grid[:,0],grid[:,2]))]
igrid = interpolate(igrid,1,NO-eNO-0.125,NO+eNO,10)
CHI_N3O3a = 0
CHI_N3O3b = 0
CHI_NO = 0
NO_p = 0
den_NO = 0
for index in igrid:
if N3O3a_obs == -10:
CHI_N3O3a = 0
elif index[11] == 0 or index[12] == 0:
CHI_N3O3a = tol_max
else:
CHI_N3O3a = (np.log10(index[12]/index[11])- N3O3a_obs)**2/np.log10(index[12]/index[11])
if N3O3b_obs == -10:
CHI_N3O3b = 0
elif index[10] == 0 or index[9] == 0:
CHI_N3O3b = tol_max
else:
CHI_N3O3b = (np.log10(index[9]/index[10])- N3O3b_obs)**2/np.log10(index[9]/index[10])
CHI_NO = (CHI_N3O3a**2 + CHI_N3O3b**2 )**0.5
if CHI_NO == 0:
NO_p = NO_p
den_NO = den_NO
else:
NO_p = index[1] / CHI_NO + NO_p
den_NO = 1 / CHI_NO + den_NO
NOf = NO_p / den_NO
NO_mc.append(NOf)
NOe_mc.append(eNO)
NOff = np.mean(NO_mc)
eNOff = (np.std(NO_mc)**2+np.mean(NOe_mc)**2)**0.5
# Creation of a constrained grid on N/O
if NOff == -10:
grid_c = grid
else:
grid_mac = []
for index in grid:
if np.abs(index[1] - NOff) > np.abs(eNOff+res_NO):
continue
else:
grid_mac.append(index[0])
grid_mac.append(index[1])
grid_mac.append(index[2])
grid_mac.append(index[3])
grid_mac.append(index[4])
grid_mac.append(index[5])
grid_mac.append(index[6])
grid_mac.append(index[7])
grid_mac.append(index[8])
grid_mac.append(index[9])
grid_mac.append(index[10])
grid_mac.append(index[11])
grid_mac.append(index[12])
grid_mac.append(index[13])
grid_mac.append(index[14])
grid_c = np.reshape(grid_mac,(int(len(grid_mac)/15),15))
# Calculation of O/H and logU
for monte in range(0,n,1):
OH_p = 0
logU_p = 0
den_OH = 0
OH_e = 0
logU_e = 0
den_OH_e = 0
tol_max = 1e2
if tab[0] == 0:
HI_4m_obs = 0
else:
HI_4m_obs = np.random.normal(tab[0],tab[1]+1e-5)
if HI_4m_obs <= 0: HI_4m_obs = 0
if tab[2] == 0:
HI_7m_obs = 0
else:
HI_7m_obs = np.random.normal(tab[2],tab[3]+1e-5)
if HI_7m_obs <= 0: HI_7m_obs = 0
if tab[4] == 0:
SIV_10m_obs = 0
else:
SIV_10m_obs = np.random.normal(tab[4],tab[5]+1e-5)
if SIV_10m_obs <= 0: SIV_10m_obs = 0
if tab[6] == 0:
HI_12m_obs = 0
else:
HI_12m_obs = np.random.normal(tab[6],tab[7]+1e-5)
if HI_12m_obs <= 0: HI_12m_obs = 0
if tab[8] == 0:
NeII_12m_obs = 0
else:
NeII_12m_obs = np.random.normal(tab[8],tab[9]+1e-3)
if NeII_12m_obs <= 0: NeII_12m_obs = 0
if tab[10] == 0:
NeIII_15m_obs = 0
else:
NeIII_15m_obs = np.random.normal(tab[10],tab[11]+1e-3)
if NeIII_15m_obs <= 0: NeIII_15m_obs = 0
if tab[12] == 0:
SIII_18m_obs = 0
else:
SIII_18m_obs = np.random.normal(tab[12],tab[13]+1e-3)
if SIII_18m_obs <= 0: SIII_18m_obs = 0
if tab[14] == 0:
SIII_33m_obs = 0
else:
SIII_33m_obs = np.random.normal(tab[14],tab[15]+1e-3)
if SIII_33m_obs <= 0: SIII_33m_obs = 0
if tab[16] == 0:
OIII_52m_obs = 0
else:
OIII_52m_obs = np.random.normal(tab[16],tab[17]+1e-5)
if OIII_52m_obs <= 0: OIII_52m_obs = 0
if tab[18] == 0:
NIII_57m_obs = 0
else:
NIII_57m_obs = np.random.normal(tab[18],tab[19]+1e-5)
if NIII_57m_obs <= 0: NIII_57m_obs = 0
if tab[20] == 0:
OIII_88m_obs = 0
else:
OIII_88m_obs = np.random.normal(tab[20],tab[21]+1e-5)
if OIII_88m_obs <= 0: OIII_88m_obs = 0
if tab[22] == 0:
NII_122m_obs = 0
else:
NII_122m_obs = np.random.normal(tab[22],tab[23]+1e-3)
if NII_122m_obs <= 0: NII_122m_obs = 0
if HI_4m_obs == 0 or NeII_12m_obs == 0 or NeIII_15m_obs == 0:
Ne23a_obs = -10
else:
Ne23a_obs = np.log10((NeII_12m_obs + NeIII_15m_obs)/HI_4m_obs)
if HI_7m_obs == 0 or NeII_12m_obs == 0 or NeIII_15m_obs == 0:
Ne23b_obs = -10
else:
Ne23b_obs = np.log10((NeII_12m_obs + NeIII_15m_obs)/HI_7m_obs)
if HI_12m_obs == 0 or NeII_12m_obs == 0 or NeIII_15m_obs == 0:
Ne23c_obs = -10
else:
Ne23c_obs = np.log10((NeII_12m_obs + NeIII_15m_obs)/HI_12m_obs)
if NeII_12m_obs == 0 or NeIII_15m_obs == 0:
Ne2Ne3_obs = -10
else:
Ne2Ne3_obs = np.log10((NeII_12m_obs / NeIII_15m_obs))
if HI_4m_obs == 0 or SIV_10m_obs == 0 or SIII_18m_obs == 0:
S34a_obs = -10
else:
S34a_obs = np.log10((SIV_10m_obs + SIII_18m_obs)/HI_4m_obs)
if HI_7m_obs == 0 or SIV_10m_obs == 0 or SIII_18m_obs == 0:
S34b_obs = -10
else:
S34b_obs = np.log10((SIV_10m_obs + SIII_18m_obs)/HI_7m_obs)
if HI_12m_obs == 0 or SIV_10m_obs == 0 or SIII_18m_obs == 0:
S34c_obs = -10
else:
S34c_obs = np.log10((SIV_10m_obs + SIII_18m_obs)/HI_12m_obs)
if HI_4m_obs == 0 or SIV_10m_obs == 0 or SIII_33m_obs == 0:
S34d_obs = -10
else:
S34d_obs = np.log10((SIV_10m_obs + SIII_33m_obs)/HI_4m_obs)
if HI_7m_obs == 0 or SIV_10m_obs == 0 or SIII_33m_obs == 0:
S34e_obs = -10
else:
S34e_obs = np.log10((SIV_10m_obs + SIII_33m_obs)/HI_7m_obs)
if HI_12m_obs == 0 or SIV_10m_obs == 0 or SIII_33m_obs == 0:
S34f_obs = -10
else:
S34f_obs = np.log10((SIV_10m_obs + SIII_33m_obs)/HI_12m_obs)
if SIV_10m_obs == 0 or SIII_18m_obs == 0:
S3S4a_obs = -10
else:
S3S4a_obs = np.log10((SIII_18m_obs / SIV_10m_obs))
if SIV_10m_obs == 0 or SIII_33m_obs == 0:
S3S4b_obs = -10
else:
S3S4b_obs = np.log10(SIII_33m_obs / SIV_10m_obs)
if HI_4m_obs == 0 or NIII_57m_obs == 0 or NII_122m_obs == 0:
N23a_obs = -10
else:
N23a_obs = np.log10((NII_122m_obs + NIII_57m_obs)/HI_4m_obs)
if HI_7m_obs == 0 or NIII_57m_obs == 0 or NII_122m_obs == 0:
N23b_obs = -10
else:
N23b_obs = np.log10((NII_122m_obs + NIII_57m_obs)/HI_7m_obs)
if HI_12m_obs == 0 or NIII_57m_obs == 0 or NII_122m_obs == 0:
N23c_obs = -10
else:
N23c_obs = np.log10((NII_122m_obs + NIII_57m_obs)/HI_12m_obs)
if NII_122m_obs == 0 or NIII_57m_obs == 0:
N2N3_obs = -10
else:
N2N3_obs = np.log10((NII_122m_obs / NIII_57m_obs))
if NII_122m_obs == 0 or OIII_52m_obs == 0:
O3N2a_obs = -10
else:
O3N2a_obs = np.log10((OIII_52m_obs/NII_122m_obs))
if NII_122m_obs == 0 or OIII_88m_obs == 0:
O3N2b_obs = -10
else:
O3N2b_obs = np.log10((OIII_88m_obs/NII_122m_obs))
if Ne23a_obs == -10 and Ne23b_obs == -10 and Ne23c_obs == -10 and S34a_obs == -10 and S34b_obs == -10 and S34c_obs == -10 and S34d_obs == -10 and S34e_obs == -10 and S34f_obs == -10 and Ne2Ne3_obs == -10 and S3S4a_obs == -10 and S3S4b_obs == -10 and N23a_obs == -10 and N23b_obs == -10 and N23c_obs == -10 and N2N3_obs == -10 and O3N2a_obs == -10 and O3N2b_obs == -10:
OH = 0
logU = 0
else:
CHI_Ne23a = 0
CHI_Ne23b = 0
CHI_Ne23c = 0
CHI_Ne2Ne3 = 0
CHI_S34a = 0
CHI_S34b = 0
CHI_S34c = 0
CHI_S34d = 0
CHI_S34e = 0
CHI_S34f = 0
CHI_S3S4a = 0
CHI_S3S4b = 0
CHI_N23a = 0
CHI_N23b = 0
CHI_N23c = 0
CHI_N2N3 = 0
CHI_O3N2a = 0
CHI_O3N2b = 0
CHI_OH = 0
for index in grid_c:
if Ne23a_obs == -10:
CHI_Ne23a = 0
elif index[3] == 0 or index[7] == 0 or index[8] == 0:
CHI_Ne23a = tol_max
else:
CHI_Ne23a = (np.log10((index[7]+index[8])/index[3])- Ne23a_obs)**2/np.log10((index[7]+index[8])/index[3])
if Ne23b_obs == -10:
CHI_Ne23b = 0
elif index[4] == 0 or index[7] == 0 or index[8] == 0:
CHI_Ne23b = tol_max
else:
CHI_Ne23b = (np.log10((index[7]+index[8])/index[4])- Ne23b_obs)**2/np.log10((index[7]+index[8])/index[4])
if Ne23c_obs == -10:
CHI_Ne23c = 0
elif index[6] == 0 or index[7] == 0 or index[8] == 0:
CHI_Ne23c = tol_max
else:
CHI_Ne23c = (np.log10((index[7]+index[8])/index[6])- Ne23c_obs)**2/np.log10((index[7]+index[8])/index[6])
if Ne2Ne3_obs == -10:
CHI_Ne2Ne3 = 0
elif index[7] == 0 or index[8] == 0:
CHI_Ne2Ne3 = tol_max
else:
CHI_Ne2Ne3 = (np.log10((index[7]/index[8]))- Ne2Ne3_obs)**2/np.log10((index[7]/index[8]))
if S34a_obs == -10:
CHI_S34a = 0
elif index[3] == 0 or index[5] == 0 or index[9] == 0:
CHI_S34a = tol_max
else:
CHI_S34a = (np.log10((index[5]+index[9])/index[3])- S34a_obs)**2/np.log10((index[5]+index[9])/index[3])
if S34b_obs == -10:
CHI_S34b = 0
elif index[4] == 0 or index[5] == 0 or index[9] == 0:
CHI_S34b = tol_max
else:
CHI_S34b = (np.log10((index[5]+index[9])/index[4])- S34b_obs)**2/np.log10((index[5]+index[9])/index[4])
if S34c_obs == -10:
CHI_S34c = 0
elif index[6] == 0 or index[5] == 0 or index[9] == 0:
CHI_S34c = tol_max
else:
CHI_S34c = (np.log10((index[5]+index[9])/index[6])- S34c_obs)**2/np.log10((index[5]+index[9])/index[6])
if S34d_obs == -10:
CHI_S34d = 0
elif index[3] == 0 or index[5] == 0 or index[10] == 0:
CHI_S34d = tol_max
else:
CHI_S34d = (np.log10((index[5]+index[10])/index[3])- S34d_obs)**2/np.log10((index[5]+index[10])/index[3])
if S34e_obs == -10:
CHI_S34e = 0
elif index[4] == 0 or index[5] == 0 or index[10] == 0:
CHI_S34e = tol_max
else:
CHI_S34e = (np.log10((index[5]+index[10])/index[4])- S34e_obs)**2/np.log10((index[5]+index[10])/index[4])
if S34f_obs == -10:
CHI_S34f = 0
elif index[6] == 0 or index[5] == 0 or index[10] == 0:
CHI_S34f = tol_max
else:
CHI_S34f = (np.log10((index[5]+index[10])/index[6])- S34f_obs)**2/np.log10((index[5]+index[10])/index[6])
if S3S4a_obs == -10:
CHI_S3S4a = 0
elif index[5] == 0 or index[9] == 0 :
CHI_S3S4a = tol_max
else:
CHI_S3S4a = (np.log10(index[9]/index[5])- S3S4a_obs)**2/np.log10((index[9]/index[5]))
if S3S4b_obs == -10:
CHI_S3S4b = 0
elif index[5] == 0 or index[10] == 0 :
CHI_S3S4b = tol_max
else:
CHI_S3S4b = (np.log10(index[10]/index[5])- S3S4b_obs)**2/np.log10((index[10]/index[5]))
if N23a_obs == -10:
CHI_N23a = 0
elif index[3] == 0 or index[12] == 0 or index[14] == 0:
CHI_N23a = tol_max
else:
CHI_N23a = (np.log10((index[12]+index[14])/index[3])- N23a_obs)**2/np.log10((index[12]+index[14])/index[3])
if N23b_obs == -10:
CHI_N23b = 0
elif index[4] == 0 or index[12] == 0 or index[14] == 0:
CHI_N23b = tol_max
else:
CHI_N23b = (np.log10((index[12]+index[14])/index[4])- N23b_obs)**2/np.log10((index[12]+index[14])/index[4])
if N23c_obs == -10:
CHI_N23c = 0
elif index[6] == 0 or index[12] == 0 or index[14] == 0:
CHI_N23c = tol_max
else:
CHI_N23c = (np.log10((index[12]+index[14])/index[6])- N23c_obs)**2/np.log10((index[12]+index[14])/index[6])
if N2N3_obs == -10:
CHI_N2N3 = 0
elif index[12] == 0 or index[14] == 0:
CHI_N2N3 = tol_max
else:
CHI_N2N3 = (np.log10(index[14]/index[12])- N2N3_obs)**2/np.log10((index[14]/index[12]))
if O3N2a_obs == -10:
CHI_O3N2a = 0
elif index[11] == 0 or index[14] == 0:
CHI_O3N2a = tol_max
else:
CHI_O3N2a = (np.log10(index[11]/index[14])- O3N2a_obs)**2/np.log10((index[11]/index[14]))
if O3N2b_obs == -10:
CHI_O3N2b = 0
elif index[13] == 0 or index[14] == 0:
CHI_O3N2b = tol_max
else:
CHI_O3N2b = (np.log10(index[13]/index[14])- O3N2b_obs)**2/np.log10((index[13]/index[14]))
CHI_OH = (CHI_Ne23a**2 + CHI_Ne23b**2 + CHI_Ne23c**2 + CHI_Ne2Ne3**2 + CHI_S34a**2 + CHI_S34b**2 + CHI_S34c**2 + CHI_S3S4a**2 + CHI_S3S4b**2+CHI_N23a**2+CHI_N23b**2+CHI_N23c**2+CHI_N2N3**2+CHI_O3N2a**2+CHI_O3N2b**2)**0.5
if CHI_OH == 0:
OH_p = OH_p
logU_p = logU_p
den_OH = den_OH
else:
OH_p = index[0] / (CHI_OH) + OH_p
logU_p = index[2] / (CHI_OH) + logU_p
den_OH = 1 / (CHI_OH) + den_OH
OH = OH_p / den_OH
logU = logU_p / den_OH
#Calculation of error of O/H and logU
if Ne23a_obs == -10 and Ne23b_obs == -10 and Ne23c_obs == -10 and S34a_obs == -10 and S34b_obs == -10 and S34c_obs == -10 and S34d_obs == -10 and S34e_obs == -10 and S34f_obs == -10 and Ne2Ne3_obs == -10 and S3S4a_obs == -10 and S3S4b_obs == -10 and N23a_obs == -10 and N23b_obs == -10 and N23c_obs == -10 and N2N3_obs == -10 and O3N2a_obs == -10 and O3N2b_obs == -10:
eOH = 0
elogU = 0
else:
CHI_Ne23a = 0
CHI_Ne23b = 0
CHI_Ne23c = 0
CHI_Ne2Ne3 = 0
CHI_S34a = 0
CHI_S34b = 0
CHI_S34c = 0
CHI_S34d = 0
CHI_S34e = 0
CHI_S34f = 0
CHI_S3S4a = 0
CHI_S3S4b = 0
CHI_N23a = 0
CHI_N23b = 0
CHI_N23c = 0
CHI_N2N3 = 0
CHI_O3N2a = 0
CHI_O3N2b = 0
CHI_OH = 0
for index in grid_c:
if Ne23a_obs == -10:
CHI_Ne23a = 0
elif index[3] == 0 or index[7] == 0 or index[8] == 0:
CHI_Ne23a = tol_max
else:
CHI_Ne23a = (np.log10((index[7]+index[8])/index[3])- Ne23a_obs)**2/np.log10((index[7]+index[8])/index[3])
if Ne23b_obs == -10:
CHI_Ne23b = 0
elif index[4] == 0 or index[7] == 0 or index[8] == 0:
CHI_Ne23b = tol_max
else:
CHI_Ne23b = (np.log10((index[7]+index[8])/index[4])- Ne23b_obs)**2/np.log10((index[7]+index[8])/index[4])
if Ne23c_obs == -10:
CHI_Ne23c = 0
elif index[6] == 0 or index[7] == 0 or index[8] == 0:
CHI_Ne23c = tol_max
else:
CHI_Ne23c = (np.log10((index[7]+index[8])/index[6])- Ne23c_obs)**2/np.log10((index[7]+index[8])/index[6])
if Ne2Ne3_obs == -10:
CHI_Ne2Ne3 = 0
elif index[7] == 0 or index[8] == 0:
CHI_Ne2Ne3 = tol_max
else:
CHI_Ne2Ne3 = (np.log10(index[7]/index[8])- Ne2Ne3_obs)**2/np.log10((index[7]/index[8]))
if S34a_obs == -10:
CHI_S34a = 0
elif index[3] == 0 or index[5] == 0 or index[9] == 0:
CHI_S34a = tol_max
else:
CHI_S34a = (np.log10((index[5]+index[9])/index[3])- S34a_obs)**2/np.log10((index[5]+index[9])/index[3])
if S34b_obs == -10:
CHI_S34b = 0
elif index[4] == 0 or index[5] == 0 or index[9] == 0:
CHI_S34b = tol_max
else:
CHI_S34b = (np.log10((index[5]+index[9])/index[4])- S34b_obs)**2/np.log10((index[5]+index[9])/index[4])
if S34c_obs == -10:
CHI_S34c = 0
elif index[6] == 0 or index[5] == 0 or index[9] == 0:
CHI_S34c = tol_max
else:
CHI_S34c = (np.log10((index[5]+index[9])/index[6])- S34c_obs)**2/np.log10((index[5]+index[9])/index[6])
if S34d_obs == -10:
CHI_S34d = 0
elif index[3] == 0 or index[5] == 0 or index[10] == 0:
CHI_S34d = tol_max
else:
CHI_S34d = (np.log10((index[5]+index[10])/index[3])- S34d_obs)**2/np.log10((index[5]+index[10])/index[3])
if S34e_obs == -10:
CHI_S34e = 0
elif index[4] == 0 or index[5] == 0 or index[10] == 0:
CHI_S34e = tol_max
else:
CHI_S34e = (np.log10((index[5]+index[10])/index[4])- S34e_obs)**2/np.log10((index[5]+index[10])/index[4])
if S34f_obs == -10:
CHI_S34f = 0
elif index[6] == 0 or index[5] == 0 or index[10] == 0:
CHI_S34f = tol_max
else:
CHI_S34f = (np.log10((index[5]+index[10])/index[6])- S34f_obs)**2/np.log10((index[5]+index[10])/index[6])
import numpy
import random
VALID = range(8)
numpFALSE = numpy.array((False, False))
movelist = [
[[(0, 1), (1, 1), (-1, 1)], [(0, -1), (1, -1), (-1, -1)]], # pawn
[(1, 0), (-1, 0), (0, 1), (0, -1)], # rook
[(-1, -2), (1, -2), (2, -1), (2, 1), (1, 2),
(-1, 2), (-2, 1), (-2, -1)], # knight
[(1, 1), (1, -1), (-1, 1), (-1, -1)], # bishop
[(1, 0), (-1, 0), (0, 1), (0, -1), (1, 1),
(1, -1), (-1, 1), (-1, -1)], # queen
[(1, 0), (-1, 0), (0, 1), (0, -1), (1, 1),
(1, -1), (-1, 1), (-1, -1)] # king
]
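# Illustrative sketch (not part of the original module): how the offset tuples in
# `movelist` translate into candidate target squares, using a knight on (4, 4).
# The bounds check against VALID mirrors the checks used by the Piece move
# generators; possible_moves_knight itself is not shown in this excerpt.
def _example_knight_targets(pos=(4, 4)):
    knight_offsets = movelist[2]
    return [(pos[0] + dx, pos[1] + dy)
            for dx, dy in knight_offsets
            if pos[0] + dx in VALID and pos[1] + dy in VALID]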
class Piece:
def __init__(self, board, pos, num, color):
global movelist
self.board = board
self.pos = numpy.array(pos)
self.num = num
if not self.num:
self.skipped = False
self.color = color
self.opp_color = (not self.color).real
self.movelist = movelist[self.num]
self.moved = False
self.name = self.board.pieces[self.num]
if self.color:
self.name = self.name.capitalize()
def __repr__(self):
repr_ = f"{self.name} at pos: {self.pos}"
return repr_
def __str__(self):
return self.__repr__()
def en_passant(self):
x, y = self.pos
if x-1 in VALID:
if rpiece := self.board.get_at(x-1, y):
if not rpiece.num:
if rpiece.skipped:
return numpy.array((x-1, y+1-2*self.color))
if x+1 in VALID:
if rpiece := self.board.get_at(x+1, y):
if not rpiece.num:
if rpiece.skipped:
return numpy.array((x+1, y+1-2*self.color))
return numpFALSE
def absolute_moves(self, king):
return list(filter(lambda move: self.is_safe(move, king),
list(self.possible_moves())))
def possible_moves(self, castling=True):
if self.num in [1, 3, 4]:
return list(self.possible_moves_rbq())
elif self.num == 0:
return list(self.possible_moves_pawn())
elif self.num == 2:
return list(self.possible_moves_knight())
elif self.num == 5:
return list(self.possible_moves_king(castling=castling))
def possible_moves_rbq(self):
for direction in self.movelist:
possible_pos = self.pos.copy()
possible_pos += direction
x, y = possible_pos
while (x in VALID) and (y in VALID):
if bpiece := self.board.get_at(x, y):
if bpiece.color != self.color:
yield numpy.array(possible_pos)
break
yield numpy.array(possible_pos)
possible_pos += direction
x, y = possible_pos
def possible_moves_pawn(self):
if self.num != 0:
return
specific_movelist = self.movelist[self.color]
for i, move in enumerate(specific_movelist):
possible_pos = self.pos.copy()
possible_pos += move
if not i:
first_step = possible_pos
x, y = possible_pos
if not (x in VALID and y in VALID):
continue
rpiece = self.board.get_at(x, y)
if bool(rpiece).real ^ bool(not i).real:
if rpiece:
if rpiece.color != self.color:
yield numpy.array(possible_pos)
continue
yield numpy.array(possible_pos)
if (magicmove := self.en_passant()).any():
yield magicmove
if not self.moved:
x, y = self.pos
extra_step = (x, y+2-4*self.color)
if not (self.board.get_at(*extra_step) or self.board.get_at(*first_step)):
yield numpy.array(extra_step)
import audiofile
import numpy as np
import numpy.lib.stride_tricks as npst
import matplotlib.pyplot as plt
from scipy import signal, fft
import glob, os, random
import xcorr
import scikit_talkbox_lpc as scilpc
import filterbanks
def read_wavfile(path):
"""
Read a given wav audio file and return the signal of it and its sampling rate.
@path : path to the .wav file [string]
@return : the signal [ndarray] and the sampling rate of the .wav file [int]
"""
signal, sampling_rate = audiofile.read(path)
return signal, sampling_rate
def normalize(signal):
"""
Normalize a signal so that its values range from -1 to 1.
@signal : the signal [ndarray]
@return : the normalized signal [ndarray]
"""
min_value = abs(min(signal))
max_value = abs(max(signal))
norm = max(min_value, max_value)
return signal/norm
def split(signal, sampling_rate, window_width, sliding_step):
"""
Split the signal in frames with an overlapping step.
@signal : the signal [ndarray]
@sampling_rate : the sampling rate of the signal [int]
@window_width : the window size in ms [int]
@sliding_step : the sliding step in ms [int]
@return : windows generated [list]
"""
window_samples = int(sampling_rate * (window_width/1000))
sliding_samples = int(sampling_rate * (sliding_step/1000))
v = npst.sliding_window_view(signal, window_samples)[::sliding_samples, :]
return v
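# Quick illustration (a sketch, not part of the original module): with a 16 kHz
# signal, a 50 ms window and a 25 ms step, split() yields 800-sample frames that
# overlap by half. The shape check below uses one second of silence.
def _example_split_shapes():
    sr = 16000
    frames = split(np.zeros(sr), sr, 50, 25)
    # 800-sample windows every 400 samples -> 39 frames for 16000 samples
    return frames.shape                      # expected: (39, 800)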
def compute_energy(signal):
"""
Return the energy of the given signal
"""
energy = 0
for i in range(len(signal)):
energy += (abs(signal[i]))**2
return energy
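# Note (a sketch, not part of the original module): the loop above computes
# sum(|x[i]|^2); on a NumPy array the same quantity can be obtained with a
# vectorized expression, checked here for reference.
def _example_energy_equivalence():
    x = np.array([0.5, -0.25, 0.1])
    return np.isclose(compute_energy(x), np.sum(np.abs(x) ** 2))   # True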
def get_voiced(frames, treshold):
"""
Divide frames into two categories:
-voiced_segment : contains all frames with an energy >= threshold
-unvoiced_segment : contains all other frames
"""
voiced_segments = []
unvoiced_segments = []
for frame in frames:
energy = compute_energy(frame)
if (energy >= treshold):
voiced_segments.append(frame)
else:
unvoiced_segments.append(frame)
return voiced_segments, unvoiced_segments
def autocorrelation_pitch_estim(files):
"""
Compute an estimation of the pitch of a speaker using the autocorrelation method.
@list of files where utterances (minimum 5) are stored
Calculate the pitch for each frame and then return the mean of all pitches
"""
#1.
f0_list = []
for file in files:
current_signal, sampling_rate = read_wavfile(file)
#2.
current_signal = normalize(current_signal)
#3.
frames = split(current_signal, sampling_rate, 50, 25)
#4.
#5.
voiced_segments, unvoiced_segments = get_voiced(frames, 5)
#6.
for segment in voiced_segments:
lags, c = xcorr.xcorr(segment, segment, maxlags=200)
#7.
peaks, p = signal.find_peaks(c)
if(len(peaks) > 1):
peak1 = peaks[0]
peak2 = peaks[1]
for peak in peaks:
if c[peak] > c[peak1]:
peak1 = peak
if c[peak] < c[peak1] and c[peak] > c[peak2]:
peak2 = peak
if (peak1 != peak2):
f0_list.append(sampling_rate/abs(peak1-peak2))
f0_list.sort()
while(f0_list[-1] > 550):
f0_list.pop()
f0 = np.mean(f0_list)
return f0
def cepstrum_pitch_estim(files):
"""
Compute an estimation of the pitch of a speaker using the cepstrum method.
@list of files where utterances (minimum 5) are stored
Calculate the pitch for each frame and then return the mean of all pitches
"""
#Take random samples for the two speakers
f0_list = []
#Normalize the signals and display them (point 2)
for file in files:
current_signal, sampling_rate = read_wavfile(file)
current_signal = normalize(current_signal)
#Split and build the list of voiced segments (threshold still to be validated) (points 3-5)
frames = split(current_signal, sampling_rate, 50, 25)
voiced_segment, unvoiced_segment = get_voiced(frames, 5)
for segment in voiced_segment:
#Compute the cepstrum of the signals (point 6)
w, h = signal.freqz(segment)
logfreq = np.log10(h)
cepstrum = np.fft.ifft(logfreq)
window = signal.hamming(len(segment))
windowed_segment = segment * window
wh, hw = signal.freqz(windowed_segment)
logfreq_windowed = np.log(hw)
cepstrum_windowed = np.fft.ifft(logfreq_windowed)
max_peak = 32
max_windowed_peak = 32
for i in range(32,267): #Search within the 60 Hz - 500 Hz interval
if (cepstrum[i] > cepstrum[max_peak]):
max_peak = i
if (cepstrum_windowed[i] > cepstrum_windowed[max_windowed_peak]):
max_windowed_peak = i
if (cepstrum_windowed[max_windowed_peak] > cepstrum[max_peak]):
max_peak = max_windowed_peak
f0_temp = sampling_rate/max_peak
f0_list.append(f0_temp)
f0 = np.mean(f0_list)
return f0
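# Illustrative usage sketch (not part of the original module). Both estimators expect
# a list of at least five .wav utterances from the same speaker; the file names below
# are hypothetical placeholders and do not ship with this code.
def _example_pitch_estimates():
    files = ["spk_utt1.wav", "spk_utt2.wav", "spk_utt3.wav",
             "spk_utt4.wav", "spk_utt5.wav"]          # hypothetical paths
    f0_autocorr = autocorrelation_pitch_estim(files)
    f0_cepstrum = cepstrum_pitch_estim(files)
    return f0_autocorr, f0_cepstrum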
def compute_formants(audiofile):
"""
Compute all frame formants of an audiofiles and return it as a 2 dimensional array
"""
#1.
current_signal, sampling_rate = read_wavfile(audiofile)
frames = split(normalize(current_signal), sampling_rate, 25, 25)
#2.
A = [1]
B = [1, 0.67]
lpc_order = int(2 + (sampling_rate/1000))
formants = []
time = 0
for frame in frames:
filtered_frame = signal.lfilter(B, A, frame)
window = signal.hamming(len(filtered_frame))
windowed_frame = filtered_frame * window
lpc = scilpc.lpc_ref(windowed_frame, 10)
roots = np.roots(lpc)
values = []
for r in roots:
if (np.imag(r) > 0):
angle = np.arctan2(np.imag(r), np.real(r))
values.append(angle * ((sampling_rate/10)/2*np.pi))
values.sort()
#values.insert(0, time)
formants.append(values)
#time += 0.025
return formants
def compute_mfcc(audiofile):
#1.
current_signal, sampling_rate = read_wavfile(audiofile)
current_signal = normalize(current_signal)
A= [1., 0.]
B= [1.,-0.97]
emphasized_signal = signal.lfilter(B,A,current_signal)
frames= split(emphasized_signal,sampling_rate, 50, 25)
Ndft = 512
mfccs = []
for frame in frames :
window = signal.hamming(len(frame))
windowed_frames = window*frame
w, h = signal.freqz(windowed_frames, worN=257)
power_spectrum= pow(abs(h),2)/Ndft
filter_bank_values = filterbanks.filter_banks(power_spectrum, sampling_rate)
dct = fft.dct(filter_bank_values, norm='ortho')
mfccs.append(dct)
return mfccs
def analyse(path):
"""
This function is called by each rule-based system in order to compute all the signal features in one place.
Because of the cepstrum and autocorrelation pitch estimation requirements, path must point to
a directory where at least 5 audio files of a speaker are stored.
"""
os.chdir(path)
files = random.sample(glob.glob("*.wav"), 5)
print(files)
autocorr_pitch = autocorrelation_pitch_estim(files)
cepstrum_pitch = cepstrum_pitch_estim(files)
formants_list = []
for file in files:
formants = compute_formants(file)
for f in formants:
formants_list.append(f)
f1_list = []
f2_list = []
for i in range(len(formants_list)):
if (formants_list[i][0] > 90 and formants_list[i][0] < 1000):
f1_list.append(formants_list[i][0])
if (formants_list[i][1] > 600 and formants_list[i][1] < 3200):
f2_list.append(formants_list[i][1])
os.chdir("../../")
return autocorr_pitch, cepstrum_pitch, f1_list, f2_list
def system_01(path):
"""
Simple rule-based system that implements observed rules with if-else statements.
It uses autocorrelation pitch estimation, cepstrum pitch estimation and formant 1.
====Results====
Accuracy global : 0.7
Accuracy cms : 0.0
Accuracy slt : 0.9
Accuracy bdl : 0.9
Accuracy rms : 1.0
"""
autocorr_pitch, cepstrum_pitch, f1_list, f2_list = analyse(path)
f1 = np.mean(f1_list)
print("Estimation du pitch avec la méthode autocorr : " + str(autocorr_pitch))
print("Estimation du pitch avec la méthode cepstrum : " + str(cepstrum_pitch))
print("Estimation du formant 1 : " + str(f1))
if (autocorr_pitch < 150):
if (cepstrum_pitch < 170):
if (f1 < 410):
print("C'est un homme")
return "man"
if (autocorr_pitch > 170):
if(cepstrum_pitch > 210):
if(f1 > 370):
print("C'est une femme")
return "woman"
def system_02(path):
"""
Rule-based system which aims to improve on system_01's performance. It uses weights to determine the output.
It uses autocorrelation pitch estimation, cepstrum pitch estimation and formant 1.
The two pitch estimates each have a weight of 0.4 in the decision process, while formant 1 has a weight of only 0.2.
If the man probability or the woman probability exceeds 0.5, the system can determine an output.
====Results====
Accuracy global : 1.0
Accuracy cms : 1.0
Accuracy slt : 1.0
Accuracy bdl : 1.0
Accuracy rms : 1.0
"""
autocorr_pitch, cepstrum_pitch, f1_list, f2_list = analyse(path)
f1 = np.mean(f1_list)
print("Estimation du pitch avec la méthode autocorr : " + str(autocorr_pitch))
print("Estimation du pitch avec la méthode cepstrum : " + str(cepstrum_pitch))
print("Estimation du formant 1 : " + str(f1))
autocorr_pitch_weight = 0.4
cepstrum_pitch_weight = 0.4
f1_weight = 0.2
man_prob = 0
woman_prob = 0
if (autocorr_pitch < 150):
man_prob += autocorr_pitch_weight
if (cepstrum_pitch < 170):
man_prob += cepstrum_pitch_weight
if (f1 < 410):
man_prob += f1_weight
if (autocorr_pitch > 170):
woman_prob += autocorr_pitch_weight
if (cepstrum_pitch > 210):
woman_prob += cepstrum_pitch_weight
if (f1 > 370):
woman_prob += f1_weight
if(man_prob > 0.5 and woman_prob > 0.5):
print("unknown")
elif(man_prob > 0.5 and woman_prob < 0.5):
print("C'est un homme")
print(man_prob)
return "man"
elif(man_prob < 0.5 and woman_prob > 0.5):
print("C'est une femme")
print(woman_prob)
return "woman"
def system_03(path):
"""
Rule-based system which uses formant 2 features in the decision process.
====Results====
Accuracy global : 0.925
Accuracy cms : 1.0
Accuracy slt : 0.9
Accuracy bdl : 0.8
Accuracy rms : 1.0
"""
autocorr_pitch, cepstrum_pitch, f1_list, f2_list = analyse(path)
f1 = np.mean(f1_list)
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import os
import itertools
from matplotlib import pyplot as plt
# import logging
def remove_duplicates(values):
output = []
seen = set()
for value in values:
# If value has not been encountered yet,
# ... add it to both list and set.
if value not in seen:
output.append(value)
seen.add(value)
return output
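# Small illustration (a sketch, not part of the original module): remove_duplicates
# keeps the first occurrence of each value and preserves the original ordering.
def _example_remove_duplicates():
    return remove_duplicates([3, 1, 3, 2, 1])   # -> [3, 1, 2]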
def redundancy_index(data_i, demand, n_x):
# exemplary calculation
# take any element of dict data
# data_example = next(iter(data.values()))
# ID of technologies, not considered for redundancy
l_drop_technology = [4, 8, 9, 101]  # 4: seasonal storage, 8: solar thermal, 9: PV, 101: heat transfer station
l_drop_variant = [3]  # surface water as heat source
# just define separate dataframe
df = data_i
df_demand = pd.DataFrame()
df_demand['demand'] = demand['Wärmeverbrauch'] + demand['Netz-/Speicherverluste']
# energy_demand_total = df_demand['demand'].sum()
# drop elements, which are not considered for redundancy
for m in l_drop_technology:
df = df[df['technology'] != m]
for v in l_drop_variant:
df = df[df['heat source (hp)'] != v]
# get number of heat generators
num = len(df.index)
# add all installed power
# installed_power = df.iloc[:, 3].sum()
# add probability to dataframe
df['prop'] = 0.03
# generate list with tuples for each generation plant
l_gen_i = []
for i in range(num):
l_gen_i += [(1, 0)]
# logging.info('Calculate ALL combinations')
risk_total = 0
risk_total_nn = 0
p_total = 0
risk_s = np.zeros(num + 1)
risks_i = [[]] * (len(l_gen_i) + 1)
d_risks = {}
for n in range(len(l_gen_i) + 1):
d_risks.update({n: []})
if n_x == 'all':
# generate all combinations of on/off
kombis = list(itertools.product(*l_gen_i))
for k in kombis:
# make an arry out of tuple
status = np.array(k)
# calculate remaining power
# 0 := Ausfall;
# 1 := Anlage fällt nicht aus;
power = np.sum(df.loc[:, 'power'].values * status)
df_demand['power'] = power
df_demand['energy_lost'] = df_demand['demand'] - power
# Energie which cannot be delivered
energy_lost = df_demand.loc[df_demand['demand'] > power,
'energy_lost'].sum()
# probabilities of events of each generation plant
prop = 1 - np.absolute(1 - status - df['prop'])
prop_total = np.prod(prop)
risk_i = prop_total*energy_lost
risk_total += risk_i
p_total += prop_total
risk_s[num - sum(k)] += risk_i
d_risks[num - sum(k)].append(risk_i)
else:
for i in range(n_x):
print(i)
# logging.info('Start Calculate next kombis_i')
# variant A
# kombis_i = [x for x in kombis if np.sum(x) == num - i]
# # variant B
# a = np.ones(num)
# a[:i] = 0
# kombis_i = list(itertools.permutations(a))
# kombis_i = remove_duplicates(kombis_i)
# variant C
ind = np.arange(num)
kombi_ind = list(itertools.combinations(ind, i))
kombis_i = []
for k in kombi_ind:
a = np.ones(num)
for l in k:
a[l] = 0
kombis_i.append(a)
# logging.info('Finish calculation kombis_i')
for k in kombis_i:
# make an arry out of tuple
status = np.array(k)
# calculate remaining power
# 0 := failure;
# 1 := the plant does not fail;
power = np.sum(df.loc[:, 'power'].values * status)
df_demand['power'] = power
df_demand['energy_lost'] = df_demand['demand'] - power
# Energie which cannot be delivered
energy_lost = df_demand.loc[df_demand['demand'] > power,
'energy_lost'].sum()
# probabilities of events of each generation plant
prop = 1 - np.absolute(1 - status - df['prop'])
prop_total = np.prod(prop)
import pandas as pd
import numpy as np
from datetime import timedelta
import geopandas as gpd
import matplotlib.pyplot as plt
import matplotlib.gridspec as grid_spec
from scipy import stats
from .geolocations import *
COLORS = {"light_orange":"#E69F00",
"light_blue":"#56B4E9",
"teal":"#009E73",
"yellow":"#F0E442",
"dark_blue":"#0072B2",
"dark_orange":"#D55E00",
"pink":"#CC79A7",
"purple":"#9370DB",
"black":"#000000",
"silver":"#DCDCDC"}
COLOR_MAP = {
0:"#e7e1ef",
1:"#c994c7",
2:"#dd1c77"
}
START_DATE = pd.to_datetime("2021-04-19")
US_POPULATION = 329500000
def load_national_trend_data():
""" Returns dataframe of national vaccine trends.
"""
df = pd.read_csv("../data/national_level_vaccination_trends.csv")
df = df[df["Date Type"] == "Admin"]
df["Date"] = [pd.to_datetime(d) for d in df["Date"]]
df.set_index("Date", drop = True, inplace = True)
return df
def load_county_trend_data(county, state, download_data= False):
""" Returns dataframe of county level vaccine trends.
Inputs:
county - (str) Capitalized county name "<Name> County"
state - (str) Upper-case two letter state abbreviation.
download_data: (bool) Download full dataset if True
Returns:
Returns county and state level vaccination data for
county, state, if download_data = True then it is
downloaded directly from the CDC website:
https://data.cdc.gov/Vaccinations/COVID-19-Vaccinations-
in-the-United-States-County/8xkx-amqh.
"""
c = county.lower()
s = state.lower()
if download_data == True:
# Caution: this will take a long time.
df = pd.read_csv(
"https://data.cdc.gov/api/views/8xkx-amqh/rows.csv?accessType=DOWNLOAD")
df = df[(df["Recip_County"] == "{} County".format(county.capitalize())
) & (df["Recip_State"] == state.upper())]
else:
df = pd.read_csv("../data/{}_county_{}_vaccination_trends.csv".format(
c,s), index_col = 0)
df["Date"] = [pd.to_datetime(d) for d in df["Date"]]
df.sort_values(by = "Date", inplace = True)
df.set_index("Date", drop = True, inplace = True)
return df
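# Illustrative usage sketch (not part of the original module). The county/state pair
# below is a hypothetical placeholder; with download_data=False the loader expects a
# matching CSV under ../data/, and with download_data=True it pulls the full CDC file,
# which is slow.
def _example_load_trends():
    national = load_national_trend_data()
    county = load_county_trend_data("Example", "XX", download_data=False)  # hypothetical
    return national.shape, county.shape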
def national_complete_pct(df):
""" Returns timeseries of percentage completely vaccinated.
Input:
df - (dataframe) national vaccination trends.
Output:
Dataframe of national percentage fully vaccinated by date.
"""
complete = 100 * df['People Fully Vaccinated Cumulative']/US_POPULATION
return complete.loc[START_DATE:]
def national_one_dose_pct(df):
""" Returns timeseries of percentage with one dose.
Input:
df - (dataframe) national vaccination trends.
Output:
Dataframe of national percentage with one dose by date.
"""
one_dose = 100 * df[
'People Receiving 1 or More Doses Cumulative']/US_POPULATION
return one_dose.loc[START_DATE:]
def national_expected_complete_pct(df):
""" Returns timeseries of expected percentage completely vaccinated.
Input:
df - (dataframe) national vaccination trends.
Output:
Dataframe of national percentage expected complete by date.
"""
one_dose = 100 * df['People Receiving 1 or More Doses Cumulative'
]/US_POPULATION
expected = one_dose.loc[START_DATE - timedelta(days = 42
):one_dose.index[-1] - timedelta(days = 42)]
expected = pd.Series(expected.values, index = pd.date_range(
START_DATE, one_dose.index[-1]))
return expected
def county_complete_pct(df):
""" Returns timeseries of percentage completely vaccinated.
Input:
df - (dataframe) county level vaccination trends.
Output:
Dataframe of county level percentage complete by date.
"""
return df["Series_Complete_Pop_Pct"].loc[START_DATE:,]
def county_one_dose_pct(df):
""" Returns timeseries of percentage with one dose.
Input:
df - (dataframe) county level vaccination trends.
Output:
Dataframe of county level percentage with one dose by date.
"""
return df['Administered_Dose1_Pop_Pct'].loc[START_DATE:,]
def county_expected_complete_pct(df):
""" Returns timeseries of percentage expected completely vaccinated.
Input:
df - (dataframe) county level vaccination trends.
Output:
Dataframe of county level percentage expected complete by date.
"""
one_dose = df['Administered_Dose1_Pop_Pct']
expected = one_dose.loc[START_DATE - timedelta(days = 42
):one_dose.index[-1] - timedelta(days = 42)]
expected = pd.Series(expected.values, index = pd.date_range(
START_DATE, one_dose.index[-1]))
return expected
def vaccine_trends_plot(county = None,
state = None,
show_us_current = False,
download_data = False):
""" Returns line plot of county vaccine trends.
Inputs:
county - (str) Capitalized county name or None for national
level data.
state - (str) Upper-case two letter state abbreviation or None
for national level data.
show_us_current - (bool) set to False to hide the horizontal line
at the current US complete-vaccination rate.
download_data - (bool) set to True to download data directly
from the CDC website. Warning: this is slow.
Returns:
Line plot of percentage complete, one-dose, and expected complete
over time, with an optional horizontal line at the current national level.
"""
df = load_national_trend_data()
complete = national_complete_pct(df)
one_dose = national_one_dose_pct(df)
expected = national_expected_complete_pct(df)
fig, ax = plt.subplots(figsize = (8,5))
# Add horizontal line at current completely vaccinated.
if show_us_current == True:
y = df.index[-1].year
m = f"{df.index[-1].month:02}"
d = f"{df.index[-1].day:02}"
ax.plot((one_dose.index[0], one_dose.index[-1]),
(complete.iloc[-1],complete.iloc[-1]),
color = "k",
linewidth = 1,
linestyle = "--",
zorder = 0,
label = "US Complete Vaccination Rate {}-{}-{}".format(y,m,d))
ax.annotate("{}%".format(np.around(complete.iloc[-1], decimals = 1)),
(one_dose.index[0], complete.iloc[-1]+1))
ax.set_title("US National Vaccination Rates", fontsize = 15)
# Load county data.
if county:
if state:
c = county.lower().split(" county")[0]
s = state.upper()
df = load_county_trend_data(county = c, state = s,
download_data = download_data)
complete = county_complete_pct(df)
one_dose = county_one_dose_pct(df)
expected = county_expected_complete_pct(df)
ax.set_title("Vaccination Rates in {} County, {}".format(
c.capitalize(), s), fontsize = 15)
else:
raise ValueError("A two-letter state abbreviation must be given.")
# Plot trends
ax.plot(one_dose,
color = COLORS["dark_blue"],
linewidth = 3,
label = "One Dose")
ax.plot(complete,
color = COLORS["light_orange"],
linewidth = 3,
label = "Completely Vaccinated")
ax.plot(expected,
color = "gray",
linestyle = "dotted",
linewidth = 3,
label = "Expected Completely Vaccinated",
zorder = 0)
ax.set_xlabel("Date", fontsize = 12)
ax.set_yticks([0,20,40,60,80])
ax.set_ylim(0,90)
ax.set_yticklabels(["{}%".format(20*i) for i in range(5)])
ax.set_ylabel("Percentage", fontsize = 12)
ax.legend(loc = "lower right", prop = {"size":12})
plt.show()
return None
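# A hedged usage sketch (not part of the original module); the county and
# state below are assumptions, and the cached county CSV must exist unless
# download_data=True is passed (slow).
def _example_vaccine_trends_plot():
    vaccine_trends_plot(county="Montgomery", state="MD", show_us_current=True)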
def relative_vaccine_trends_plot(county = None,
state = None,
download_data = False):
""" Returns bar chart of percentage +/- expected complete.
Inputs:
county - (str) Capitalized county name or None for national
level data.
state - (str) Upper-case two letter state abbreviation or None
for national level data.
download_data - (bool) set to True to download data directly
from the CDC website. Warning: this is slow.
Returns:
Bar chart showing percentage points above of below the
expected vaccine rate as a function of time.
"""
df = load_national_trend_data()
complete = national_complete_pct(df)
expected = national_expected_complete_pct(df)
fig, ax = plt.subplots(figsize = (8,5))
ax.set_title("Relative US National Vaccination Rates", fontsize = 15)
if county:
if state:
c = county.lower().split(" county")[0]
s = state.upper()
df = load_county_trend_data(county = c, state = s,
download_data = download_data)
complete = county_complete_pct(df)
one_dose = county_one_dose_pct(df)
expected = county_expected_complete_pct(df)
ax.set_title("Relative Vaccination Rates in {} County, {}".format(
c.capitalize(), s), fontsize = 15)
else:
raise ValueError("A two-letter state abbreviation must be given.")
# Compute difference between expected and actual.
diff = complete - expected
diff_weekly = pd.DataFrame(index = pd.date_range(diff.index[0],
diff.index[-1], freq = "W"),
columns = ["mean"])
for i in range(diff.shape[0]-1):
start = diff.index[i]
end = diff.index[i+1]
diff_weekly.loc[start,'mean'] = diff.loc[start:end].mean()
color = [COLORS["teal"] if t >= 0 else COLORS["pink"] for t in
diff_weekly["mean"]]
# Plot trends.
ax.bar(x = diff_weekly.index, width = 1,
height = diff_weekly["mean"],
color = color)
# Add empty plot to generate legend.
ax.plot([],[],
color = COLORS["teal"],
label= "More people than expected are completely vaccinated",
linewidth = 3)
ax.plot([],[],
color = COLORS["pink"],
label= "Fewer people than expected are completely vaccinated",
linewidth = 3)
ax.set_ylabel("Percentage Points", fontsize = 12)
ax.set_xlabel("Date", fontsize = 12)
ax.set_ylim(-12,12)
ax.legend(loc = "lower left", prop = {"size":12})
plt.show()
return None
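# Companion sketch to the plot above (not part of the original module); the
# same assumptions about county/state and the cached CSV apply.
def _example_relative_vaccine_trends_plot():
    relative_vaccine_trends_plot(county="Montgomery", state="MD")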
def plot_triangulated_county(geo_df, bounding_box = None, restricted = False, aspect_ratio = 1):
""" Plots county with triangular regions.
Inputs:
geo_df: (dataframe) geographic dataframe including county geometry.
bounding_box: (list) list of 4 vertices determining a bounding box
where agents are to be added. If no box is given, then the
bounding box is taken as the envelope of the county.
restricted: (bool) if True then the region is restricted to the bounding box.
aspect_ratio: (float) aspect ratio of final plot.
Returns:
Boundary of county and triangulation of region.
"""
tri_dict = make_triangulation(geo_df)
tri_df = gpd.GeoDataFrame({"geometry":[Polygon(t) for t in tri_dict["geometry"]["coordinates"]]})
# Establish initial CRS
tri_df.crs = "EPSG:3857"
# Set CRS to lat/lon
tri_df = tri_df.to_crs(epsg=4326)
fig, ax = plt.subplots(figsize = (10,10))
linewidth = 1
# Get bounding box geometry.
if bounding_box is not None:
sq_df = gpd.GeoDataFrame({"geometry":[Polygon(bounding_box)]})
# Get bounded triangles.
if restricted == True:
inset = [i for i in tri_df.index if tri_df.loc[i,"geometry"].within(sq_df.loc[0,"geometry"])]
tri_df = tri_df.loc[inset,:].copy()
# Set plot limits.
minx = np.array(bounding_box)[:,0].min()
miny = np.array(bounding_box)[:,1].min()
maxx = np.array(bounding_box)[:,0].max()
## image processing utilities
import math
from typing import Tuple, Union, List
from itertools import combinations, product
import cv2
# import easyocr
import pytesseract
import numpy as np
from PIL import Image
# import face_recognition
from deskew import determine_skew
## initialize reader
# def init_easyocr():
# global reader
# reader = easyocr.Reader(['en'], gpu = False)
sharpen_kernel = np.array([[-1,-1,-1], [-1,9,-1], [-1,-1,-1]])
def read_image(image) -> np.array:
try:
image = Image.open(image).convert('RGB')
image = np.array(image)
image = image[:, :, ::-1].copy() # Convert RGB to BGR
return image
except Exception:
return None
def rotate(
image: np.ndarray, angle: float, background: Union[int, Tuple[int, int, int]]
) -> np.ndarray:
old_width, old_height = image.shape[:2]
angle_radian = math.radians(angle)
width = abs(np.sin(angle_radian) * old_height) + abs(np.cos(angle_radian) * old_width)
height = abs(np.sin(angle_radian) * old_width) + abs(np.cos(angle_radian) * old_height)
# The original body was truncated here; the remainder below follows the
# standard deskew rotate recipe (assumed completion).
image_center = tuple(np.array(image.shape[1::-1]) / 2)
rot_mat = cv2.getRotationMatrix2D(image_center, angle, 1.0)
rot_mat[1, 2] += (width - old_width) / 2
rot_mat[0, 2] += (height - old_height) / 2
return cv2.warpAffine(image, rot_mat, (int(round(height)), int(round(width))), borderValue=background)
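# A hedged end-to-end sketch (not part of the original module): load an image,
# estimate its skew, straighten it and run OCR. The file path argument and the
# white border value are assumptions.
def _example_deskew_and_ocr(path: str):
    image = read_image(path)                      # BGR array, or None on failure
    if image is None:
        return None
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    angle = determine_skew(gray)                  # may be None if no skew is found
    if angle:
        image = rotate(image, angle, (255, 255, 255))
    return pytesseract.image_to_string(image)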
# coding=utf-8
# Copyright 2020 The Real-World RL Suite Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for real-world environments."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import operator
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
import realworldrl_suite.environments as rwrl
from realworldrl_suite.environments import realworld_env
NUM_DUMMY = 5
class EnvTest(parameterized.TestCase):
@parameterized.named_parameters(*rwrl.ALL_TASKS)
def testLoadEnv(self, domain_name, task_name):
"""Ensure it is possible to load the environment."""
env = rwrl.load(domain_name=domain_name, task_name=task_name)
env.reset()
self.assertIsNotNone(env)
@parameterized.named_parameters(*rwrl.ALL_TASKS)
def testSafetyConstraintsPresent(self, domain_name, task_name):
"""Ensure observations contain 'constraints' when safety is specified."""
env = rwrl.load(
domain_name=domain_name,
task_name=task_name,
safety_spec={'enable': True})
env.reset()
step = env.step(0)
self.assertIn('constraints', step.observation.keys())
@parameterized.named_parameters(*rwrl.ALL_TASKS)
def testSafetyCoeff(self, domain_name, task_name):
"""Ensure observations contain 'constraints' when safety is specified."""
env = rwrl.load(
domain_name=domain_name,
task_name=task_name,
safety_spec={'enable': True, 'safety_coeff': 0.1})
env.reset()
step = env.step(0)
self.assertIn('constraints', step.observation.keys())
for c in [2, -1]:
with self.assertRaises(ValueError):
env = rwrl.load(
domain_name=domain_name,
task_name=task_name,
safety_spec={'enable': True, 'safety_coeff': c})
@parameterized.named_parameters(*rwrl.ALL_TASKS)
def testSafetyObservationsDisabled(self, domain_name, task_name):
"""Ensure safety observations can be disabled."""
env = rwrl.load(
domain_name=domain_name,
task_name=task_name,
safety_spec={
'enable': True,
'observations': False
})
env.reset()
step = env.step(0)
self.assertNotIn('constraints', step.observation.keys())
@parameterized.named_parameters(*rwrl.ALL_TASKS)
def testDelayActionsNoDelay(self, domain_name, task_name):
"""Ensure there is no action delay if not specified."""
env = rwrl.load(domain_name=domain_name, task_name=task_name)
env.reset()
action_spec = env.action_spec()
# Send zero action and make sure it is immediately executed.
zero_action = np.zeros(shape=action_spec.shape, dtype=action_spec.dtype)
env.step(copy.copy(zero_action))
np.testing.assert_array_equal(env.physics.control(), zero_action)
# Send one action and make sure it is immediately executed.
one_action = np.ones(shape=action_spec.shape, dtype=action_spec.dtype)
if hasattr(action_spec, 'minimum'):
one_action = np.minimum(action_spec.maximum, one_action)
env.step(copy.copy(one_action))
np.testing.assert_array_equal(env.physics.control(), one_action)
@parameterized.named_parameters(*rwrl.ALL_TASKS)
def testDelayActionsDelay(self, domain_name, task_name):
"""Ensure there is action delay as specified."""
actions_delay = np.random.randint(low=1, high=10)
env = rwrl.load(
domain_name=domain_name,
task_name=task_name,
delay_spec={
'enable': True,
'actions': actions_delay
})
env.reset()
action_spec = env.action_spec()
zero_action = np.zeros(shape=action_spec.shape, dtype=action_spec.dtype)
one_action = np.ones(shape=action_spec.shape, dtype=action_spec.dtype)
if hasattr(action_spec, 'minimum'):
one_action = np.minimum(action_spec.maximum, one_action)
# Perform first action that fills up the buffer.
env.step(copy.copy(zero_action))
# Send one action and make sure zero action is still executed.
for _ in range(actions_delay):
env.step(copy.copy(one_action))
np.testing.assert_array_equal(env.physics.control(), zero_action)
# Make sure we finally perform the delayed one action.
env.step(copy.copy(zero_action))
np.testing.assert_array_equal(env.physics.control(), one_action)
@parameterized.named_parameters(*rwrl.ALL_TASKS)
def testDelayObservationsNoDelay(self, domain_name, task_name):
"""Ensure there is no observation delay if not specified."""
env = rwrl.load(domain_name=domain_name, task_name=task_name)
env.reset()
action_spec = env.action_spec()
one_action = np.ones(shape=action_spec.shape, dtype=action_spec.dtype)
if hasattr(action_spec, 'minimum'):
one_action = np.minimum(action_spec.maximum, one_action)
obs1 = env._task.get_observation(env._physics)
env.step(copy.copy(one_action))
obs2 = env._task.get_observation(env._physics)
# Make sure subsequent observations are different.
array_equality = []
for key in obs1:
array_equality.append((obs1[key] == obs2[key]).all())
self.assertIn(False, array_equality)
@parameterized.named_parameters(*rwrl.ALL_TASKS)
def testDelayObservationsDelay(self, domain_name, task_name):
"""Ensure there is observation delay as specified."""
observations_delay = np.random.randint(low=1, high=10)
env = rwrl.load(
domain_name=domain_name,
task_name=task_name,
delay_spec={
'enable': True,
'observations': observations_delay
})
obs1 = env.reset()[3]
action_spec = env.action_spec()
one_action = np.ones(shape=action_spec.shape, dtype=action_spec.dtype)
# Make sure subsequent observations are the same (clearing the buffer).
for _ in range(observations_delay):
obs2 = env.step(copy.copy(one_action))[3]
for key in obs1:
np.testing.assert_array_equal(obs1[key], obs2[key])
# Make sure we finally observe a different observation.
obs2 = env.step(copy.copy(one_action))[3]
array_equality = []
for key in obs1:
array_equality.append((obs1[key] == obs2[key]).all())
self.assertIn(False, array_equality)
@parameterized.named_parameters(*rwrl.ALL_TASKS)
def testNoiseGaussianActions(self, domain_name, task_name):
"""Ensure there is an additive Gaussian noise to the action."""
noise = 0.5
env = rwrl.load(
domain_name=domain_name,
task_name=task_name,
noise_spec={'gaussian': {
'enable': True,
'actions': noise
}})
env.reset()
action_spec = env.action_spec()
zero_action = np.zeros(shape=action_spec.shape, dtype=action_spec.dtype)
# Perform zero action.
env.step(copy.copy(zero_action))
# Verify that a non-zero action was actually performed.
np.testing.assert_array_compare(operator.__ne__, env.physics.control(),
zero_action)
@parameterized.named_parameters(*rwrl.ALL_TASKS)
def testAddedDummyObservations(self, domain_name, task_name):
"""Ensure there is an additive Gaussian noise to the observation."""
env = rwrl.load(
domain_name=domain_name,
task_name=task_name,
dimensionality_spec={
'enable': True,
'num_random_state_observations': 5,
})
env.reset()
# Get observation from realworld task.
obs = env._task.get_observation(env._physics)
for i in range(5):
self.assertIn('dummy-{}'.format(i), obs.keys())
for i in range(6, 10):
self.assertNotIn('dummy-{}'.format(i), obs.keys())
@parameterized.named_parameters(*rwrl.ALL_TASKS)
def testAddedDummyObservationsFlattened(self, domain_name, task_name):
"""Ensure there is an additive Gaussian noise to the observation."""
base_env = rwrl.load(
domain_name=domain_name,
task_name=task_name,
safety_spec={'enable': True},
environment_kwargs=dict(flat_observation=True))
base_env.reset()
mod_env = rwrl.load(
domain_name=domain_name,
task_name=task_name,
dimensionality_spec={
'enable': True,
'num_random_state_observations': NUM_DUMMY,
},
safety_spec={'enable': True},
environment_kwargs=dict(flat_observation=True))
mod_env.reset()
# Get observation from realworld task.
base_obs = base_env.step(0)
mod_obs = mod_env.step(0)
self.assertEqual(mod_obs.observation.shape[0],
base_obs.observation.shape[0] + NUM_DUMMY)
@parameterized.named_parameters(*rwrl.ALL_TASKS)
def testNoiseGaussianObservationsFlattening(self, domain_name, task_name):
"""Ensure there is an additive Gaussian noise to the observation."""
noise = 0.5
env = rwrl.load(
domain_name=domain_name,
task_name=task_name,
noise_spec={'gaussian': {
'enable': True,
'observations': noise
}},
environment_kwargs={'flat_observation': True})
env.reset()
env.step(0)
@parameterized.named_parameters(*rwrl.ALL_TASKS)
def testNoiseGaussianObservations(self, domain_name, task_name):
"""Ensure there is an additive Gaussian noise to the observation."""
noise = 0.5
env = rwrl.load(
domain_name=domain_name,
task_name=task_name,
noise_spec={'gaussian': {
'enable': True,
'observations': noise
}})
env.reset()
# Get observation from realworld cartpole.
obs1 = env._task.get_observation(env._physics)
# Get observation from underlying cartpole.
obs2 = collections.OrderedDict()
if domain_name == 'cartpole':
obs2['position'] = env.physics.bounded_position()
obs2['velocity'] = env.physics.velocity()
elif domain_name == 'humanoid':
obs2['joint_angles'] = env.physics.joint_angles()
obs2['head_height'] = env.physics.head_height()
obs2['extremities'] = env.physics.extremities()
obs2['torso_vertical'] = env.physics.torso_vertical_orientation()
obs2['com_velocity'] = env.physics.center_of_mass_velocity()
obs2['velocity'] = env.physics.velocity()
elif domain_name == 'manipulator':
arm_joints = [
'arm_root', 'arm_shoulder', 'arm_elbow', 'arm_wrist', 'finger',
'fingertip', 'thumb', 'thumbtip'
]
obs2['arm_pos'] = env.physics.bounded_joint_pos(arm_joints)
obs2['arm_vel'] = env.physics.joint_vel(arm_joints)
obs2['touch'] = env.physics.touch()
obs2['hand_pos'] = env.physics.body_2d_pose('hand')
obs2['object_pos'] = env.physics.body_2d_pose(env._task._object)
obs2['object_vel'] = env.physics.joint_vel(env._task._object_joints)
obs2['target_pos'] = env.physics.body_2d_pose(env._task._target)
elif domain_name == 'quadruped':
obs2['egocentric_state'] = env.physics.egocentric_state()
obs2['torso_velocity'] = env.physics.torso_velocity()
obs2['torso_upright'] = env.physics.torso_upright()
obs2['imu'] = env.physics.imu()
obs2['force_torque'] = env.physics.force_torque()
elif domain_name == 'walker':
obs2['orientations'] = env.physics.orientations()
obs2['height'] = env.physics.torso_height()
obs2['velocity'] = env.physics.velocity()
else:
raise ValueError('Unknown environment name: %s' % domain_name)
# Verify that the observations are different (noise added).
for key in obs1:
np.testing.assert_array_compare(operator.__ne__, obs1[key], obs2[key])
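# A hedged usage sketch (not part of the original tests): the challenge specs
# exercised individually above can be combined in a single load() call. The
# domain/task names below are assumptions for illustration only.
#
# env = rwrl.load(
#     domain_name='cartpole', task_name='realworld_swingup',
#     safety_spec={'enable': True},
#     delay_spec={'enable': True, 'actions': 3},
#     noise_spec={'gaussian': {'enable': True, 'observations': 0.1}})
# timestep = env.reset()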
# -*- coding: utf-8 -*-
"""This module implements the ITKrMM algorithm.
"""
import time
import logging
import functools
import multiprocessing as mp
import numpy as np
import numpy.random as rd
import numpy.linalg as lin
import scipy.sparse as sps
from ..tools.dico_learning import forward_patch_transform,\
inverse_patch_transform, CLS_init
from ..tools import PCA
from ..tools import sec2str
from ..tools import metrics
_logger = logging.getLogger(__name__)
class Dico_Learning_Executer:
"""Class to define execute dictionary learning algorithms.
The following class is a common code for most dictionary learning methods.
It performs the following tasks:
* reshapes the data in patch format,
* performs low-rank component estimation,
* starts the dictionary learning method,
* reshape output data,
* handle CLS initialization to speed-up computation.
Attributes
----------
Y: (m, n) or (m, n, l) numpy array
The input data.
Y_PCA: (m, n) or (m, n, PCA_th) numpy array
The input data in PCA space.
Its value is Y if Y is 2D.
mask: (m, n) numpy array
The acquisition mask.
P: int
The width (or height) of the patch.
K: int
The dictionary dimension. This dictionary is composed of L low-rank
components and K-L non-low-rank components.
L: int
The number of low rank components to learn.
S: int
The code sparsity level.
Nit_lr: int
The number of iterations for the low rank estimation.
Nit: int
The number of iterations.
CLS_init: dico
CLS initialization information.
verbose: bool
The verbose parameter. Default is True.
mean_std: 2-tuple
Tuple of size 2 which contains the data mean and std.
data: (N, D) numpy array
The Y data in patch format. N (resp. D) is the number of voxels per
patch (resp. patches).
mdata: (N, D) numpy array
The mask in patch format. N (resp. D) is the number of voxels per
patch (resp. patches).
init: (N, L) numpy array
The low-rank estimation initialization in patch format. N is the
number of voxels per patch.
init: (N, K-L) numpy array
The dictionary-learning initialization in patch format. N is the
number of voxels per patch.
PCA_operator: PcaHandler object
The PCA operator.
Note
----
The algorithm can be initialized with CLS as soon as
:code:`CLS_init` is not None. In this case, :code:`CLS_init`
should be a dictionary containing the required :code:`Lambda`
key and eventually the CLS :code:`init` optional argument.
"""
def __init__(self, Y, mask=None,
P=5, K=None, L=1, S=None,
Nit_lr=10, Nit=40,
init_lr=None, init=None, CLS_init=None,
PCA_transform=True, PCA_th='auto',
verbose=True):
"""Dico_Learning_Executer __init__ function.
Arguments
---------
Y: (m, n) or (m, n, l) numpy array
The input data.
mask: optional, None, (m, n) numpy array
The acquisition mask.
Default is None for full sampling.
P: optional, int
The width (or height) of the patch.
Default is 5.
K: optional, int
The dictionary dimension.
Default is 2*P**2-1.
L: optional, int
The number of low rank components to learn.
Default is 1.
S: optional, int
The code sparsity level. Default is P-L.
This should be lower than K-L.
Nit_lr: optional, int
The number of iterations for the low rank estimation.
Default is 10.
Nit: optional, int
The number of iterations. Default is 40.
init_lr: optional, (N, L) numpy array
Initialization for low-rank component. N is the number of voxel in
a patch. Default is random initialization.
init: optional, (N, K-L) numpy array
Initialization for dictionary learning. N is the number of voxel
in a patch. Default is random initialization.
CLS_init: optional, dico
CLS initialization information. See Note for details.
Default is None.
PCA_transform: optional, bool
Enables the PCA transformation if True, otherwise, no PCA
transformation is processed.
Default is True.
PCA_th: optional, int, str
The desired data dimension after dimension reduction.
Possible values are 'auto' for automatic choice, 'max' for maximum
value and an int value for user value.
Default is 'auto'.
verbose: bool
The verbose parameter. Default is True.
Note
----
The algorithm can be initialized with CLS as soon as
:code:`CLS_init` is not None. In this case, :code:`CLS_init`
should be a dictionary containing the required :code:`Lambda`
key and eventually the CLS :code:`init` optional argument.
"""
self.Y = Y
if mask is None:
mask = np.ones(Y.shape[:2])
self.mask = mask
self.P = P
self.K = K if K is not None else 2*P**2-1
self.L = L
self.S = S if S is not None else P-L
self.Nit = Nit
self.Nit_lr = Nit_lr
self.CLS_init = CLS_init
self.verbose = verbose
if CLS_init is not None and Y.ndim != 3:
_logger.warning(
'Dico learning will not be initialized with CLS as input data '
'is not 3D. Random init used.')
if (self.S > P**2 and Y.ndim == 2) or (
self.S > P**2*Y.shape[-1] and Y.ndim == 3):
raise ValueError('S input is greater than the patch size.')
# Perform PCA if Y is 3D
if self.Y.ndim == 3:
PCA_operator = PCA.PcaHandler(
Y, mask, PCA_transform=PCA_transform, PCA_th=PCA_th,
verbose=verbose)
Y_PCA, PCA_th = PCA_operator.Y_PCA, PCA_operator.PCA_th
self.PCA_operator = PCA_operator
if CLS_init is not None and 'init' in CLS_init:
self.CLS_init['init'] = PCA_operator.direct(
self.CLS_init['init'])
else:
Y_PCA = Y.copy()
self.PCA_operator = None
# Normalize and center
Y_m, Y_std = Y_PCA.mean(), Y_PCA.std()
Y_PCA = (Y_PCA - Y_m)/Y_std
if CLS_init is not None and 'init' in CLS_init:
self.CLS_init['init'] = (self.CLS_init['init'] - Y_m)/Y_std
self.mean_std = (Y_m, Y_std)
self.Y_PCA = Y_PCA
# Prepare data
obs_mask = mask if Y.ndim == 2 else np.tile(
mask[:, :, np.newaxis], [1, 1, Y_PCA.shape[2]])
# Observation
self.data = forward_patch_transform(Y_PCA * obs_mask, self.P)
# Mask
self.mdata = forward_patch_transform(obs_mask, self.P)
self.data *= self.mdata
# Initialization
if init_lr is None:
self.init_lr = np.squeeze(rd.randn(self.data.shape[0], self.L))
else:
self.init_lr = init_lr
if init is None:
self.init = rd.randn(self.data.shape[0], self.K - self.L)
else:
self.init = init
def execute(self, method='ITKrMM'):
"""Executes dico learning restoration.
Arguments
---------
method: str
The method to use, which can be 'ITKrMM' or 'wKSVD'.
Default is 'ITKrMM'.
Returns
-------
(m, n) or (m, n, l) numpy array
Restored data.
dict
Additional information. See Notes.
Note
----
The output information keys are:
- 'time': Execution time in seconds.
- 'lrc': low rank component.
- 'dico': Estimated dictionary.
- 'E': Evolution of the error.
"""
# Welcome message
if self.verbose:
print("-- {} reconstruction algorithm --".format(method))
start = time.time()
# If CLS init, get init dico and lrc
if self.CLS_init is not None and self.Y.ndim == 3:
if self.verbose:
print('Learning low rank component and init with CLS...')
lrc, dico_init = self.get_CLS_init()
self.init_lr = lrc
self.init = dico_init
else:
# Otherwise, we should estimate the low-rank component.
if self.verbose:
print('Learning low rank component...')
if self.L > 0:
local_init = self.init_lr if self.L > 1 else \
self.init_lr[:, None]
lrc = np.zeros((self.data.shape[0], self.L))
for cnt in range(self.L):
lrc_init = local_init[:, cnt]
if cnt > 0:
lrc_init -= lrc[:, :cnt] @ lrc[:, :cnt].T @ lrc_init
lrc_init /= np.linalg.norm(lrc_init)
lrc[:, cnt] = rec_lratom(
self.data,
self.mdata,
lrc[:, :cnt] if cnt > 0 else None,
self.Nit_lr,
lrc_init)
else:
lrc = None
#
# Learn Dictionary
#
if self.verbose:
print('Learning dictionary ({})...'.format(method))
# Remove lrc and ensures othogonality of input dico initialization.
if self.L > 1:
self.init -= lrc @ lrc.T @ self.init
self.init = self.init @ np.diag(1 / lin.norm(self.init, axis=0))
# Call reconstruction algo
params = {
'data': self.data,
'masks': self.mdata,
'K': self.K,
'S': self.S,
'lrc': lrc,
'Nit': self.Nit,
'init': self.init,
'verbose': self.verbose}
if method == 'ITKrMM':
dico_hat, info = itkrmm_core(**params)
elif method == 'wKSVD':
dico_hat, info = wKSVD_core(**params, preserve_DC=True)
else:
raise ValueError(
'Unknown method parameter for Dico_Learning_Executer object')
#
# Reconstruct data
#
Xhat = self.dico_to_data(dico_hat)
# Reshape output dico
p = self.P
shape_dico = (self.K, p, p) if self.Y.ndim == 2 else (
self.K, p, p, self.Y_PCA.shape[-1])
dico = dico_hat.T.reshape(shape_dico)
# Manage output info
dt = time.time() - start
InfoOut = {'dico': dico, 'time': dt}
if self.CLS_init is not None:
dico_CLS = np.hstack((self.init_lr, self.init))
InfoOut['CLS_init'] = dico_CLS.T.reshape(shape_dico)
if self.PCA_operator is not None:
PCA_info = {
'H': self.PCA_operator.H,
'PCA_th': self.PCA_operator.PCA_th,
'Ym': np.squeeze(self.PCA_operator.Ym[0, 0, :])
}
InfoOut['PCA_info'] = PCA_info
if self.verbose:
print(
"Done in {}.\n---".format(sec2str.sec2str(dt)))
return Xhat, InfoOut
def dico_to_data(self, dico):
"""Estimate reconstructed data based on the provided dictionary.
Arguments
---------
dico: (P**2, K) or (P**2*l, K) numpy array
The estimated dictionary.
Returns
-------
(m, n) or (m, n, l) numpy array
The reconstructed data
"""
# Recontruct data from dico and coeffs.
coeffs = OMPm(dico.T, self.data.T, self.S, self.mdata.T)
outpatches = sps.csc_matrix.dot(dico, (coeffs.T).tocsc())
# Transform from patches to data.
Xhat = inverse_patch_transform(outpatches, self.Y_PCA.shape)
Xhat = Xhat * self.mean_std[1] + self.mean_std[0]
if self.Y.ndim == 3:
Xhat = self.PCA_operator.inverse(Xhat)
return Xhat
def get_CLS_init(self):
"""Computes the initialization with CLS.
Returns
-------
(N, L) numpy array
Low-rank component estimation. N is the number of voxels in a
patch.
(N, K-L) numpy array
Dictionary initialization. N is the number of voxels in a patch.
"""
# Get initialization dictionary
D, C, Xhat, InfoOut = CLS_init(
self.Y_PCA,
mask=self.mask,
P=self.P,
K=self.K - self.L,
S=self.S,
PCA_transform=False,
verbose=self.verbose,
**self.CLS_init)
# Get low rank component
CLS_data = forward_patch_transform(Xhat, self.P)
Uec, _, _ = np.linalg.svd(CLS_data)
init_lr = Uec[:, :self.L]
dico_init = D.T
return init_lr, dico_init
def ITKrMM(Y, mask=None,
P=5, K=None, L=1, S=None,
Nit_lr=10, Nit=40,
init_lr=None, init=None, CLS_init=None,
PCA_transform=True, PCA_th='auto',
verbose=True):
"""ITKrMM restoration algorithm.
Arguments
---------
Y: (m, n) or (m, n, l) numpy array
The input data.
mask: optional, None or (m, n) numpy array
The acquisition mask.
Default is None for full sampling.
P: optional, int
The width (or height) of the patch.
Default is 5.
K: optional, int
The dictionary dimension.
Default is 2*P**2-1.
L: optional, int
The number of low rank components to learn.
Default is 1.
S: optional, int
The code sparsity level. Default is P-L.
Nit_lr: optional, int
The number of iterations for the low rank estimation.
Default is 10.
Nit: optional, int
The number of iterations. Default is 40.
init: (P**2, K+L) or (P**2*l, K+L) numpy array
Initialization dictionary.
CLS_init: optional, dico
CLS initialization information. See Notes for details.
Default is None.
xref: optional, (m, n) or (m, n, l) numpy array
Reference image to compute error evolution.
Default is None for input Y data.
verbose: optional, bool
The verbose parameter. Default is True.
PCA_transform: optional, bool
Enables the PCA transformation if True, otherwise, no PCA
transformation is processed.
Default is True.
PCA_th: optional, int, str
The desired data dimension after dimension reduction.
Possible values are 'auto' for automatic choice, 'max' for maximum
value and an int value for user value.
Default is 'auto'.
Returns
-------
(m, n) or (m, n, l) numpy array
Restored data.
dict
Additional information. See Notes.
Notes
-----
The algorithm can be initialized with CLS as soon as
:code:`CLS_init` is not None. In this case, :code:`CLS_init`
should be a dictionary containing the required :code:`Lambda`
key and eventually the :code:`init` optional argument.
The output information keys are:
* :code:`time`: Execution time in seconds.
* :code:`lrc`: low rank component.
* :code:`dico`: Estimated dictionary.
* :code:`E`: Evolution of the error.
"""
obj = Dico_Learning_Executer(
Y, mask, P, K, L, S, Nit_lr, Nit, init_lr, init, CLS_init,
PCA_transform, PCA_th, verbose)
return obj.execute(method='ITKrMM')
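# A hedged usage sketch (not part of the original module): inpaint a partially
# sampled 2D image with ITKrMM. All shapes and parameter values below are
# illustrative assumptions, not recommendations.
def _example_ITKrMM_usage():
    rng = np.random.default_rng(0)
    Y = rng.standard_normal((64, 64))
    mask = (rng.random((64, 64)) > 0.5).astype(float)   # 50% of pixels acquired
    Xhat, info = ITKrMM(Y, mask=mask, P=5, K=49, L=1, S=4, Nit=10, verbose=False)
    return Xhat, info['dico']   # restored image and the learned (49, 5, 5) dictionary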
def wKSVD(Y, mask=None,
P=5, K=None, L=1, S=None,
Nit_lr=10, Nit=40,
init_lr=None, init=None, CLS_init=None,
PCA_transform=True, PCA_th='auto',
verbose=True):
"""wKSVD restoration algorithm.
Arguments
---------
Y: (m, n) or (m, n, l) numpy array
The input data.
mask: optional, None or (m, n) numpy array
The acquisition mask.
Default is None for full sampling.
P: optional, int
The width (or height) of the patch.
Default is 5.
K: optional, int
The dictionary dimension.
Default is 2*P**2-1.
L: optional, int
The number of low rank components to learn.
Default is 1.
S: optional, int
The code sparsity level. Default is P-L.
Nit_lr: optional, int
The number of iterations for the low rank estimation.
Default is 10.
Nit: optional, int
The number of iterations. Default is 40.
init: (P**2, K+L) or (P**2*l, K+L) numpy array
Initialization dictionary.
CLS_init: optional, dico
CLS initialization information. See Notes for details.
Default is None.
xref: optional, (m, n) or (m, n, l) numpy array
Reference image to compute error evolution.
Default is None for input Y data.
verbose: optional, bool
The verbose parameter. Default is True.
PCA_transform: optional, bool
Enables the PCA transformation if True, otherwise, no PCA
transformation is processed.
Default is True.
PCA_th: optional, int, str
The desired data dimension after dimension reduction.
Possible values are 'auto' for automatic choice, 'max' for maximum
value and an int value for user value.
Default is 'auto'.
Returns
-------
(m, n) or (m, n, l) numpy array
Restored data.
dict
Additional information. See Notes.
Notes
-----
The algorithm can be initialized with CLS as soon as
:code:`CLS_init` is not None. In this case, :code:`CLS_init`
should be a dictionary containing the required :code:`Lambda`
key and eventually the :code:`init` optional argument.
The output information keys are:
* :code:`time`: Execution time in seconds.
* :code:`lrc`: low rank component.
* :code:`dico`: Estimated dictionary.
* :code:`E`: Evolution of the error.
"""
obj = Dico_Learning_Executer(
Y, mask, P, K, L, S, Nit_lr, Nit, init_lr, init, CLS_init,
PCA_transform, PCA_th, verbose)
return obj.execute(method='wKSVD')
def rec_lratom(data, masks=None, lrc=None, Nit=10, inatom=None, verbose=True):
"""Recover new low rank atom equivalent to itkrmm with K = S = 1.
Arguments
---------
data: (d, N) numpy array
The (corrupted) training signals as its columns.
masks: (d, N) numpy array
Mask data as its columns.
masks(.,.) in {0,1}.
Default is masks = 1.
lrc: (d, n) numpy array
Orthobasis for already recovered low rank component.
Default is None.
Nit: int
Number of iterations.
Default is 10.
inatom: (d, ) numpy array
Initialisation that should be normalized.
Default is None for random.
verbose: bool
If verbose is True, information is sent to the output.
Default is True.
Returns
-------
atom: (d, ) numpy array
Estimated low rank component.
"""
d, N = data.shape
if masks is None:
masks = np.ones((d, N))
data = data*masks # Safeguard
# Create random initial point if needed or check input initialization is
# normalized.
if inatom is None:
inatom = np.random.randn(d)
inatom = inatom/np.linalg.norm(inatom)
#
if lrc is not None:
# If lrc has 1 dimension, one should add a dimension to have correct
# L.
if lrc.ndim == 1:
lrc = lrc[:, np.newaxis]
L = lrc.shape[1]
# Remove low rank component from initial atom and re-normalize.
inatom = inatom - lrc @ lrc.T @ inatom
inatom = inatom/np.linalg.norm(inatom)
# Project data into orthogonal of lrc
# start = time.time()
for n in range(N):
lrcMn = lrc * np.tile(masks[:, n][:, np.newaxis], [1, L])
data[:, n] -= lrcMn @ np.linalg.pinv(lrcMn) @ data[:, n]
# if verbose:
# print('Elapsed time: {}'.format(
# sec2str.sec2str(time.time()-start)))
#
# Start estimation
atom_k = inatom
for it in range(Nit):
ip = atom_k.T.dot(data)
maskw = np.sum(masks, 1)
if lrc is None:
atom_kp1 = data @ np.sign(ip).T
else:
atom_kp1 = np.zeros(atom_k.shape)
for n in range(N):
# The masked basis of the current low-rank space.
lrcplus = np.concatenate(
(lrc, atom_k[:, np.newaxis]),
axis=1) * np.tile(masks[:, n][:, np.newaxis], [1, L+1])
# The data is projected into the orthogonal space of lrcplus.
resn = data[:, n] - \
lrcplus @ np.linalg.pinv(lrcplus) @ data[:, n]
# The masked current estimated lrc.
atom_k_mm = atom_k * masks[:, n]
# Calculate incremented atom_kp1.
atom_kp1 += \
np.sign(ip[n]) * resn + \
np.abs(ip[n])*atom_k_mm/np.sum(atom_k_mm**2)
# Normalize with mask score.
if maskw.min() > 0:
atom_kp1 /= maskw
else:
atom_kp1 /= (maskw + 1e-2)
# Remove previous low rank components from current estimate.
if lrc is not None:
atom_kp1 -= lrc @ lrc.T @ atom_kp1
# Re-normalize current estimation
atom_kp1 /= np.linalg.norm(atom_kp1)
# Update
atom_k = atom_kp1
return atom_k
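# A hedged usage sketch (not part of the original module): recover a single
# low-rank atom from randomly masked training patches. Sizes and the sampling
# rate are illustrative assumptions.
def _example_rec_lratom_usage():
    rng = np.random.default_rng(0)
    data = rng.standard_normal((25, 100))                 # 100 patches of 25 pixels
    masks = (rng.random((25, 100)) > 0.2).astype(float)   # ~80% of pixels observed
    atom = rec_lratom(data * masks, masks=masks, Nit=5, verbose=False)
    return atom   # unit-norm vector of length 25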
def OMPm(D, X, S, Masks=None):
r"""Masked OMP.
This is a modified version of OMP to account for corruptions in the signal.
Consider some input data :math:`\mathbf{X}` (whose shape is (N, P) where N
is the number of signals) which are masked by :math:`\mathbf{M}`. Given an
input dictionary :math:`\mathbf{D}` of shape (K, P), this algorithm returns
the optimal sparse :math:`\hat{\mathbf{A}}` matrix such that:
.. math::
\gdef \A {\mathbf{A}}
\gdef \M {\mathbf{M}}
\gdef \X {\mathbf{X}}
\gdef \D {\mathbf{D}}
\begin{aligned}
\hat{\A} &= \arg\min_\A \frac{1}{2}||\M\X - \M(\A\D)||_F^2\\
&s.t. \max_k||\A_{k,:}||_{0} \leq S
\end{aligned}
A slightly different modification of Masked OMP is available in "Sparse
and Redundant Representations: From Theory to Applications in Signal and
Image Processing," the book written by <NAME> in 2010.
Arguments
---------
D: (K, P) numpy array
The dictionary.
Its rows MUST be normalized, i.e. their norm must be 1.
X: (N, P) numpy array
The masked signals to represent.
S: int
The max. number of coefficients for each signal.
Masks: optional, (N, P) numpy array or None
The sampling masks that should be 1 if sampled and 0 otherwise.
Default is None for full sampling.
Returns
-------
(N, K) sparse coo_matrix array
sparse coefficient matrix.
"""
# Get some dimensions
N = X.shape[0]  # number of signals
P = X.shape[1]  # number of pixels per signal
K = D.shape[0]  # number of atoms
if Masks is None:
Masks = np.ones((N, P))
# Prepare the tables that will be used to create output sparse matrix.
iTab = np.zeros(N*S)
jTab = np.zeros(N*S)
dataTab = np.zeros(N*S)
Ncomp = 0 # Count the number of nnz elements for output.
for k in range(N):
# Local mask and signal # k
x = X[k, :]
m = Masks[k, :]
xm = x*m # Masked data
# Masked atoms
Dm = D * np.tile(m[np.newaxis, :], [K, 1])
# Normalization of available masked atoms
scale = np.linalg.norm(Dm, axis=1)
nz = np.flatnonzero(scale > 1e-3 / np.sqrt(N))
scale[nz] = 1/scale[nz]
# Initialize residuals
residual = xm
# Initialize the sequence of atom indexes
indx = np.zeros(S, dtype=int)
for j in range(S):
# Projection of the residual into dico
proj = scale * (Dm @ residual)
# Search max scalar product
indx[j] = np.argmax(np.abs(proj))
# Update residual
a = np.linalg.pinv(Dm[indx[:j+1], :].T) @ xm
residual = xm - Dm[indx[:j+1], :].T @ a
# In case of small residual, break
if np.linalg.norm(residual)**2 < 1e-6:
break
iTab[Ncomp:Ncomp+j+1] = k * np.ones(j+1)
jTab[Ncomp:Ncomp+j+1] = indx[:j+1]
dataTab[Ncomp:Ncomp+j+1] = a
Ncomp += j+1
# Build sparse output as scipy.sparse.coo_matrix
return sps.coo_matrix((dataTab, (iTab, jTab)), shape=(N, K))
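# A hedged usage sketch (not part of the original module): sparse-code a few
# masked signals over a random dictionary with unit-norm rows. Sizes and the
# sampling rate are illustrative assumptions.
def _example_OMPm_usage():
    rng = np.random.default_rng(0)
    D = rng.standard_normal((40, 25))
    D /= np.linalg.norm(D, axis=1, keepdims=True)     # rows must be normalized
    M = (rng.random((10, 25)) > 0.3).astype(float)    # ~70% of samples observed
    X = rng.standard_normal((10, 25)) * M             # masked signals
    A = OMPm(D, X, S=3, Masks=M)                      # (10, 40) sparse coefficients
    return A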
def _itkrmm_multi(n, lrc, data, masks, L):
"""
"""
lrcMn = lrc * np.tile(masks[:, n][:, np.newaxis], [1, L])
return lrcMn @ np.linalg.pinv(lrcMn) @ data[:, n]
def itkrmm_core(
data, masks=None, K=None, S=1, lrc=None, Nit=50, init=None,
verbose=True, parent=None):
"""Iterative Thresholding and K residual Means masked.
Arguments
---------
data: (d, N) numpy array
The (corrupted) training signals as its columns.
masks: optional, None, (d, N) numpy array
The masks as its columns.
masks(.,.) in {0,1}.
Default is None for full sampling.
K: optional, None or int
Dictionary size.
Default is None for d.
S: optional, int
Desired or estimated sparsity level of the signals.
Default is 1.
lrc: optional, None or (d, L) numpy array
Orthobasis for low rank component. Default is None.
Nit: optional, int
Number of iterations.
Default is 50.
init: optional, None or (d, K-L) numpy array
Initialisation, unit norm column matrix.
Here, L is the number of low rank components.
Default is None for random.
verbose: optional, optional, bool
The verbose parameter.
Default is True.
parent: optional, None or Dico_Learning_Executer object
The Dico_Learning_Executer object that called this function.
If this is not None, the SNR between initial true data (given
throught the `xref`argument of Dico_Learning_Executer) and the
currently reconstructed data will be computed for each
iteration. As this means one more OMPm per iteration, this is
quite longer.
Default is None for faster code and non-SNR output.
Returns
-------
(d, K) numpy array
Estimated dictionary
dictionary
Output information. See Note.
Note
----
The output dictionary contains the following keys.
* `time` (float): Execution time in seconds.
* 'SNR' (None, (Nit, ) array): Evolution of the SNR across the
iterations in case `parent`is not None.
"""
# d is patch size, N is # of patches.
d, N = data.shape
if masks is None:
masks = np.ones(data.shape)
data = data*masks # safeguard
if K is None:
K = data.shape[0]
if lrc is not None:
L = 1 if lrc.ndim == 1 else lrc.shape[1]
K = K - L
if N < K-1:
_logger.warning(
'Fewer training signals than atoms: trivial solution is data.')
return data, None
if init is not None and not np.array_equal(init.shape, np.array([d, K])):
_logger.warning(
'Initialisation does not match dictionary shape. '
'Random initialisation used.')
init = None
if init is None:
init = np.random.randn(d, K)
# Normalization of the columns
init = init.dot(np.diag(1/lin.norm(init, axis=0)))
# if xref is None:
# xref = data
# Start algorithm --------------
#
start_0 = time.time()
if lrc is not None:
if lrc.ndim == 1:
lrc = lrc[:, np.newaxis]
L = lrc.shape[1]
# Remove lrc from init and normalize columns.
init = init - lrc @ lrc.T @ init
init = init.dot(np.diag(1/lin.norm(init, axis=0)))
# Remove lrc from data
# start = time.time()
pool = mp.Pool(processes=mp.cpu_count())
f = functools.partial(
_itkrmm_multi, lrc=lrc, data=data, masks=masks, L=L)
res = pool.map(f, range(N))
data -= np.asarray(res).T
# if verbose:
# print('elapsed time: {}'.format(
# sec2str.sec2str(time.time()-start)))
# Learn dictionary --------------
#
dico_k = init
time_step = 0
if parent is not None:
SNR = np.zeros(Nit)
for it in range(Nit):
# Print information
if verbose:
if it == 0:
print('Iteration #{} over {}.'.format(it, Nit))
else:
print(
'Iteration #{} over {}'.format(it, Nit),
' (estimated remaining time: ',
'{}).'.format(
sec2str.sec2str(
time_step*(Nit-it+1))) +
'SNR: {:.2f}.'.format(SNR[it-1])
if parent is not None else '')
start = time.time()
# Learn dictionary
#
# Init.
dico_kp1, maskw = np.zeros((d, K)), np.zeros((d, K))
for n in range(N): # N
# Get support of mask for patch #n.
supp = np.flatnonzero(masks[:, n])
if supp.size == 0:
continue
#
# Thresholding
# Project data into dico to get code.
# The dictionary is normalized with the norm of masked dico.
dico_k_norm = lin.norm(dico_k[supp, :], axis=0)
ipn = dico_k.T @ data[:, n] / dico_k_norm
# Find support Int.
absipn = np.abs(ipn)
signipn = np.sign(ipn)
In = np.argsort(absipn, axis=0)[::-1]
Int = In[:S]
#
# Dico learning
# Renormalised corrupted dico on support.
masks_t = np.tile(masks[:, n], [S, 1]).T
dInm = (dico_k[:, Int] * masks_t) @ np.diag(
1/dico_k_norm[Int])
# Construct residuals
if lrc is not None:
dico_LMn = lrc * np.tile(masks[:, n], [L, 1]).T
dILnm = np.concatenate((dico_LMn, dInm), axis=1)
resn = np.real(
data[:, n] - np.linalg.pinv(dILnm).T @
np.concatenate((np.zeros(L), ipn[Int]), axis=0)
)
else:
resn = np.real(data[:, n] - np.linalg.pinv(dInm).T @ ipn[Int])
# Update new dictionary and maskweight
dico_kp1[:, Int] += \
resn[:, np.newaxis] @ signipn[np.newaxis, Int] +\
dInm @ np.diag(absipn[Int])
maskw[:, Int] += np.tile(masks[:, n], [S, 1]).T
if maskw.min() > 0:
dico_kp1 = N * dico_kp1 / maskw
else:
dico_kp1 = N * dico_kp1 / (maskw + 1e-3)
if lrc is not None:
dico_kp1 = dico_kp1 - lrc @ lrc.T @ dico_kp1
# Compute the dico norm.
scale = lin.norm(dico_kp1, axis=0)
# Redraw atoms that are not used
Iz = np.flatnonzero(scale**2 < 1e-5)
dico_kp1[:, Iz] = rd.randn(d, Iz.size)
scale = lin.norm(dico_kp1, axis=0)
# Normalize
dico_kp1 = dico_kp1 @ np.diag(1/scale)
# Update
dico_k = dico_kp1
# Compute error
if parent is not None:
dico_hat = np.concatenate((lrc, dico_k), axis=1)
xhat = parent.invert_function(dico_hat)
# Compute error
SNR[it] = metrics.SNR(xhat=xhat, xref=parent.xref)
time_step = time.time() - start
dico_hat = np.concatenate((lrc, dico_k), axis=1)
out_info = {'time': time.time() - start_0}
if parent is not None:
out_info['SNR'] = SNR
return dico_hat, out_info
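# A hedged usage sketch (not part of the original module): estimate one
# low-rank atom with rec_lratom, then learn the remaining atoms with
# itkrmm_core. Sizes and parameters are illustrative assumptions.
def _example_itkrmm_core_usage():
    rng = np.random.default_rng(0)
    data = rng.standard_normal((25, 200))
    masks = (rng.random((25, 200)) > 0.2).astype(float)
    lrc = rec_lratom(data * masks, masks=masks, Nit=5, verbose=False)
    dico, info = itkrmm_core(data * masks, masks=masks, K=30, S=3,
                             lrc=lrc[:, np.newaxis], Nit=10, verbose=False)
    return dico, info   # dico is (25, 30): the low-rank atom plus 29 learned atoms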
def improve_atom(data, masks, dico, coeffs, j):
"""This function performs dictionary update for atom #j.
In case the j'th atom is not used, a new atom is chosen among the data
and the third output is set to True (False otherwise).
Arguments
---------
data: (d, N) numpy array
The (corrupted) training signals as its columns.
masks: (d, N) numpy array
The masks as its columns.
dico: (d, K) numpy array
Initialisation, unit norm column matrix.
coeffs: (K, N) numpy array
The sparse codding.
j: int
The atom indice to update.
Returns
-------
(d, ) numpy array
The updated atom.
(K, N) numpy array
The updated atoms
redrawn: int
1 if a new atom has been generated, 0 therwise.
"""
# All data indices i that uses the j'th dictionary element,
# i.e. s.t. coeffs[j, i] != 0.
nnz = coeffs[j, :].nonzero()[1] # np.flatnonzero(coeffs[j, :])
if nnz.size == 0:
# No data uses the j'th atom.
# In this case, this atom should be replaced.
#
# To replace this atom, the data column with the greatest
# reconstruction error is chosen.
error = data - dico @ coeffs
error_norm = np.sum(error**2, axis=0)
pos = np.argmax(error_norm)
best_atom = data[:, pos] # other possibility: error[:,pos]
# Normalization
best_atom = best_atom / np.linalg.norm(best_atom)
if best_atom[0] != 0:
best_atom *= np.sign(best_atom[0])
M = coeffs.shape[1]
coeffs[j, :] = sps.coo_matrix((1, M), dtype=np.float64)
redrawn = 1
else:
redrawn = 0
tmp_coeffs = coeffs[:, nnz]
# The coefficients of the element we now improve are not relevant.
tmp_coeffs[j, :] = 0
# Vector of errors that we want to minimize with the new element.
errors = data[:, nnz] - dico*tmp_coeffs
#
# wKSVD update:
# min || beta.*(errors - atom*coeff) ||_F^2 for beta = mask
#
Nit = 10 # The paper suggests 10-20 but 10 is fine and faster.
best_atom = np.zeros((dico.shape[0], 1))
coeff_new = np.zeros((1, nnz.size))
for i in range(Nit):
NewF = \
masks[:, nnz]*errors + \
(np.ones((masks.shape[0], nnz.size)) -
masks[:, nnz])*(
best_atom.dot(coeff_new))
if nnz.size > 1:
[best_atom, s, beta] = sps.linalg.svds(
sps.coo_matrix(NewF), 1)
else:
s = np.linalg.norm(NewF)
beta = np.array([[1.0]])
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import patches
####################
# 8 custom methods #
####################
def plot_custom_bar_chart_with_error(input_data,
input_names=None,
fig_tag=1,
input_fig_size=(9, 7),
titles=('bar plot', 'field'),
window_title_input='bar plot',
color_bar='b',
kind=None,
additional_field=None,
input_parameters=None,
log_scale=False,
add_extra_numbers=None):
fig = plt.figure(fig_tag, figsize=input_fig_size, dpi=100, facecolor='w', edgecolor='k')
fig.subplots_adjust(left=0.04, right=0.98, top=0.92, bottom=0.08)
fig.canvas.set_window_title(window_title_input)
ax_bar = plt.subplot2grid((3, 4), (0, 0), colspan=3, rowspan=3)
if additional_field is not None:
ax_field = plt.subplot2grid((3, 4), (2, 3), colspan=1, rowspan=1)
index = np.arange(len(input_data))
bar_width = 0.35
# bar plot
ax_bar.bar(index, list(input_data), bar_width,
color=color_bar)
ax_bar.set_title(titles[0])
ax_bar.xaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5)
ax_bar.yaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5)
ax_bar.set_axisbelow(True)
ax_bar.set_xlabel('Methods')
ax_bar.set_ylabel('Error (pixel)')
if log_scale:
ax_bar.set_yscale('log')
ax_bar.set_ylabel('Error - log scale - (pixel)')
ax_bar.set_xlim(0 - bar_width, len(input_data) - bar_width)
if input_names is not None:
ax_bar.set_xticks(index, minor=False)
xtick_names = plt.setp(ax_bar, xticklabels=input_names)
plt.setp(xtick_names, rotation=45, fontsize=12)
ax_bar.grid(True)
# fig.text(0.5, 0.04, 'Methods', ha='center')
# fig.text(0.01, 0.5, 'Errors', va='center', rotation='vertical')
# right side of the figure:
# Quiver
if additional_field is not None:
ax_field.set_title(titles[1])
X, Y = np.meshgrid(np.arange(additional_field.shape[0]), np.arange(additional_field.shape[1]))
ax_field.quiver(Y,
X,
additional_field[:, :, 0, 0, 0],
additional_field[:, :, 0, 0, 1],
color='r', linewidths=0.2, units='xy', angles='xy', scale=1, scale_units='xy')
# Annotate computational time:
if add_extra_numbers is not None:
y_val_a, y_val_b = ax_bar.get_ylim()
for i in range(len(input_data)):
ax_bar.text(i + bar_width/2, 0.85*(y_val_b - y_val_a), str(np.around(add_extra_numbers[i], decimals=9)),
horizontalalignment='center', size='small',
color='k', rotation=90)
# Text on the figure customise this part for the need!
# 6 options 'one_SE2', 'multi_SE2', 'one_GAUSS', 'multi_GAUSS', 'one_REALI', 'multi_REALI'
if kind is not None:
dom = tuple([int(j) for j in input_parameters[:3]])
fig.text(.78, .80, r'Domain = ' + str(dom))
if kind == 'one_SE2':
fig.text(.765, .85, r'SE(2) generated SVF: ')
fig.text(.78, .75, r'$\theta = $ ' + str(input_parameters[3]))
fig.text(.78, .70, r'$t_x = $ ' + str(input_parameters[4]))
fig.text(.78, .65, r'$t_y = $ ' + str(input_parameters[5]))
elif kind == 'one_HOM':
fig.text(.765, .85, r'HOM generated SVF: ')
fig.text(.78, .75, r'center: ' + str(input_parameters[3]))
fig.text(.78, .70, r'kind: ' + str(input_parameters[4]))
fig.text(.78, .65, r'scale_factor: ' + str(input_parameters[5]))
fig.text(.78, .60, r'sigma: ' + str(input_parameters[6]))
fig.text(.78, .55, r'in_psl: ' + str(input_parameters[7]))
elif kind == 'one_GAUSS':
fig.text(.765, .85, r'Gauss generated SVF: ')
fig.text(.78, .75, r'$\sigma_i = $ ' + str(input_parameters[3]))
fig.text(.78, .70, r'$\sigma_g = $ ' + str(input_parameters[4]))
fig.text(.78, .60, r'Ground truth, steps ')
fig.text(.78, .55, str(input_parameters[5]) + ' ' + str(input_parameters[6]))
elif kind == 'one_REAL':
fig.text(.78, .85, r'id element: ' + str(input_parameters[3]))
fig.text(.78, .60, r'Ground truth method ')
fig.text(.78, .55, str(input_parameters[4]))
else:
raise Warning('Kind not recognized.')
fig.set_tight_layout(True)
return fig
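# A hedged usage sketch (not part of the original module): bar chart of
# made-up per-method errors with computation times annotated on the bars.
def _example_bar_chart_usage():
    errors = np.array([0.12, 0.08, 0.05])
    names = ['method A', 'method B', 'method C']
    fig = plot_custom_bar_chart_with_error(errors, input_names=names,
                                           fig_tag=1, log_scale=True,
                                           add_extra_numbers=[1.2, 3.4, 0.7])
    plt.show()
    return fig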
def plot_custom_boxplot(input_data,
input_names=None,
fig_tag=1,
input_fig_size=(11, 7.5),
x_axis_label='Methods',
y_axis_label='Error (pixel)',
input_titles=('Error', 'field'),
window_title_input='boxplot plot',
kind=None,
additional_field=None,
input_parameters=None,
log_scale=False,
annotate_mean=True,
add_extra_annotation=None):
"""
:param input_data: list of lists, one for each block!
:param input_names:
:param fig_tag:
:param x_axis_label:
:param y_axis_label:
:param input_fig_size:
:param input_titles:
:param window_title_input:
:param kind:
:param additional_field:
:param input_parameters:
:param log_scale:
:param annotate_mean:
:param add_extra_annotation:
:return:
"""
fig = plt.figure(fig_tag, figsize=input_fig_size, dpi=100, facecolor='w', edgecolor='k')
fig.subplots_adjust(left=0.04, right=0.98, top=0.92, bottom=0.08)
font = {'family': 'serif', 'color': 'darkblue', 'weight': 'normal', 'size': 14}
fig.canvas.set_window_title(window_title_input)
if input_parameters is None:
ax_box = plt.subplot(111)
else:
ax_box = plt.subplot2grid((3, 4), (0, 0), colspan=3, rowspan=3)
if additional_field is not None:
ax_field = plt.subplot2grid((3, 4), (2, 3), colspan=1, rowspan=1)
num_boxes = len(input_data)
index_boxes = np.arange(1, num_boxes+1)
bp = ax_box.boxplot(input_data, notch=False, patch_artist=False, sym='+', vert=1, whis=1.5)
# set the colors:
plt.setp(bp['boxes'], color='blue')
plt.setp(bp['whiskers'], color='blue')
plt.setp(bp['fliers'], color='red', marker='+')
ax_box.set_title(input_titles[0])
ax_box.xaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5)
ax_box.yaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5)
ax_box.set_axisbelow(True)
ax_box.set_xlabel(x_axis_label, fontdict=font, labelpad=18)
ax_box.set_ylabel(y_axis_label, fontdict=font, labelpad=10)
if log_scale:
ax_box.set_yscale('log')
ax_box.set_ylabel(y_axis_label + ' log-scale')
# ax_box.set_xlim(0 - 0.5, num_boxes + 0.5)
if input_names is not None:
ax_box.set_xticks(index_boxes, minor=False)
xtick_names = plt.setp(ax_box, xticklabels=input_names)
plt.setp(xtick_names, rotation=45, fontsize=12)
#ax_box.grid(True)
# right side of the figure:
# Quiver
if additional_field is not None:
ax_field.set_title(input_titles[1])
xx, yy = np.meshgrid(np.arange(additional_field.shape[0]), np.arange(additional_field.shape[1]))
ax_field.quiver(yy,
xx,
additional_field[:, :, 0, 0, 0],
additional_field[:, :, 0, 0, 1],
color='r', linewidths=0.2, units='xy', angles='xy', scale=1, scale_units='xy')
# Annotate mean
mu = [np.mean(input_data[i]) for i in range(len(input_data))]
colors_num = ['green', 'green', 'green', 'green', 'green']
if annotate_mean:
y_val = ax_box.get_ylim()[1]
for i in range(len(mu)):
ax_box.text(i + 0.775, y_val - y_val * 0.1, str(np.around(mu[i], decimals=9)),
horizontalalignment='center', size='small',
color=colors_num[i % 5], rotation=90)
if add_extra_annotation is not None:
y_val = ax_box.get_ylim()[1]
for i in range(len(add_extra_annotation)):
ax_box.text(i + 1.225, y_val - y_val * 0.1, str(np.around(add_extra_annotation[i], decimals=9)),
horizontalalignment='center', size='small',
color='k', rotation=90)
# Text on the figure; customise this part as needed.
# 6 options 'one_SE2', 'multi_SE2', 'one_GAUSS', 'multi_GAUSS', 'one_REALI', 'multi_REALI'
if kind is not None and input_parameters is not None:
dom = tuple([int(j) for j in input_parameters[:3]])
fig.text(.78, .80, r'Domain = ' + str(dom))
if kind == 'multiple_SE2':
fig.text(.765, .85, r'SE(2) generated SVF: ')
fig.text(.78, .75, r'number of samples: ' + str(int(input_parameters[3])))
fig.text(.78, .70, str(np.round(input_parameters[4], 3)) +
r'$ \leq \theta \leq $ ' +
str(np.round(input_parameters[5], 3)))
fig.text(.78, .65, str(np.round(input_parameters[3], 3)) +
r'$ \leq t_x \leq $ ' +
str(np.round(input_parameters[7], 3)))
fig.text(.78, .60, str(np.round(input_parameters[5], 3)) +
r'$ \leq t_y \leq $ ' +
str(np.round(input_parameters[9], 3)))
elif kind == 'multiple_HOM':
fig.text(.765, .85, r'HOM generated SVF: ')
fig.text(.78, .75, r'center: ' + str(input_parameters[3]))
fig.text(.78, .70, r'kind: ' + str(input_parameters[4]))
fig.text(.78, .65, r'scale_factor: ' + str(input_parameters[5]))
fig.text(.78, .60, r'sigma: ' + str(input_parameters[6]))
fig.text(.78, .55, r'in_psl: ' + str(input_parameters[7]))
fig.text(.78, .50, r'number of samples: ' + str(int(input_parameters[8])))
elif kind == 'multiple_GAUSS':
fig.text(.765, .85, r'Gauss generated SVF: ')
fig.text(.78, .75, r'number of samples = ' + str(input_parameters[3]))
fig.text(.78, .70, r'$\sigma_i$ = ' + str(input_parameters[4]))
fig.text(.78, .65, r'$\sigma_g$ = ' + str(input_parameters[5]))
fig.text(.78, .60, r'Ground truth, steps: ')
fig.text(.78, .57, str(input_parameters[6]) + ' ' + str(input_parameters[7]))
elif kind == 'multiple_REAL':
fig.text(.765, .85, r'Real Data: ')
fig.text(.78, .70, r'SFVs id string:')
fig.text(.78, .65, str(input_parameters[3]))
fig.text(.78, .60, r'Ground truth method ')
fig.text(.78, .55, str(input_parameters[4]))
else:
raise Warning('Kind not recognized.')
fig.set_tight_layout(True)
return fig
def plot_custom_step_versus_error_single(list_steps,
matrix_of_lines, # errors ordered row-major
label_lines,
fig_tag=2,
input_parameters=None,
additional_field=None,
window_title_input='errors',
titles=('iterations vs. error', 'Field'),
x_axis_label='number of steps',
y_axis_label='Error',
kind=None,
input_fig_size=(9, 7),
input_colors=None,
input_line_style=None,
input_marker=None,
log_scale=False,
additional_vertical_line=None,
legend_location='upper right',
):
assert len(list_steps) == matrix_of_lines.shape[1]
fig = plt.figure(fig_tag, figsize=input_fig_size, dpi=100, facecolor='w', edgecolor='k')
fig.subplots_adjust(left=0.04, right=0.98, top=0.92, bottom=0.08)
font = {'family': 'serif', 'color': 'darkblue', 'weight': 'normal', 'size': 14}
fig.canvas.set_window_title(window_title_input)
ax_graph = plt.subplot2grid((3, 4), (0, 0), colspan=3, rowspan=3)
if additional_field is not None:
ax_svf = plt.subplot2grid((3, 4), (2, 3), colspan=1, rowspan=1)
if input_colors is None:
colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
input_colors = [colors[j % len(colors)] for j in range(len(list_steps))]
if input_marker is None:
input_marker = ['.', ] * len(list_steps)
if input_line_style is None:
input_line_style = ['-', ] * len(list_steps)
for j in range(matrix_of_lines.shape[0]):
ax_graph.plot(list_steps, matrix_of_lines[j, :],
color=input_colors[j],
linestyle=input_line_style[j],
marker=input_marker[j],
label=label_lines[j])
ax_graph.set_title(titles[0])
ax_graph.legend(loc=legend_location, shadow=False)
ax_graph.xaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5)
ax_graph.yaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5)
ax_graph.set_axisbelow(True)
ax_graph.set_xlabel(x_axis_label, fontdict=font, labelpad=18)
ax_graph.set_ylabel(y_axis_label, fontdict=font, labelpad=10)
if log_scale:
ax_graph.set_yscale('log')
ax_graph.set_ylabel(y_axis_label + ' log-scale')
if additional_vertical_line is not None:
# draw the vertical reference line:
xa, xb, ya, yb = list(ax_graph.axis())
ax_graph.plot([additional_vertical_line, additional_vertical_line], [ya, yb], 'k--', lw=0.5, color='0.3')
ax_graph.text(additional_vertical_line + 0.2, (yb - ya)/2., r'automatic = '+str(additional_vertical_line))
# ax_graph.set_xlim(0 - 0.5, num_boxes + 0.5)
# right side of the figure:
# Quiver
if additional_field is not None:
ax_svf.set_title(titles[1])
xx, yy = np.meshgrid(np.arange(additional_field.shape[0]), np.arange(additional_field.shape[1]))
ax_svf.quiver(yy,
xx,
additional_field[:, :, 0, 0, 0],
additional_field[:, :, 0, 0, 1],
color='r', linewidths=0.2, units='xy', angles='xy', scale=1, scale_units='xy')
# Text on the figure: customise this block as needed.
# Recognised kinds: 'one_SE2', 'one_HOM', 'one_GAUSS', 'one_REAL'.
if kind is not None and input_parameters is not None:
dom = tuple([int(j) for j in input_parameters[:3]])
fig.text(.78, .80, r'Domain = ' + str(dom))
if kind == 'one_SE2':
fig.text(.765, .85, r'SE(2) generated SVF: ')
fig.text(.78, .75, r'$\theta = $ ' + str(input_parameters[3]))
fig.text(.78, .70, r'$t_x = $ ' + str(input_parameters[4]))
fig.text(.78, .65, r'$t_y = $ ' + str(input_parameters[5]))
if kind == 'one_HOM':
fig.text(.765, .85, r'HOM generated SVF: ')
fig.text(.78, .75, r'center: ' + str(input_parameters[3]))
fig.text(.78, .70, r'kind: ' + str(input_parameters[4]))
fig.text(.78, .65, r'scale_factor: ' + str(input_parameters[5]))
fig.text(.78, .60, r'sigma: ' + str(input_parameters[6]))
fig.text(.78, .55, r'in_psl: ' + str(input_parameters[7]))
elif kind == 'one_GAUSS':
fig.text(.765, .85, r'Gauss generated SVF: ')
fig.text(.78, .75, r'$\sigma_i = $ ' + str(input_parameters[3]))
fig.text(.78, .70, r'$\sigma_g = $ ' + str(input_parameters[4]))
if len(input_parameters) > 5:
fig.text(.745, .65, r'Ground truth method, steps: ')
fig.text(.78, .60, str(input_parameters[5]) + ' ' + str(input_parameters[6]))
elif kind == 'one_REAL':
fig.text(.765, .85, r'Real data: ')
fig.text(.78, .75, r'id svf:')
fig.text(.78, .70, str(input_parameters[3]))
if len(input_parameters) > 5:
fig.text(.745, .65, r'Ground truth method, steps: ')
fig.text(.78, .60, str(input_parameters[4]) + ' ' + str(input_parameters[5]))
else:
raise Warning('Kind not recognized.')
fig.set_tight_layout(True)
return fig
def plot_custom_step_versus_error_multiple(list_steps,
matrix_of_lines_means, # errors ordered row-major
label_lines,
y_error=None,
fig_tag=2,
input_parameters=None,
additional_field=None,
window_title_input='errors',
titles=('iterations vs. error', 'Field'),
x_axis_label='number of steps',
y_axis_label='Error',
kind=None,
input_fig_size=(9, 7),
input_colors=None,
input_line_style=None,
input_marker=None,
log_scale=False,
additional_vertical_line=None,
legend_location='upper right',
):
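"""Same layout as `plot_custom_step_versus_error_single`, but each row of
`matrix_of_lines_means` is a mean over several sampled SVFs; `y_error`, if given,
adds error bars, either one array per curve or a (lower, upper) pair of arrays."""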
fig = plt.figure(fig_tag, figsize=input_fig_size, dpi=100, facecolor='w', edgecolor='k')
fig.subplots_adjust(left=0.04, right=0.98, top=0.92, bottom=0.08)
font = {'family': 'serif', 'color': 'darkblue', 'weight': 'normal', 'size': 14}
fig.canvas.set_window_title(window_title_input)
if input_parameters is None:
ax_graph = plt.subplot(111)
else:
ax_graph = plt.subplot2grid((3, 4), (0, 0), colspan=3, rowspan=3)
if additional_field is not None:
ax_svf = plt.subplot2grid((3, 4), (2, 3), colspan=1, rowspan=1)
if input_colors is None:
colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
input_colors = [colors[j % len(colors)] for j in range(len(list_steps))]
if input_marker is None:
input_marker = ['.', ] * len(list_steps)
if input_line_style is None:
input_line_style = ['-', ] * len(list_steps)
for j in range(matrix_of_lines_means.shape[0]):
if y_error is None:
ax_graph.errorbar(list_steps, matrix_of_lines_means[j, :],
color=input_colors[j],
linestyle=input_line_style[j],
marker=input_marker[j],
label=label_lines[j])
else:
if len(y_error) == 2:
ax_graph.errorbar(list_steps, matrix_of_lines_means[j, :],
yerr=[y_error[0][j], y_error[1][j]],
color=input_colors[j],
linestyle=input_line_style[j],
marker=input_marker[j],
label=label_lines[j])
else:
ax_graph.errorbar(list_steps, matrix_of_lines_means[j, :],
yerr=y_error[j],
color=input_colors[j],
linestyle=input_line_style[j],
marker=input_marker[j],
label=label_lines[j])
ax_graph.set_title(titles[0])
ax_graph.legend(loc=legend_location, shadow=False)
ax_graph.xaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5)
ax_graph.yaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5)
ax_graph.set_axisbelow(True)
ax_graph.set_xlabel(x_axis_label, fontdict=font, labelpad=18)
ax_graph.set_ylabel(y_axis_label, fontdict=font, labelpad=10)
if log_scale:
ax_graph.set_yscale('log', nonposy='mask')
ax_graph.set_ylabel(y_axis_label + ' log-scale')
if additional_vertical_line is not None:
# draw the vertical reference line:
xa, xb, ya, yb = list(ax_graph.axis())
ax_graph.plot([additional_vertical_line, additional_vertical_line], [ya, yb], 'k--', lw=0.5, color='0.3')
ax_graph.text(additional_vertical_line + 0.2, (yb - ya)/2., r'automatic = '+str(additional_vertical_line))
# ax_graph.set_xlim(0 - 0.5, num_boxes + 0.5)
# right side of the figure:
# Quiver
if additional_field is not None and input_parameters is not None:
ax_svf.set_title(titles[1])
xx, yy = np.meshgrid(np.arange(additional_field.shape[0]), np.arange(additional_field.shape[1]))
ax_svf.quiver(yy,
xx,
additional_field[:, :, 0, 0, 0],
additional_field[:, :, 0, 0, 1],
color='r', linewidths=0.2, units='xy', angles='xy', scale=1, scale_units='xy')
# Text on the figure: customise this block as needed.
# Recognised kinds: 'multiple_SE2', 'multiple_HOM', 'multiple_GAUSS', 'multiple_REAL'.
if kind is not None and input_parameters is not None:
dom = tuple([int(j) for j in input_parameters[:3]])
fig.text(.78, .80, r'Domain = ' + str(dom))
if kind == 'multiple_SE2':
fig.text(.765, .85, r'SE(2) generated SVF: ')
fig.text(.78, .75, r'$N = $ ' + str(int(input_parameters[3])))
fig.text(.78, .70, str(np.round(input_parameters[4], 3)) +
r'$ \leq \theta \leq $ ' +
str(np.round(input_parameters[5], 3)))
fig.text(.78, .65, str( | np.round(input_parameters[3], 3) | numpy.round |
# Watershed Se detection function
# This function is based on code contributed by <NAME>, Arkansas State University.
# For more information see https://github.com/lsx1980/Leaf_count
import cv2
import numpy as np
from scipy import ndimage as ndi
from skimage.feature import peak_local_max
from skimage.morphology import watershed
from . import print_image
from . import plot_image
from . import apply_mask
from . import color_palette
def watershed_segmentation(device, img, mask, distance=10, filename=False, debug=None):
"""Uses the watershed algorithm to detect boundary of objects. Needs a marker file which specifies area which is
object (white), background (grey), unknown area (black).
Inputs:
device = device number. Used to count steps in the pipeline
img = image to perform watershed on needs to be 3D (i.e. np.shape = x,y,z not np.shape = x,y)
mask = binary image, single channel, object in white and background black
distance = min_distance of local maximum
filename = if user wants to output analysis images change filenames from false
debug = None, print, or plot. Print = save to file, Plot = print to screen.
Returns:
device = device number
watershed_header = shape data table headers
watershed_data = shape data table values
analysis_images = list of output images
:param device: int
:param img: numpy array
:param mask: numpy array
:param distance: int
:param filename: str
:param debug: str
:return device: int
:return watershed_header: list
:return watershed_data: list
:return analysis_images: list
"""
dist_transform = cv2.distanceTransform(mask, cv2.cv.CV_DIST_L2, maskSize=0)
localMax = peak_local_max(dist_transform, indices=False, min_distance=distance, labels=mask)
markers = ndi.label(localMax, structure= | np.ones((3, 3)) | numpy.ones |
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# License: BSD (3-clause)
from mne_connectivity.io import read_connectivity
import numpy as np
import pytest
from numpy.testing import (assert_allclose, assert_array_equal,
assert_array_less)
from scipy.signal import hilbert
from mne.utils import catch_logging, use_log_level
from mne_connectivity.envelope import envelope_correlation, symmetric_orth
def _compute_corrs_orig(data):
# This is the version of the code by Sheraz and Denis.
# For this version (epochs, labels, time) must be -> (labels, time, epochs)
n_epochs, n_labels, _ = data.shape
corr = np.zeros((n_labels, n_labels))
for epoch_data in data:
for ii in range(n_labels):
for jj in range(n_labels):
# Get timeseries for each pair
x, y = epoch_data[ii], epoch_data[jj]
x_mag = np.abs(x)
x_conj_scaled = x.conj()
x_conj_scaled /= x_mag
# Calculate orthogonalization
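# i.e. y_orth_x = Im(y * conj(x) / |x|): the part of y whose phase is
# orthogonal to x, which suppresses instantaneous (zero-lag) leakage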
y_orth_x = (y * x_conj_scaled).imag
y_orth_x_mag = np.abs(y_orth_x)
# Estimate correlation
corr[ii, jj] += np.abs(np.corrcoef(x_mag, y_orth_x_mag)[0, 1])
corr = (corr + corr.T) / (2. * n_epochs)
corr.flat[::n_labels + 1] = 0.
return corr
def test_roundtrip_envelope_correlation(tmp_path):
"""Test write/read roundtrip for envelope correlation."""
rng = np.random.RandomState(0)
n_epochs, n_signals, n_times = 1, 4, 64
data = rng.randn(n_epochs, n_signals, n_times)
data_hilbert = hilbert(data, axis=-1)
corr = envelope_correlation(data_hilbert)
tmp_file = tmp_path / 'temp_file.nc'
corr.save(tmp_file)
read_corr = read_connectivity(tmp_file)
assert_array_equal(corr.get_data(), read_corr.get_data())
def test_empty_epochs_correlation():
"""Test empty epochs object results in error."""
rng = | np.random.RandomState(0) | numpy.random.RandomState |
"""
Helper classes and functions for the SCF folder.
References:
- RHF/UHF equations & algorithms from [Szabo:1996]
- DIIS equations & algorithm from [Sherrill:1998], [Pulay:1980:393], & [Pulay:1969:197]
- Orbital rotation expressions from [Helgaker:2000]
"""
__authors__ = "<NAME>"
__credits__ = ["<NAME>"]
__copyright__ = "(c) 2014-2018, The Psi4NumPy Developers"
__license__ = "BSD-3-Clause"
__date__ = "2017-9-30"
import time
import numpy as np
import psi4
np.set_printoptions(precision=5, linewidth=200, suppress=True)
class helper_HF(object):
"""
A generalized Hartree-Fock helper script.
Notes
-----
Equations and algorithms from [Szabo:1996]
"""
def __init__(self, mol, basis=None, memory=2, ndocc=None, scf_type='DF', guess='CORE'):
"""
Initializes the helper_HF object.
Parameters
----------
mol : psi4.core.Molecule or str
The molecule to be used for the given helper object.
basis : {str, None}, optional
The basis string to be used
memory : {int, 2}, optional
The amount of memory (in GB) to use.
ndocc : {int, None}, optional
The number of occupied orbitals for the HF helper. Defaults to the number of electrons divided by 2.
scf_type : {"DF", "PK", "OUT_OF_CORE"}, optional
The type of JK object to use.
guess : {"CORE", "SAD"}, optional
The initial guess type to attempt.
Returns
-------
ret : helper_HF
An initialized helper_HF object
Examples
--------
# Construct the helper object
>>> helper = helper_HF("He\nHe 1 2.0", "cc-pVDZ")
# Take a Roothaan-Hall step
>>> F = helper.build_fock()
>>> helper.compute_hf_energy()
-5.4764571474633197
# Take a Roothaan-Hall step
>>> e, C = helper.diag(F)
>>> helper.set_Cleft(C)
>>> F = helper.build_fock()
>>> helper.compute_hf_energy()
-5.706674424039214
"""
# Build and store all 2D values
print('Building rank 2 integrals...')
t = time.time()
if not isinstance(mol, psi4.core.Molecule):
mol = psi4.geometry(mol)
if basis is None:
basis = psi4.core.get_global_option('BASIS')
else:
psi4.core.set_global_option("BASIS", basis)
wfn = psi4.core.Wavefunction.build(mol, basis)
self.wfn = wfn
self.mints = psi4.core.MintsHelper(wfn.basisset())
self.enuc = mol.nuclear_repulsion_energy()
# Build out necessary 2D matrices
self.S = np.asarray(self.mints.ao_overlap())
self.V = np.asarray(self.mints.ao_potential())
self.T = np.asarray(self.mints.ao_kinetic())
self.H = self.T + self.V
# Holder objects
self.Da = None
self.Db = None
self.Ca = None
self.Cb = None
self.J = None
self.K = None
# Build the symmetric orthogonalizer
A = self.mints.ao_overlap()
A.power(-0.5, 1.e-14)
self.A = np.asarray(A)
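# A = S^{-1/2}: transforming with A maps the AO basis to an orthonormal one, so the
# Roothaan equations F C = S C e reduce to a standard eigenvalue problem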
# Get nbf and ndocc for closed shell molecules
self.epsilon = None
self.nbf = self.S.shape[0]
if ndocc:
self.ndocc = ndocc
else:
self.ndocc = int(sum(mol.Z(A) for A in range(mol.natom())) / 2)
# Only rhf for now
self.nvirt = self.nbf - self.ndocc
print('\nNumber of occupied orbitals: %d' % self.ndocc)
print('Number of basis functions: %d' % self.nbf)
self.C_left = psi4.core.Matrix(self.nbf, self.ndocc)
self.npC_left = | np.asarray(self.C_left) | numpy.asarray |
import numpy as np
import torch
import os
from generic import get_match_result, to_np, get_match_result_obs_gen
def evaluate_with_ground_truth_graph(env, agent, num_games):
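"""Evaluate `agent` on `num_games` games using ground-truth graph triplets as input
(no command generation). Returns mean game points, mean normalised points, mean
steps, a placeholder 0.0 for command-generation F1, and a printable report string."""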
# here we do not eval command generation
achieved_game_points = []
total_game_steps = []
game_name_list = []
game_max_score_list = []
game_id = 0
while(True):
if game_id >= num_games:
break
obs, infos = env.reset()
# filter look and examine actions
for commands_ in infos["admissible_commands"]:
for cmd_ in [cmd for cmd in commands_ if cmd != "examine cookbook" and cmd.split()[0] in ["examine", "look"]]:
commands_.remove(cmd_)
game_name_list += [game.metadata["uuid"].split("-")[-1] for game in infos["game"]]
game_max_score_list += [game.max_score for game in infos["game"]]
batch_size = len(obs)
agent.eval()
agent.init()
chosen_actions, prev_step_dones = [], []
for _ in range(batch_size):
chosen_actions.append("restart")
prev_step_dones.append(0.0)
prev_h, prev_c = None, None
observation_strings, current_triplets, action_candidate_list, _, _ = agent.get_game_info_at_certain_step(obs, infos, prev_actions=None, prev_facts=None)
observation_strings = [item + " <sep> " + a for item, a in zip(observation_strings, chosen_actions)]
still_running_mask = []
final_scores = []
for step_no in range(agent.eval_max_nb_steps_per_episode):
# choose what to do next from candidate list
chosen_actions, chosen_indices, prev_h, prev_c = agent.act_greedy(observation_strings, current_triplets, action_candidate_list, prev_h, prev_c)
# send chosen actions to game engine
chosen_actions_before_parsing = [item[idx] for item, idx in zip(infos["admissible_commands"], chosen_indices)]
obs, scores, dones, infos = env.step(chosen_actions_before_parsing)
# filter look and examine actions
for commands_ in infos["admissible_commands"]:
for cmd_ in [cmd for cmd in commands_ if cmd != "examine cookbook" and cmd.split()[0] in ["examine", "look"]]:
commands_.remove(cmd_)
observation_strings, current_triplets, action_candidate_list, _, _ = agent.get_game_info_at_certain_step(obs, infos, prev_actions=None, prev_facts=None)
observation_strings = [item + " <sep> " + a for item, a in zip(observation_strings, chosen_actions)]
still_running = [1.0 - float(item) for item in prev_step_dones] # list of float
prev_step_dones = dones
final_scores = scores
still_running_mask.append(still_running)
# if all ended, break
if np.sum(still_running) == 0:
break
achieved_game_points += final_scores
still_running_mask = np.array(still_running_mask)
total_game_steps += np.sum(still_running_mask, 0).tolist()
game_id += batch_size
achieved_game_points = np.array(achieved_game_points, dtype="float32")
game_max_score_list = np.array(game_max_score_list, dtype="float32")
normalized_game_points = achieved_game_points / game_max_score_list
print_strings = []
print_strings.append("======================================================")
print_strings.append("EVAL: rewards: {:2.3f} | normalized reward: {:2.3f} | used steps: {:2.3f}".format(np.mean(achieved_game_points), np.mean(normalized_game_points), np.mean(total_game_steps)))
for i in range(len(game_name_list)):
print_strings.append("game name: {}, reward: {:2.3f}, normalized reward: {:2.3f}, steps: {:2.3f}".format(game_name_list[i], achieved_game_points[i], normalized_game_points[i], total_game_steps[i]))
print_strings.append("======================================================")
print_strings = "\n".join(print_strings)
print(print_strings)
return np.mean(achieved_game_points), np.mean(normalized_game_points), np.mean(total_game_steps), 0.0, print_strings
def evaluate(env, agent, num_games):
if agent.fully_observable_graph:
return evaluate_with_ground_truth_graph(env, agent, num_games)
achieved_game_points = []
total_game_steps = []
game_name_list = []
game_max_score_list = []
game_id = 0
while(True):
if game_id >= num_games:
break
obs, infos = env.reset()
# filter look and examine actions
for commands_ in infos["admissible_commands"]:
for cmd_ in [cmd for cmd in commands_ if cmd != "examine cookbook" and cmd.split()[0] in ["examine", "look"]]:
commands_.remove(cmd_)
game_name_list += [game.metadata["uuid"].split("-")[-1] for game in infos["game"]]
game_max_score_list += [game.max_score for game in infos["game"]]
batch_size = len(obs)
agent.eval()
agent.init()
triplets, chosen_actions, prev_game_facts = [], [], []
prev_step_dones = []
for _ in range(batch_size):
chosen_actions.append("restart")
prev_game_facts.append(set())
triplets.append([])
prev_step_dones.append(0.0)
prev_h, prev_c = None, None
observation_strings, current_triplets, action_candidate_list, _, current_game_facts = agent.get_game_info_at_certain_step(obs, infos, prev_actions=chosen_actions, prev_facts=None)
observation_strings = [item + " <sep> " + a for item, a in zip(observation_strings, chosen_actions)]
still_running_mask = []
final_scores = []
for step_no in range(agent.eval_max_nb_steps_per_episode):
# choose what to do next from candidate list
chosen_actions, chosen_indices, prev_h, prev_c = agent.act_greedy(observation_strings, current_triplets, action_candidate_list, prev_h, prev_c)
# send chosen actions to game engine
chosen_actions_before_parsing = [item[idx] for item, idx in zip(infos["admissible_commands"], chosen_indices)]
obs, scores, dones, infos = env.step(chosen_actions_before_parsing)
# filter look and examine actions
for commands_ in infos["admissible_commands"]:
for cmd_ in [cmd for cmd in commands_ if cmd != "examine cookbook" and cmd.split()[0] in ["examine", "look"]]:
commands_.remove(cmd_)
prev_game_facts = current_game_facts
observation_strings, current_triplets, action_candidate_list, _, current_game_facts = agent.get_game_info_at_certain_step(obs, infos, prev_actions=chosen_actions, prev_facts=prev_game_facts)
observation_strings = [item + " <sep> " + a for item, a in zip(observation_strings, chosen_actions)]
still_running = [1.0 - float(item) for item in prev_step_dones] # list of float
prev_step_dones = dones
final_scores = scores
still_running_mask.append(still_running)
# if all ended, break
if np.sum(still_running) == 0:
break
achieved_game_points += final_scores
still_running_mask = np.array(still_running_mask)
total_game_steps += np.sum(still_running_mask, 0).tolist()
game_id += batch_size
achieved_game_points = np.array(achieved_game_points, dtype="float32")
game_max_score_list = np.array(game_max_score_list, dtype="float32")
normalized_game_points = achieved_game_points / game_max_score_list
print_strings = []
print_strings.append("======================================================")
print_strings.append("EVAL: rewards: {:2.3f} | normalized reward: {:2.3f} | used steps: {:2.3f}".format(np.mean(achieved_game_points), np.mean(normalized_game_points), np.mean(total_game_steps)))
for i in range(len(game_name_list)):
print_strings.append("game name: {}, reward: {:2.3f}, normalized reward: {:2.3f}, steps: {:2.3f}".format(game_name_list[i], achieved_game_points[i], normalized_game_points[i], total_game_steps[i]))
print_strings.append("======================================================")
print_strings = "\n".join(print_strings)
print(print_strings)
return np.mean(achieved_game_points), np.mean(normalized_game_points), np.mean(total_game_steps), 0.0, print_strings
def evaluate_belief_mode(env, agent, num_games):
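"""Evaluate in belief mode: at each step the agent first generates graph-update
commands from the observation, updates its belief triplets with them, then acts
greedily; the exact-match F1 of the generated commands against the ground-truth
commands is tracked alongside the game scores."""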
achieved_game_points = []
total_game_steps = []
total_command_generation_f1 = []
game_name_list = []
game_max_score_list = []
game_id = 0
while(True):
if game_id >= num_games:
break
obs, infos = env.reset()
# filter look and examine actions
for commands_ in infos["admissible_commands"]:
for cmd_ in [cmd for cmd in commands_ if cmd != "examine cookbook" and cmd.split()[0] in ["examine", "look"]]:
commands_.remove(cmd_)
game_name_list += [game.metadata["uuid"].split("-")[-1] for game in infos["game"]]
game_max_score_list += [game.max_score for game in infos["game"]]
batch_size = len(obs)
agent.eval()
agent.init()
triplets, chosen_actions, prev_game_facts = [], [], []
avg_command_generation_f1_in_a_game, prev_step_dones = [], []
for _ in range(batch_size):
chosen_actions.append("restart")
prev_game_facts.append(set())
triplets.append([])
avg_command_generation_f1_in_a_game.append([])
prev_step_dones.append(0.0)
prev_h, prev_c = None, None
observation_strings, _, action_candidate_list, target_command_strings, current_game_facts = agent.get_game_info_at_certain_step(obs, infos, prev_actions=chosen_actions, prev_facts=None, return_gt_commands=True)
observation_strings = [item + " <sep> " + a for item, a in zip(observation_strings, chosen_actions)]
still_running_mask = []
final_scores = []
for step_no in range(agent.eval_max_nb_steps_per_episode):
# generate triplets to update the observed info into KG
generated_commands = agent.command_generation_greedy_generation(observation_strings, triplets)
triplets = agent.update_knowledge_graph_triplets(triplets, generated_commands)
# choose what to do next from candidate list
chosen_actions, chosen_indices, prev_h, prev_c = agent.act_greedy(observation_strings, triplets, action_candidate_list, prev_h, prev_c)
# send chosen actions to game engine
chosen_actions_before_parsing = [item[idx] for item, idx in zip(infos["admissible_commands"], chosen_indices)]
obs, scores, dones, infos = env.step(chosen_actions_before_parsing)
# filter look and examine actions
for commands_ in infos["admissible_commands"]:
for cmd_ in [cmd for cmd in commands_ if cmd != "examine cookbook" and cmd.split()[0] in ["examine", "look"]]:
commands_.remove(cmd_)
still_running = [1.0 - float(item) for item in prev_step_dones] # list of float
# eval command generation
for i in range(batch_size):
if still_running[i] == 0:
continue
_, _, exact_f1 = get_match_result(generated_commands[i], target_command_strings[i], type='exact')
avg_command_generation_f1_in_a_game[i].append(exact_f1)
prev_game_facts = current_game_facts
observation_strings, _, action_candidate_list, target_command_strings, current_game_facts = agent.get_game_info_at_certain_step(obs, infos, prev_actions=chosen_actions, prev_facts=prev_game_facts, return_gt_commands=True)
observation_strings = [item + " <sep> " + a for item, a in zip(observation_strings, chosen_actions)]
prev_step_dones = dones
final_scores = scores
still_running_mask.append(still_running)
# if all ended, break
if np.sum(still_running) == 0:
break
achieved_game_points += final_scores
still_running_mask = np.array(still_running_mask)
total_game_steps += np.sum(still_running_mask, 0).tolist()
total_command_generation_f1 += [np.mean(item) for item in avg_command_generation_f1_in_a_game]
game_id += batch_size
achieved_game_points = np.array(achieved_game_points, dtype="float32")
game_max_score_list = np.array(game_max_score_list, dtype="float32")
normalized_game_points = achieved_game_points / game_max_score_list
print_strings = []
print_strings.append("======================================================")
print_strings.append("EVAL: rewards: {:2.3f} | normalized reward: {:2.3f} | used steps: {:2.3f} | command generation f1: {:2.3f}".format(np.mean(achieved_game_points), np.mean(normalized_game_points), np.mean(total_game_steps), np.mean(total_command_generation_f1)))
for i in range(len(game_name_list)):
print_strings.append("game name: {}, reward: {:2.3f}, normalized reward: {:2.3f}, steps: {:2.3f}, cmd gen f1: {:2.3f}".format(game_name_list[i], achieved_game_points[i], normalized_game_points[i], total_game_steps[i], total_command_generation_f1[i]))
print_strings.append("======================================================")
print_strings = "\n".join(print_strings)
print(print_strings)
return np.mean(achieved_game_points), np.mean(normalized_game_points), np.mean(total_game_steps), np.mean(total_command_generation_f1), print_strings
def evaluate_rl_with_real_graphs(env, agent, num_games):
achieved_game_points = []
total_game_steps = []
game_name_list = []
game_max_score_list = []
game_id = 0
while(True):
if game_id >= num_games:
break
obs, infos = env.reset()
# filter look and examine actions
for commands_ in infos["admissible_commands"]:
for cmd_ in [cmd for cmd in commands_ if cmd != "examine cookbook" and cmd.split()[0] in ["examine", "look"]]:
commands_.remove(cmd_)
game_name_list += [game.metadata["uuid"].split("-")[-1] for game in infos["game"]]
game_max_score_list += [game.max_score for game in infos["game"]]
batch_size = len(obs)
agent.eval()
agent.init()
chosen_actions, prev_game_facts = [], []
prev_step_dones = []
prev_graph_hidden_state = torch.zeros(batch_size, agent.online_net.block_hidden_dim)
if agent.use_cuda:
prev_graph_hidden_state = prev_graph_hidden_state.cuda()
for _ in range(batch_size):
chosen_actions.append("restart")
prev_game_facts.append(set())
prev_step_dones.append(0.0)
prev_h, prev_c = None, None
########
## remove for obs_gen
observation_strings, _, action_candidate_list, _, current_game_facts = agent.get_game_info_at_certain_step(obs, infos, prev_actions=chosen_actions, prev_facts=None, return_gt_commands=True)
########
still_running_mask = []
final_scores = []
for step_no in range(agent.eval_max_nb_steps_per_episode):
new_adjacency_matrix, new_graph_hidden_state = agent.generate_adjacency_matrix_for_rl(observation_strings, chosen_actions, prev_graph_hidden_state)
chosen_actions, chosen_indices, prev_h, prev_c = agent.act_greedy(observation_strings, new_adjacency_matrix, action_candidate_list, previous_h=prev_h, previous_c=prev_c)
# send chosen actions to game engine
chosen_actions_before_parsing = [item[idx] for item, idx in zip(infos["admissible_commands"], chosen_indices)]
obs, scores, dones, infos = env.step(chosen_actions_before_parsing)
# filter look and examine actions
for commands_ in infos["admissible_commands"]:
for cmd_ in [cmd for cmd in commands_ if cmd != "examine cookbook" and cmd.split()[0] in ["examine", "look"]]:
commands_.remove(cmd_)
prev_graph_hidden_state = new_graph_hidden_state
prev_graph_hidden_state = prev_graph_hidden_state.detach()
prev_game_facts = current_game_facts
observation_strings, _, action_candidate_list, _, current_game_facts = agent.get_game_info_at_certain_step(obs, infos, prev_actions=chosen_actions, prev_facts=prev_game_facts, return_gt_commands=True)
chosen_actions_before_parsing = chosen_actions # for adj_for_mp
still_running = [1.0 - float(item) for item in prev_step_dones] # list of float
prev_step_dones = dones
final_scores = scores
still_running_mask.append(still_running)
# if all ended, break
if np.sum(still_running) == 0:
break
achieved_game_points += final_scores
still_running_mask = np.array(still_running_mask)
total_game_steps += np.sum(still_running_mask, 0).tolist()
game_id += batch_size
achieved_game_points = np.array(achieved_game_points, dtype="float32")
game_max_score_list = np.array(game_max_score_list, dtype="float32")
normalized_game_points = achieved_game_points / game_max_score_list
print_strings = []
print_strings.append("======================================================")
print_strings.append("EVAL: rewards: {:2.3f} | normalized reward: {:2.3f} | used steps: {:2.3f}".format(np.mean(achieved_game_points), np.mean(normalized_game_points), np.mean(total_game_steps)))
for i in range(len(game_name_list)):
print_strings.append("game name: {}, reward: {:2.3f}, normalized reward: {:2.3f}, steps: {:2.3f}".format(game_name_list[i], achieved_game_points[i], normalized_game_points[i], total_game_steps[i]))
print_strings.append("======================================================")
print_strings = "\n".join(print_strings)
print(print_strings)
return np.mean(achieved_game_points), np.mean(normalized_game_points), np.mean(total_game_steps), print_strings
def evaluate_pretrained_command_generation(env, agent, valid_test="valid", verbose=False):
env.split_reset(valid_test)
agent.eval()
total_soft_f1, total_exact_f1 = [], []
counter = 0
to_print = []
while(True):
observation_strings, triplets, target_strings = env.get_batch()
pred_strings = agent.command_generation_greedy_generation(observation_strings, triplets)
for i in range(len(observation_strings)):
_, _, exact_f1 = get_match_result(pred_strings[i], target_strings[i], type='exact')
_, _, soft_f1 = get_match_result(pred_strings[i], target_strings[i], type='soft')
total_exact_f1.append(exact_f1)
total_soft_f1.append(soft_f1)
if verbose:
to_print.append(str(counter) + " -------------------------------------------- exact f1: " + str(exact_f1) + ", soft f1: " + str(soft_f1))
to_print.append("OBS: %s " % (observation_strings[i]))
trips = []
for t in triplets[i]:
trips.append(t[0] + "-" + t[2] + "-" + t[1])
to_print.append("TRIPLETS: %s " % (" | ".join(trips)))
to_print.append("PRED: %s " % (pred_strings[i]))
to_print.append("GT: %s " % (target_strings[i]))
to_print.append("")
counter += 1
if env.batch_pointer == 0:
break
with open(agent.experiment_tag + "_output.txt", "w") as f:
f.write("\n".join(to_print))
print("Hard F1: ", np.mean(np.array(total_exact_f1)), "Soft F1:", np.mean( | np.array(total_soft_f1) | numpy.array |
from __future__ import print_function, division
import os
import sys
sys.path.append(os.path.dirname(sys.path[0]))
import warnings
import numpy as np
from scipy import interpolate
from scipy.ndimage import interpolation as spinterp
from scipy.stats import threshold
import geometry
import density
def cart2pol(*coords):
"""Convert cartesian coordinates to polar coordinates.
rho, theta = cart2pol(x, y)"""
if len(coords) == 1:
cart = coords[0]
assert cart.shape[1] == 2
rho = np.sqrt(np.sum(cart ** 2, 1))
theta = np.arctan2(cart[:, 1], cart[:, 0])
return np.vstack((rho, theta)).T
elif len(coords) == 2:
x, y = coords
assert x.shape == y.shape
rho = np.sqrt(x**2 + y**2)
theta = np.arctan2(y, x)
return rho, theta
else:
raise ValueError('inappropriate arguments')
def pol2cart(*coords):
"""Convert polar coordinates to cartesian coordinates.
x, y = pol2cart(rho, theta)"""
if len(coords) == 1:
pol = coords[0]
assert pol.shape[1] == 2
x = pol[:, 0] * np.cos(pol[:, 1])
y = pol[:, 0] * np.sin(pol[:, 1])
return np.vstack((x, y)).T
elif len(coords) == 2:
rho, theta = coords
assert rho.shape == theta.shape
x = rho * np.cos(theta)
y = rho * np.sin(theta)
return x, y
else:
raise ValueError('inappropriate arguments')
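# Quick sanity check (illustrative): cart2pol(1.0, 0.0) -> (1.0, 0.0),
# cart2pol(0.0, 2.0) -> (2.0, pi/2), and pol2cart inverts cart2pol up to
# floating-point error.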
# Image center:
# The center of rotation of a 2D image of dimensions xdim x ydim is defined by
# ((int)xdim/2, (int)(ydim/2)) (with the first pixel in the upper left being (0,0).
# Note that for both xdim=ydim=65 and for xdim=ydim=64, the center will be at (32,32).
# This is the same convention as used in SPIDER and XMIPP. Origin offsets reported
# for individual images translate the image to its center and are to be applied
# BEFORE rotations.
def imgpolarcoord(img, rad=1.0):
"""
Convert a given image from cartesian coordinates to polar coordinates.
"""
row, col = img.shape
cx = int(col/2)
cy = int(row/2)
radius = int(min([row-cy, col-cx, cx, cy]) * rad)
angle = 360.0
# Interpolation: Nearest
pcimg = np.zeros((int(radius), int(angle)))
radius_range = np.arange(0, radius, 1)
angle_range = np.arange(0, 2*np.pi, 2*np.pi/angle)
i = 0
for r in radius_range:
j = 0
for a in angle_range:
pcimg[i, j] = img[int(cy+round(r*np.sin(a))), int(cx+round(r*np.cos(a)))]
j = j + 1
i = i + 1
return pcimg
def imgpolarcoord3(img, rad=1.0):
"""
Convert a given image from cartesian coordinates to polar coordinates (using linear interpolation).
"""
row, col = img.shape
cx = int(col/2)
cy = int(row/2)
radius = float(min([row-cy, col-cx, cx, cy])) * rad
angle = 360.0
# Interpolation: Linear
rho_range = np.arange(0, radius, 1)
theta_range = np.arange(0, 2*np.pi, 2*np.pi/angle)
theta_grid, rho_grid = np.meshgrid(theta_range, rho_range)
new_x_grid, new_y_grid = pol2cart(rho_grid, theta_grid)
pcimg = spinterp.map_coordinates(img, (new_x_grid + radius, new_y_grid + radius))
return pcimg
def get_corr_img(img, rad=1.0, pcimg_interpolation='nearest'):
"""
Get an angular correlation image.
"""
if 'nearest' in pcimg_interpolation.lower():
pcimg = imgpolarcoord(img, rad=rad)
elif 'linear' in pcimg_interpolation.lower():
pcimg = imgpolarcoord3(img, rad=rad)
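# Angular autocorrelation via the Wiener-Khinchin relation: FFT along the angular
# axis, multiply by the complex conjugate, then inverse-FFT to get the circular
# correlation of each radius ring with itself.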
pcimg_fourier = np.fft.fftshift(np.fft.fft(pcimg, axis=1))
corr_img = np.fft.ifft(np.fft.ifftshift(pcimg_fourier*np.conjugate(pcimg_fourier)), axis=1)
return np.require(corr_img.real, dtype=density.real_t)
def get_corr_imgs(imgs, rad=1.0, pcimg_interpolation='nearest'):
num_imgs = imgs.shape[0]
N = imgs.shape[1]
assert N == imgs.shape[2]
corr_imgs = np.zeros((num_imgs, int(N/2.0), 360), dtype=density.real_t)
for i, img in enumerate(imgs):
corr_imgs[i, :, :] = get_corr_img(img, rad=rad, pcimg_interpolation=pcimg_interpolation)
return corr_imgs
def gencoords_outside(N, d, rad=None, truncmask=False, trunctype='circ'):
""" generate coordinates of all points in an NxN..xN grid with d dimensions
coords in each dimension are [-N/2, N/2)
N should be even"""
if not truncmask:
_, truncc, _ = gencoords_outside(N, d, rad, True)
return truncc
c = geometry.gencoords_base(N, d)
if rad is not None:
if trunctype == 'circ':
r2 = np.sum(c**2, axis=1)
trunkmask = r2 > (rad*N/2.0)**2
elif trunctype == 'square':
r = np.max(np.abs(c), axis=1)
trunkmask = r > (rad*N/2.0)
truncc = c[trunkmask, :]
else:
trunkmask = np.ones((c.shape[0],), dtype=np.bool8)
truncc = c
return c, truncc, trunkmask
def calc_angular_correlation(trunc_slices, N, rad, beamstop_rad=None, pixel_size=1.0, interpolation='nearest',
sort_theta=True, clip=True, outside=False,):
"""compute angular correlation for input array
outside: True or False (default: False)
calculate the angular correlation inside the given radius, or outside of it
sort_theta: True or False (default: True)
sort theta when slicing the same rho in trunc array
"""
# 1. get an input (single: N_T or multi: N_R x N_T) in the normal sequence.
# 2. sort the truncation array by the rho value of the polar coordinates
# 3. apply the angular correlation function to the sorted slice, for both the real and the imaginary part
# 4. deal with outliers beyond 3 sigma (not enough points to do sampling via fft)
#    (oversampling is unavailable, hence drop points beyond 3 sigma)
# 5. return the angular correlation slice in the normal sequence.
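# Shapes (for reference): `trunc_slices` is either a single slice of length N_T
# (real or complex) or a stack of shape (N_R, N_T); the returned array has the
# same shape and dtype as the input.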
# 1.
iscomplex = np.iscomplexobj(trunc_slices)
if outside:
trunc_xy = gencoords_outside(N, 2, rad)
else:
if beamstop_rad is None:
trunc_xy = geometry.gencoords(N, 2, rad)
else:
trunc_xy = geometry.gencoords_centermask(N, 2, rad, beamstop_rad)
if trunc_slices.ndim < 2:
assert trunc_xy.shape[0] == trunc_slices.shape[0], "wrong length of trunc slice or wrong radius"
else:
assert trunc_xy.shape[0] == trunc_slices.shape[1], "wrong length of trunc slice or wrong radius"
# 2.
pol_trunc_xy = cart2pol(trunc_xy)
if sort_theta:
# lexsort; first, sort rho; second, sort theta
sorted_idx = np.lexsort((pol_trunc_xy[:, 1], pol_trunc_xy[:, 0]))
else:
sorted_idx = np.argsort(pol_trunc_xy[:, 0])
axis = trunc_slices.ndim - 1
sorted_rho = np.take(pol_trunc_xy[:, 0], sorted_idx)
sorted_slice = np.take(trunc_slices, sorted_idx, axis=axis)
# 3.
if 'none' in interpolation:
pass
elif 'nearest' in interpolation:
sorted_rho = np.round(sorted_rho)
elif 'linear' in interpolation:
raise NotImplementedError()
else:
raise ValueError('unsupported method for interpolation')
# sorted_rho_freqs = sorted_rho / (N * pixel_size)
resolution = 1.0 / (N * pixel_size)
_, unique_idx, unique_counts = np.unique(sorted_rho, return_index=True, return_counts=True)
indices = [slice(None)] * trunc_slices.ndim
angular_correlation = np.zeros_like(trunc_slices, dtype=trunc_slices.dtype)
for i, count in enumerate(unique_counts):
indices[axis] = slice(unique_idx[i], unique_idx[i] + count)
# minimum points to do fft (2 or 4 times the Nyquist frequency)
minimum_sample_points = (4 / count) / resolution
if count < minimum_sample_points:
angular_correlation[indices] = np.copy(sorted_slice[indices])
else:
# use view (slicing) or copy (fancy indexing, np.take(), np.put())?
same_rho = np.copy(sorted_slice[indices])
fpcimg_real = density.real_to_fspace(same_rho.real, axes=(axis,)) # polar image in Fourier space
angular_correlation[indices].real = density.fspace_to_real(
fpcimg_real * fpcimg_real.conjugate(), axes=(axis,)).real
if iscomplex: # FIXME: stupid way. optimize this
fpcimg_fourier = density.real_to_fspace(same_rho.imag, axes=(axis,)) # polar image in Fourier space
angular_correlation[indices].imag = density.fspace_to_real(
fpcimg_fourier * fpcimg_fourier.conjugate(), axes=(axis,)).real
# check inf and nan
if np.any( | np.isinf(angular_correlation) | numpy.isinf |
import matplotlib.pyplot as plt
from matplotlib import rcParams
import matplotlib as mpl
mpl.use("Agg")
import seaborn
import time
from pprint import pprint
import json
import numpy as np
np.random.seed(42)
import uproot
from scipy.stats import rv_histogram, expon, poisson
from veloGeom import buildTileXY, boundingVolumes, testIntersection
from genPhSp import genPhaseSpace
from genVeloHits import genTracks, addDecays
from genVeloHits import formDecayproducts, sampleBKinematics, genHits
plt.style.use(["seaborn-whitegrid", "seaborn-ticks"])
rcParams["figure.figsize"] = 12, 12
rcParams["axes.facecolor"] = "FFFFFF"
rcParams["savefig.facecolor"] = "FFFFFF"
rcParams["figure.facecolor"] = "FFFFFF"
rcParams["xtick.direction"] = "in"
rcParams["ytick.direction"] = "in"
rcParams["mathtext.fontset"] = "cm"
rcParams["mathtext.rm"] = "serif"
rcParams.update({"figure.autolayout": True})
def drawXY(tile, colour="k"):
# bottom
plt.plot([tile[0][0], tile[0][1]], [tile[1][0], tile[1][0]], color=colour)
# right
plt.plot([tile[0][1], tile[0][1]], [tile[1][0], tile[1][1]], color=colour)
# top
plt.plot([tile[0][0], tile[0][1]], [tile[1][1], tile[1][1]], color=colour)
# left
plt.plot([tile[0][0], tile[0][0]], [tile[1][0], tile[1][1]], color=colour)
def drawZ(geom, xRangeA, xRangeC):
for z in geom["z"]["a"]:
plt.plot([z, z], [xRangeA[0], xRangeA[1]], color="red")
for z in geom["z"]["c"]:
plt.plot([z, z], [xRangeC[0], xRangeC[1]], color="black")
def drawXYTrack(geom, ray):
rayO, rayD = ray
xMax = 500
ts = np.linspace(0, 1000, 100)
# z-x projection
pointsX = [rayO[0] + rayD[0] * t for t in ts]
pointsY = [rayO[1] + rayD[1] * t for t in ts]
plt.plot(pointsX, pointsY, linestyle=":", alpha=0.5)
def drawTrack(geom, ray):
rayO, rayD = ray
zMax = geom["z"]["a"][-1] + 10.0
maxT = (zMax - rayO[2]) / rayD[2]
ts = np.linspace(0, 1000, 100)
# z-x projection
pointsX = [rayO[0] + rayD[0] * t for t in ts]
pointsZ = [rayO[2] + rayD[2] * t for t in ts]
plt.plot(pointsZ, pointsX, linestyle=":", alpha=0.5)
if __name__ == "__main__":
geom = json.load(open("veloGeom.json", "r"))
decayParams = json.load(open("decayProbs.json", "r"))
bottom, right, top, left = buildTileXY(geom) # , offset = (10, 10))
drawXY(bottom, "black")
drawXY(right, "blue")
drawXY(top, "red")
drawXY(left, "green")
plt.savefig("test.pdf")
plt.clf()
volsA, volsC = boundingVolumes(geom, (bottom, right, top, left))
xRangeA = (top[0][0], top[0][1])
xRangeC = (bottom[0][0], bottom[0][1])
drawZ(geom, xRangeA, xRangeC)
plt.xlim(-400, 800)
plt.ylim(-10, 100)
plt.savefig("testZ.pdf")
plt.clf()
tracks = genTracks(geom, 1000, allFONLL=True)
# Select only those going forwards for now, *in principle* a cut on eta
tracks = list(
filter(
lambda x: np.arcsinh(x[1][2] / np.sqrt(x[1][0] ** 2 + x[1][1] ** 2)) > 3,
tracks,
)
)
print("N tracks:", len(tracks))
tracks = addDecays(decayParams, tracks)
drawXY(bottom, "black")
drawXY(right, "blue")
drawXY(top, "red")
drawXY(left, "green")
tracks, hits, hitsPix = genHits(nGen=1000, tracks=tracks)
for t in hits:
plt.plot([h[0] for h in t], [h[1] for h in t], "+")
plt.xlim(-10, 80)
plt.ylim(-10, 80)
plt.savefig("hitXY.pdf")
plt.clf()
drawXY(bottom, "black")
drawXY(right, "blue")
drawXY(top, "red")
drawXY(left, "green")
for r in np.array(tracks): # [list(toDraw)]:
drawXYTrack(geom, r)
plt.xlim(-10, 80)
plt.ylim(-10, 80)
plt.savefig("trackXY.pdf")
plt.clf()
drawZ(geom, xRangeA, xRangeC)
for track in | np.array(tracks) | numpy.array |
import unittest
import numpy
from cqcpy import test_utils
import cqcpy.spin_utils as spin_utils
import cqcpy.cc_equations as cc_equations
class CCRDMTest(unittest.TestCase):
def setUp(self):
pass
def test_1rdm_opt(self):
no = 4
nv = 8
thresh = 1e-12
T1, T2 = test_utils.make_random_T(no, nv)
L1, L2 = test_utils.make_random_L(no, nv)
pba_ref = cc_equations.ccsd_1rdm_ba(T1, T2, L1, L2)
pba_out = cc_equations.ccsd_1rdm_ba_opt(T1, T2, L1, L2)
diff = numpy.linalg.norm(pba_ref - pba_out)/numpy.sqrt(pba_ref.size)
self.assertTrue(diff < thresh, "Error in p_ba: {}".format(diff))
pji_ref = cc_equations.ccsd_1rdm_ji(T1, T2, L1, L2)
pji_out = cc_equations.ccsd_1rdm_ji_opt(T1, T2, L1, L2)
diff = numpy.linalg.norm(pji_ref - pji_out)/numpy.sqrt(pji_ref.size)
self.assertTrue(diff < thresh, "Error in p_ji: {}".format(diff))
pai_ref = cc_equations.ccsd_1rdm_ai(T1, T2, L1, L2)
pai_out = cc_equations.ccsd_1rdm_ai_opt(T1, T2, L1, L2)
diff = numpy.linalg.norm(pai_ref - pai_out)/numpy.sqrt(pai_ref.size)
self.assertTrue(diff < thresh, "Error in p_ai: {}".format(diff))
def test_2rdm_opt(self):
no = 4
nv = 8
thresh = 1e-12
T1, T2 = test_utils.make_random_T(no, nv)
L1, L2 = test_utils.make_random_L(no, nv)
pcdab_ref = cc_equations.ccsd_2rdm_cdab(T1, T2, L1, L2)
pcdab_out = cc_equations.ccsd_2rdm_cdab_opt(T1, T2, L1, L2)
diff = numpy.linalg.norm(pcdab_ref - pcdab_out)
diff /= numpy.sqrt(pcdab_ref.size)
self.assertTrue(diff < thresh, "Error in p_cdab: {}".format(diff))
pbcai_ref = cc_equations.ccsd_2rdm_bcai(T1, T2, L1, L2)
pbcai_out = cc_equations.ccsd_2rdm_bcai_opt(T1, T2, L1, L2)
diff = numpy.linalg.norm(pbcai_ref - pbcai_out)
diff /= numpy.sqrt(pbcai_ref.size)
self.assertTrue(diff < thresh, "Error in p_bcai: {}".format(diff))
pbjai_ref = cc_equations.ccsd_2rdm_bjai(T1, T2, L1, L2)
pbjai_out = cc_equations.ccsd_2rdm_bjai_opt(T1, T2, L1, L2)
diff = numpy.linalg.norm(pbjai_ref - pbjai_out)
diff /= numpy.sqrt(pbjai_ref.size)
self.assertTrue(diff < thresh, "Error in p_bjai: {}".format(diff))
pabij_ref = cc_equations.ccsd_2rdm_abij(T1, T2, L1, L2)
pabij_out = cc_equations.ccsd_2rdm_abij_opt(T1, T2, L1, L2)
diff = numpy.linalg.norm(pabij_ref - pabij_out)
diff /= numpy.sqrt(pabij_ref.size)
self.assertTrue(diff < thresh, "Error in p_abij: {}".format(diff))
pkaij_ref = cc_equations.ccsd_2rdm_kaij(T1, T2, L1, L2)
pkaij_out = cc_equations.ccsd_2rdm_kaij_opt(T1, T2, L1, L2)
diff = numpy.linalg.norm(pkaij_ref - pkaij_out)
diff /= numpy.sqrt(pkaij_ref.size)
self.assertTrue(diff < thresh, "Error in p_kaij: {}".format(diff))
pklij_ref = cc_equations.ccsd_2rdm_klij(T1, T2, L1, L2)
pklij_out = cc_equations.ccsd_2rdm_klij_opt(T1, T2, L1, L2)
diff = numpy.linalg.norm(pklij_ref - pklij_out)
diff /= numpy.sqrt(pklij_ref.size)
self.assertTrue(diff < thresh, "Error in p_klij: {}".format(diff))
def test_u1rdm(self):
noa = 3
nva = 5
nob = 2
nvb = 6
thresh = 1e-14
# use unrestricted one-particle property
Aa = test_utils.make_random_F(noa, nva)
Ab = test_utils.make_random_F(nob, nvb)
Atot = spin_utils.F_to_spin(Aa, Ab, noa, nva, nob, nvb)
# get unrestricted and general amplitudes
T1a, T1b = test_utils.make_random_T1_spatial(noa, nva, nob, nvb)
T2aa, T2ab, T2bb \
= test_utils.make_random_T2_spatial(noa, nva, nob, nvb)
L1a, L1b = test_utils.make_random_T1_spatial(nva, noa, nvb, nob)
L2aa, L2ab, L2bb \
= test_utils.make_random_T2_spatial(nva, noa, nvb, nob)
T1 = spin_utils.T1_to_spin(T1a, T1b, noa, nva, nob, nvb)
L1 = spin_utils.T1_to_spin(L1a, L1b, nva, noa, nvb, nob)
T2 = spin_utils.T2_to_spin(T2aa, T2ab, T2bb, noa, nva, nob, nvb)
L2 = spin_utils.T2_to_spin(L2aa, L2ab, L2bb, nva, noa, nvb, nob)
# make general pieces of 1-rdm
pia = L1.copy()
pba = cc_equations.ccsd_1rdm_ba_opt(T1, T2, L1, L2)
pji = cc_equations.ccsd_1rdm_ji_opt(T1, T2, L1, L2)
pai = cc_equations.ccsd_1rdm_ai_opt(T1, T2, L1, L2)
# make unrestricted 1-rdm
pia_a = L1a.copy()
pia_b = L1b.copy()
pba_a, pba_b = cc_equations.uccsd_1rdm_ba(
T1a, T1b, T2aa, T2ab, T2bb, L1a, L1b, L2aa, L2ab, L2bb)
pji_a, pji_b = cc_equations.uccsd_1rdm_ji(
T1a, T1b, T2aa, T2ab, T2bb, L1a, L1b, L2aa, L2ab, L2bb)
pai_a, pai_b = cc_equations.uccsd_1rdm_ai(
T1a, T1b, T2aa, T2ab, T2bb, L1a, L1b, L2aa, L2ab, L2bb)
# ia
ref = numpy.einsum('ia,ai->', pia, Atot.vo)
out = numpy.einsum('ia,ai->', pia_a, Aa.vo)
out += numpy.einsum('ia,ai->', pia_b, Ab.vo)
diff = abs(out - ref) / abs(ref)
self.assertTrue(diff < thresh, "Error in Pia: {}".format(diff))
# ba
ref = numpy.einsum('ba,ab->', pba, Atot.vv)
out = numpy.einsum('ba,ab->', pba_a, Aa.vv)
out += numpy.einsum('ba,ab->', pba_b, Ab.vv)
diff = abs(out - ref) / abs(ref)
self.assertTrue(diff < thresh, "Error in Pba: {}".format(diff))
# ji
ref = numpy.einsum('ji,ij->', pji, Atot.oo)
out = numpy.einsum('ji,ij->', pji_a, Aa.oo)
out += numpy.einsum('ji,ij->', pji_b, Ab.oo)
diff = abs(out - ref) / abs(ref)
self.assertTrue(diff < thresh, "Error in Pji: {}".format(diff))
# ai
ref = numpy.einsum('ai,ia->', pai, Atot.ov)
out = numpy.einsum('ai,ia->', pai_a, Aa.ov)
out += numpy.einsum('ai,ia->', pai_b, Ab.ov)
diff = abs(out - ref) / abs(ref)
self.assertTrue(diff < thresh, "Error in Pai: {}".format(diff))
def test_u2rdm(self):
noa = 3
nva = 5
nob = 2
nvb = 6
thresh = 1e-14
# use unrestricted one-particle property
Aa = test_utils.make_random_I_anti(noa, nva)
Ab = test_utils.make_random_I_anti(nob, nvb)
Aab = test_utils.make_random_Ifull_gen(
noa, nva, nob, nvb, noa, nva, nob, nvb)
Atot = spin_utils.int_to_spin2(Aa, Ab, Aab, noa, nva, nob, nvb)
# get unrestricted and general amplitudes
T1a, T1b = test_utils.make_random_T1_spatial(noa, nva, nob, nvb)
T2aa, T2ab, T2bb \
= test_utils.make_random_T2_spatial(noa, nva, nob, nvb)
L1a, L1b = test_utils.make_random_T1_spatial(nva, noa, nvb, nob)
L2aa, L2ab, L2bb \
= test_utils.make_random_T2_spatial(nva, noa, nvb, nob)
T1 = spin_utils.T1_to_spin(T1a, T1b, noa, nva, nob, nvb)
L1 = spin_utils.T1_to_spin(L1a, L1b, nva, noa, nvb, nob)
T2 = spin_utils.T2_to_spin(T2aa, T2ab, T2bb, noa, nva, nob, nvb)
L2 = spin_utils.T2_to_spin(L2aa, L2ab, L2bb, nva, noa, nvb, nob)
# make general pieces of 2-rdm
Pijab = L2.copy()
Pciab = cc_equations.ccsd_2rdm_ciab(T1, T2, L1, L2)
Pjkai = cc_equations.ccsd_2rdm_jkai(T1, T2, L1, L2)
Pcdab = cc_equations.ccsd_2rdm_cdab(T1, T2, L1, L2)
Pbjai = cc_equations.ccsd_2rdm_bjai(T1, T2, L1, L2)
Pklij = cc_equations.ccsd_2rdm_klij(T1, T2, L1, L2)
Pbcai = cc_equations.ccsd_2rdm_bcai(T1, T2, L1, L2)
Pkaij = cc_equations.ccsd_2rdm_kaij(T1, T2, L1, L2)
Pabij = cc_equations.ccsd_2rdm_abij(T1, T2, L1, L2)
# make unrestricted RDMs
Pijab_u = L2aa.copy()
PIJAB_u = L2bb.copy()
PiJaB_u = L2ab.copy()
Pciab_u, PCIAB_u, PcIaB_u, PCiAb_u = cc_equations.uccsd_2rdm_ciab(
T1a, T1b, T2aa, T2ab, T2bb, L1a, L1b, L2aa, L2ab, L2bb)
Pjkai_u, PJKAI_u, PjKaI_u, PJkAi_u = cc_equations.uccsd_2rdm_jkai(
T1a, T1b, T2aa, T2ab, T2bb, L1a, L1b, L2aa, L2ab, L2bb)
Pcdab_u, PCDAB_u, PcDaB_u = cc_equations.uccsd_2rdm_cdab(
T1a, T1b, T2aa, T2ab, T2bb, L1a, L1b, L2aa, L2ab, L2bb)
Pbjai_u, PBJAI_u, PbJaI_u, PbJAi_u, PBjaI_u, PBjAi_u \
= cc_equations.uccsd_2rdm_bjai(
T1a, T1b, T2aa, T2ab, T2bb, L1a, L1b, L2aa, L2ab, L2bb)
Pklij_u, PKLIJ_u, PkLiJ_u = cc_equations.uccsd_2rdm_klij(
T1a, T1b, T2aa, T2ab, T2bb, L1a, L1b, L2aa, L2ab, L2bb)
Pbcai_u, PBCAI_u, PbCaI_u, PBcAi_u = cc_equations.uccsd_2rdm_bcai(
T1a, T1b, T2aa, T2ab, T2bb, L1a, L1b, L2aa, L2ab, L2bb)
Pkaij_u, PKAIJ_u, PkAiJ_u, PKaIj_u = cc_equations.uccsd_2rdm_kaij(
T1a, T1b, T2aa, T2ab, T2bb, L1a, L1b, L2aa, L2ab, L2bb)
Pabij_u, PABIJ_u, PaBiJ_u = cc_equations.uccsd_2rdm_abij(
T1a, T1b, T2aa, T2ab, T2bb, L1a, L1b, L2aa, L2ab, L2bb)
# ijab
ref = numpy.einsum('ijab,abij->', Pijab, Atot.vvoo)
out = numpy.einsum('ijab,abij->', Pijab_u, Aa.vvoo)
out += numpy.einsum('ijab,abij->', PIJAB_u, Ab.vvoo)
out += 4.0*numpy.einsum('ijab,abij->', PiJaB_u, Aab.vvoo)
diff = abs(out - ref) / abs(ref + 0.001)
self.assertTrue(diff < thresh, "Error in Pijab: {}".format(diff))
# ciab
ref = numpy.einsum('ciab,abci->', Pciab, Atot.vvvo)
out = numpy.einsum('ciab,abci->', Pciab_u, Aa.vvvo)
out += numpy.einsum('ciab,abci->', PCIAB_u, Ab.vvvo)
out += 2.0*numpy.einsum('ciab,abci->', PcIaB_u, Aab.vvvo)
out += 2.0*numpy.einsum('ciab,baic->', PCiAb_u, Aab.vvov)
diff = abs(out - ref) / abs(ref + 0.001)
self.assertTrue(diff < thresh, "Error in Pciab: {}".format(diff))
# jkai
ref = numpy.einsum('jkai,aijk->', Pjkai, Atot.vooo)
out = numpy.einsum('jkai,aijk->', Pjkai_u, Aa.vooo)
out += numpy.einsum('jkai,aijk->', PJKAI_u, Ab.vooo)
out += 2.0*numpy.einsum('jKaI,aIjK->', PjKaI_u, Aab.vooo)
out += 2.0*numpy.einsum('JkAi,iAkJ->', PJkAi_u, Aab.ovoo)
diff = abs(out - ref) / abs(ref + 0.001)
self.assertTrue(diff < thresh, "Error in Pciab: {}".format(diff))
# cdab
ref = numpy.einsum('cdab,abcd->', Pcdab, Atot.vvvv)
out = | numpy.einsum('cdab,abcd->', Pcdab_u, Aa.vvvv) | numpy.einsum |
from __future__ import absolute_import, division
# External modules
import logging, os, sys
import numpy as np
from astropy.table import Table, Column
from astropy.cosmology import WMAP9 as cosmo
from astropy import units as u
if sys.version_info[0] >= 3:
from io import StringIO
else:
from cStringIO import StringIO
# Local modules
from .convert_units import (DivideInterval, RadiiUnknown2Arcsec, RadiiUnknown2Parsec, RescaleArray)
from ..stellar_module import StarGenerator
from ..stellar_module import StarGenerator
from ..utilities import GetStipsData
from ..utilities import OffsetPosition
from ..utilities import SelectParameter
from ..utilities import StipsDataTable
#-----------
class SceneModule(object):
#-----------
def __init__(self, **kwargs):
"""
Noiseless scene generator module.
:Author: <NAME>
:Organization: Space Telescope Science Institute
:History:
* 2010/10/19 PLL created this module.
* 2011/06/14 PLL added single star simulation.
* 2011/06/28 PLL reorganized functions.
* 2011/10/28 PLL added galaxies simulation.
* 2014/02/14 BY modified the code to be instrument-independent
Examples
--------
>>> from stips import SceneModule
Parameters
----------
self: obj
Class instance.
**kwargs: dictionary
Additional arguments needed to make the scene
"""
self.out_path = SelectParameter('out_path', kwargs)
self.prefix = kwargs.get('out_prefix', 'sim')
self.cat_type = SelectParameter('cat_type', kwargs)
if 'logger' in kwargs:
self.logger = kwargs['logger']
else:
self.logger = logging.getLogger('__stips__')
log_level = SelectParameter('log_level', kwargs)
print("Log level: {}".format(log_level))
self.logger.setLevel(getattr(logging, log_level))
if not len(self.logger.handlers):
stream_handler = logging.StreamHandler(sys.stderr)
stream_handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s: %(message)s'))# [in %(pathname)s:%(lineno)d]'))
self.logger.addHandler(stream_handler)
if 'scene_general' in kwargs:
self.ra = kwargs['scene_general'].get('ra', 0.0)
self.dec = kwargs['scene_general'].get('dec', 0.0)
self.seed = SelectParameter('seed', kwargs['scene_general'])
else:
self.ra = kwargs.get('ra', 0.0)
self.dec = kwargs.get('dec', 0.0)
self.seed = SelectParameter('seed', kwargs)
self.params = [ 'Random seed: {}'.format(self.seed) ]
msg = 'Centre (RA,DEC) = ({:.3f},{:.3f})'
self.params += [ msg.format(self.ra, self.dec) ]
self.catalogues = {}
#-----------
def CreatePopulation(self, pop, id=0):
"""
Generate a stellar population.
Output list will have these columns:
# ID
# RA
# DEC
# Distance
# Age
# Metallicity
# Mass
# Count Rate (in the chosen instrument/filter), Absolute
# Count Rate (in the chosen instrument/filter), Observed
Parameters
----------
self: obj
Class instance.
pop: dictionary
Information about the population. Includes:
n_stars: int
Number of stars
age_low,age_high: floating point
Minimum and maximum ages.
z_low,z_high: floating point
Minimum and maximum metallicities.
imf: string
Initial Mass Function for the population
alpha: float
Exponent (if imf = 'powerlaw')
binary_fraction: float
Binary Fraction
distribution: string
Stellar distribution in the sky (e.g. power law, inverse power law, uniform, etc.)
clustered: bool
Cluster higher masses to centre?
radius: float
Radius in (units)
radius_units: string
Units of radius (above)
distance_low,distance_high: floating point
Minimum and maximum distance (in kpc) of the 'cluster'
offset_ra,offset_dec: floating point
Offset of the cluster from the scene centre in arcseconds
Returns
-------
outList: string
The catalogue file produced
"""
star_chunk = 100000
age_bins = DivideInterval("1.0e6,1.35e10,d5")
met_bins = DivideInterval("-2.5,0.5,i0.1")
out_file = "{}_stars_{:03d}.{}".format(self.prefix, id, self.cat_type)
outList = os.path.join(self.out_path, out_file)
if os.path.isfile(outList):
os.remove(outList) # No append
data_table = StipsDataTable.dataTableFromFile(outList)
self._log("info","Creating catalogue {}".format(outList))
n_stars = int(pop['n_stars'])
age_l = age_bins[(np.abs(age_bins-float(pop['age_low']))).argmin()]
age_h = age_bins[(np.abs(age_bins-float(pop['age_high']))).argmin()]
age_bins = age_bins[np.where((age_bins>=age_l) & (age_bins<=age_h))]
met_l = met_bins[(np.abs(met_bins-float(pop['z_low']))).argmin()]
met_h = met_bins[(np.abs(met_bins-float(pop['z_high']))).argmin()]
met_bins = met_bins[np.where((met_bins>=met_l) & (met_bins<=met_h))]
imf = pop['imf']
alpha = abs(float(pop['alpha']))
distribution = pop['distribution']
clustered = pop['clustered']
radius = float(pop['radius'])
rad_units = pop['radius_units']
dist_l = float(pop['distance_low']) * 1.e3 #convert kpc to pc
dist_h = float(pop['distance_high']) * 1.e3 #convert kpc to pc
binary_fraction = float(pop['binary_fraction'])
offset_ra = float(pop['offset_ra'])/3600. #offset in RA arcseconds, convert to degrees.
offset_dec = float(pop['offset_dec'])/3600. #offset in DEC arcseconds, convert to degrees.
metadata = {'type': 'phoenix', 'id': id, 'n_stars': n_stars, 'age_l': age_l, 'age_h': age_h,
'met_l': met_l, 'met_h': met_h, 'imf': imf, 'alpha': alpha,
'distribution': distribution, 'clustered': clustered, 'radius': radius,
'radius_units': rad_units, 'dist_l': dist_l, 'dist_h': dist_h,
'offset_ra': offset_ra, 'offset_dec': offset_dec,
'name': 'Phoenix Stellar Population Table', 'bandpass': 'johnson,i'}
data_table.meta = metadata
self._log("info","Creating age and metallicity numbers")
# ages = np.random.RandomState(seed=self.seed).random_sample(size=len(age_bins))
ages = np.random.random_sample(size=len(age_bins))
ages /= ages.sum()
# mets = np.random.RandomState(seed=self.seed).random_sample(size=len(met_bins))
mets = np.random.random_sample(size=len(met_bins))
mets /= mets.sum()
self._log("info","Created age and metallicity numbers")
self._log("info","Creating stars")
#Generate star masses
datasets = 0
total = 0
for i, age in enumerate(age_bins):
self.logger.info("Age %g",age)
n_age = int(round(n_stars * ages[i]))
for j, met in enumerate(met_bins):
self.logger.info("Metallicity %f",met)
num_stars = int(round(n_age * mets[j]))
if num_stars == 0:
continue
self.logger.info("Creating %d stars",num_stars)
stargen = StarGenerator(age, met, imf=imf, alpha=alpha, seed=self.seed, logger=self.logger)
all_masses, all_rates, all_temps, all_gravs = stargen.make_cluster(num_stars)
all_x, all_y, all_z = self._MakeCoords(num_stars, radius, func=distribution, scale=2.8, do_z=True)
# all_distances = np.random.RandomState(seed=self.seed).uniform(low=dist_l, high=dist_h, size=num_stars)
all_distances = np.random.uniform(low=dist_l, high=dist_h, size=num_stars)
if clustered:
all_x, all_y, all_z = self._CenterObjByMass(all_x, all_y, all_masses, z=all_z)
# all_binaries = np.random.RandomState(seed=self.seed).binomial(1,binary_fraction,len(all_masses))
all_binaries = np.random.binomial(1,binary_fraction,len(all_masses))
idx = np.where(all_binaries==1)[0]
mb, rb, tb, gb = stargen.make_cluster(len(idx))
xb, yb, zb = all_x[idx], all_y[idx], all_z[idx]
db = all_distances[idx]
all_masses = np.insert(all_masses, idx, mb)
all_rates = np.insert(all_rates, idx, rb)
all_temps = np.insert(all_temps, idx, tb)
all_gravs = np.insert(all_gravs, idx, gb)
all_x = np.insert(all_x, idx, xb)
all_y = np.insert(all_y, idx, yb)
all_z = np.insert(all_z, idx, zb)
all_distances = np.insert(all_distances, idx, db)
all_binaries = np.insert(all_binaries, idx+1, 0)
num_stars += len(idx)
cached_ra = 0.
cached_dec = 0.
cached_distance = 0.
cached = False
for k in range(num_stars // star_chunk + 1):
xl, xh = k * star_chunk, min(k * star_chunk + star_chunk, num_stars-1)
star_set = xh - xl
self._log("info", "Chunk {}: {} stars".format(k+1, star_set))
masses = all_masses[xl:xh]
rates = all_rates[xl:xh]
temps = all_temps[xl:xh]
gravs = all_gravs[xl:xh]
x, y, z = all_x[xl:xh], all_y[xl:xh], all_z[xl:xh]
distances = all_distances[xl:xh]
binaries = all_binaries[xl:xh]
ids = np.arange(total + xl, total + xh) + 1
x = RadiiUnknown2Arcsec(x, rad_units, distances)
y = RadiiUnknown2Arcsec(y ,rad_units, distances)
z = RadiiUnknown2Parsec(z, rad_units, distances)
distances += z
ras = x/3600. #decimal degrees
decs = y/3600. #decimal degrees
base_ra,base_dec = OffsetPosition(self.ra,self.dec,offset_ra,offset_dec)
decs += base_dec
idxg = np.where(decs>90.)
idxl = np.where(decs<-90.)
decs[idxg] = 180. - decs[idxg]
ras[idxg] = 180. + ras[idxg]
decs[idxl] = -180. - decs[idxl]
ras[idxl] = 180. + ras[idxl]
ras = (ras + base_ra)%360
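# absolute -> apparent magnitude via the distance modulus: m = M + 5*log10(d[pc]) - 5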
apparent_rates = rates + (5.0 * np.log10(distances) - 5.0)
t = Table()
t['id'] = Column(data=ids, format="%8d")
t['ra'] = Column(data=ras, unit=u.deg, format="%17.9e")
t['dec'] = Column(data=decs, unit=u.deg, format="%17.9e")
t['distance'] = Column(data=distances, unit='pc', format="%17.9e")
t['age'] = Column(data=np.full_like(ids, age), unit=u.yr, format="%17d")
t['metallicity'] = Column(data=np.full_like(ras, met), format="%4.1f")
t['mass'] = Column(data=masses,unit=u.Msun, format="%17.9e")
t['teff'] = Column(data=temps, unit='K', format="%13.8f")
t['log_g'] = Column(data=gravs, format="%12.9f")
t['binary'] = Column(data=binaries, format="%3d")
t['dataset'] = Column(data=np.full_like(ids, datasets), format="%6d")
t['absolute'] = Column(data=rates,unit=u.mag, format="%14.6e")
t['apparent'] = Column(data=apparent_rates,unit=u.mag, format="%12.4e")
data_table.write_chunk(t)
del t
datasets += 1
total += num_stars
self._log("info","Done creating catalogue")
return outList
#-----------
def CreateGalaxies(self, gals, id=0):
"""
Generate galaxies list.
Output list will have these columns:
# ID
# RA
# DEC
# Redshift
# Model
# Age
# Profile
# Half-flux_radius
# Axial_ratio
# Position_angle
# Johnson,V absolute
# Johnson,V apparent
Parameters
----------
self: obj
Class instance.
gals: dictionary
Information about the galaxies. Includes:
n_gals: int
Number of galaxies
z_low,z_high: float
Minimum and maximum redshifts (converted to distances?).
rad_low,rad_high: float
Minimum and maximum galactic half-light radii (in arcseconds)
sb_v_low, sb_v_high: float
Minimum and maximum V-band average surface brightness within rad
distribution: string
Stellar distribution in the sky (e.g. power law, inverse power law, uniform, etc.)
clustered: bool
Cluster higher masses to centre?
radius: float
Radius in (units)
radius_units: string
Units of radius (above)
offset_ra,offset_dec: float
Offset of cluster from scene centre in mas
Returns
-------
outList: string
The catalogue file produced
"""
bc95_models = np.array(('a','b','c','d','e','f'))
bc95_ages = np.array(("10E5","25E5","50E5","76E5","10E6","25E6","50E6","10E7","50E7","10E8","50E8","10E9"))
out_file = "{}_gals_{:03d}.{}".format(self.prefix, id, self.cat_type)
outList = os.path.join(self.out_path, out_file)
if os.path.isfile(outList):
os.remove(outList) # No append
data_table = StipsDataTable.dataTableFromFile(outList)
# Write star list (overwrite)
self.logger.info("Creating catalogue %s",outList)
# Generate galaxy list
n_gals = int(gals['n_gals'])
z_l = float(gals['z_low'])
z_h = float(gals['z_high'])
r_l = float(gals['rad_low'])
r_h = float(gals['rad_high'])
m_l = float(gals['sb_v_low'])
m_h = float(gals['sb_v_high'])
distribution = gals['distribution']
clustered = gals['clustered']
radius = float(gals['radius'])
rad_units = gals['radius_units']
offset_ra = float(gals['offset_ra'])/3600. #offset in RA arcseconds, convert to degrees.
offset_dec = float(gals['offset_dec'])/3600. #offset in DEC arcseconds, convert to degrees.
self._log("info","Wrote preamble")
self._log("info","Parameters are: {}".format(gals))
ids = np.arange(n_gals)
# Roughly 50% spiral, 50% elliptical
ellipRatio = 0.5
# binoDist = np.random.RandomState(seed=self.seed).binomial(1, ellipRatio, n_gals)
binoDist = np.random.binomial(1, ellipRatio, n_gals)
idx_ellip = np.where(binoDist == 1)
idx_spiral = np.where(binoDist != 1)
types = np.array( ['expdisk'] * n_gals )
types[idx_ellip] = 'devauc'
n_ellip = len( idx_ellip[0] )
n_spiral = n_gals - n_ellip
# Axial ratio
# Spiral = 0.1 to 1
# Elliptical = 0.5 to 1
axialRatioSpiralMin, axialRatioSpiralMax = 0.1, 1.0
axialRatioEllipMin, axialRatioEllipMax = 0.5, 1.0
axials = np.zeros(n_gals)
# axials[idx_spiral] = np.random.RandomState(seed=self.seed).uniform(axialRatioSpiralMin, axialRatioSpiralMax, n_spiral)
# axials[idx_ellip] = np.random.RandomState(seed=self.seed).uniform(axialRatioEllipMin, axialRatioEllipMax, n_ellip)
axials[idx_spiral] = np.random.uniform(axialRatioSpiralMin, axialRatioSpiralMax, n_spiral)
axials[idx_ellip] = np.random.uniform(axialRatioEllipMin, axialRatioEllipMax, n_ellip)
# Position angle
posAngleAlgo = 'uniform'
# angles = np.random.RandomState(seed=self.seed).uniform(0.0, 359.9, n_gals)
angles = np.random.uniform(0.0, 359.9, n_gals)
# Half-flux radius - uniform
# rads = np.random.RandomState(seed=self.seed).uniform(r_l, r_h, n_gals)
rads = np.random.uniform(r_l, r_h, n_gals)
# Redshifts
# If both z_low and z_high are zero, do local galaxies. Distance is 0.5 Mpc -- 50 Mpc.
# In the future, offer an option for straight distance or redshift.
if z_l == 0. and z_h == 0.:
z_label = "distance"
# distances = np.random.RandomState(seed=self.seed).uniform(5.e5, 5.e7, n_gals)
distances = np.random.uniform(5.e5, 5.e7, n_gals)
zs = distances / 1.e3
convs = np.log10(distances)
else:
z_label = "redshift"
# zs = np.random.RandomState(seed=self.seed).uniform(z_l, z_h, n_gals)
zs = np.random.uniform(z_l, z_h, n_gals)
distances = np.array(cosmo.comoving_distance(zs).to(u.pc))
convs = np.log10(np.array(cosmo.luminosity_distance(zs).to(u.pc)))
# Luminosity function - power law
lumPow = -1.8
# vmags = np.random.RandomState(seed=self.seed).power(np.abs(lumPow)+1.0, size=n_gals)
vmags = np.random.power(np.abs(lumPow)+1.0, size=n_gals)
if lumPow < 0: vmags = 1.0 - vmags
vmags = RescaleArray(vmags, m_l, m_h)
vmags_abs = vmags - 5*(convs-1.)
# models = np.random.RandomState(seed=self.seed).choice(bc95_models,size=n_gals)
# ages = np.random.RandomState(seed=self.seed).choice(bc95_ages,size=n_gals)
models = np.random.choice(bc95_models,size=n_gals)
ages = np.random.choice(bc95_ages,size=n_gals)
self._log("info","Making Co-ordinates")
x,y = self._MakeCoords(n_gals,radius,func=distribution,scale=2.8)
x = RadiiUnknown2Arcsec(x,rad_units,distances)
y = RadiiUnknown2Arcsec(y,rad_units,distances)
if clustered:
self._log("info","Clustering")
x,y = self._CenterObjByMass(x,y,1/vmags)
self._log("info","Converting Co-ordinates into RA,DEC")
ras = x/3600. #decimal degrees
decs = y/3600. #decimal degrees
base_ra,base_dec = OffsetPosition(self.ra,self.dec,offset_ra,offset_dec)
decs += base_dec
idxg = np.where(decs>90.)
idxl = np.where(decs<-90.)
decs[idxg] = 180. - decs[idxg]
ras[idxg] = 180. + ras[idxg]
decs[idxl] = -180. - decs[idxl]
ras[idxl] = 180. + ras[idxl]
ras = (ras + base_ra)%360
metadata = {'type': 'bc95', 'id': id, 'n_gals': n_gals, 'z_l': z_l, 'z_h': z_h,
'radius_l': r_l, 'radius_h': r_h, 'sb_v_l': m_l, 'sb_v_h': m_h,
'distribution': distribution, 'clustered': clustered, 'radius': radius,
'radius_units': rad_units, 'offset_ra': offset_ra, 'offset_dec': offset_dec,
'name': 'Galaxy Population Table', 'bandpass': 'johnson,v'}
data_table.meta = metadata
t = Table()
t['id'] = Column(data=ids)
t['ra'] = Column(data=ras,unit=u.deg)
t['dec'] = Column(data=decs,unit=u.deg)
t[z_label] = Column(data=zs)
t['model'] = Column(data=models)
t['age'] = Column(data=ages,unit=u.yr)
t['profile'] = Column(data=types)
t['radius'] = Column(data=rads)
t['axial_ratio'] = Column(data=axials,unit=u.deg)
t['pa'] = Column(data=angles,unit=u.deg)
t['absolute_surface_brightness'] = Column(data=vmags_abs,unit=u.mag)
t['apparent_surface_brightness'] = Column(data=vmags,unit=u.mag)
data_table.write_chunk(t)
self._log("info","Done creating catalogue")
return outList
#-----------
def _CenterObjByMass(self, x, y, mass, z=None):
"""
Place slightly more massive stars near image center
to simulate mass segregation.
Parameters
----------
self: obj
Class instance.
x, y: array_like
Initial coordinates of object placement.
mass: array_like
Stellar masses.
z: array_like, optional
Initial z co-ordinates of object placement. If provided, return 3D co-ordinates.
Returns
-------
new_x, new_y, [new_z]: array_like
Re-ordered `x` and `y`.
"""
x_cen = 0.
y_cen = 0.
z_cen = 0.
n_stars = x.size
# Central coordinates will have smallest values
dx = x - x_cen
dy = y - y_cen
if z is not None:
dz = z - z_cen
d = np.sqrt(dy*dy + dx*dx + dz*dz)
else:
d = np.sqrt(dy*dy + dx*dx)
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import pandas as pd
import numpy as np
import pickle
class Net(nn.Module):
def __init__(self, input_size):
super(Net, self).__init__()
self.dense1 = nn.Linear(input_size, 2000)
self.dense2 = nn.Linear(2000, 2000)
self.out = nn.Linear(2000, 1)
def forward(self, x):
x = F.relu(self.dense1(x))
x = F.relu(self.dense2(x))
x = F.relu(self.dense2(x))
x = torch.sigmoid(self.out(x))
return x
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Load and process the data
df = pd.read_csv('sequence_data.tsv',sep='\t')
with open('all','rb') as f:
test = pickle.load(f)
status = df.iloc[test].status
status = np.array(status)
#pos = 6931
#neg = 101232
#total = pos + neg
gen = np.load('genomic.npy')
gen = np.reshape(gen[test], (gen[test].shape[0], gen[test].shape[1], gen[test].shape[2]))
signal = np.load('signal.npy')
signal = np.reshape(signal[test], (signal[test].shape[0], signal[test].shape[1], signal[test].shape[2]))
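# The original script is truncated here. Below is a minimal, illustrative sketch of how the
# arrays loaded above could be fed to Net; flattening `gen` and `signal` per sample and
# concatenating them into one feature vector is an assumption, not the original code.
def _example_training_step(gen, signal, status):
    # Flatten each sample and concatenate genomic and signal features (assumed layout).
    features = np.concatenate([gen.reshape(gen.shape[0], -1),
                               signal.reshape(signal.shape[0], -1)], axis=1)
    x = torch.tensor(features, dtype=torch.float32, device=device)
    y = torch.tensor(status, dtype=torch.float32, device=device).unsqueeze(1)
    model = Net(input_size=x.shape[1]).to(device)
    optimizer = optim.Adam(model.parameters(), lr=1e-4)
    criterion = nn.BCELoss()  # matches the sigmoid output of Net
    optimizer.zero_grad()
    loss = criterion(model(x), y)
    loss.backward()
    optimizer.step()
    return loss.item()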
# HaloFeedback
import warnings
from abc import ABC, abstractmethod
import matplotlib.pyplot as plt
import numpy as np
from scipy.integrate import simpson
from scipy.special import ellipeinc, ellipkinc, ellipe, betainc
from scipy.special import gamma as Gamma
from scipy.special import beta as Beta
# ------------------
G_N = 4.300905557082141e-3 # [(km/s)^2 pc/M_sun] [Legacy: 4.3021937e-3]
c = 299792.458 # [km/s] [Legacy: 2.9979e5]
# Conversion factors
pc_to_km = 3.085677581491367e13 # [km] [Legacy: 3.085677581e13]
# Numerical parameters
N_GRID = 10000 # Number of grid points in the specific energy.
N_KICK = 50 # Number of points to use for integration over Delta-epsilon. [Legacy: 50]
float_2eps = 2.0 * np.finfo(float).eps
# ------------------
def ellipeinc_alt(phi, m):
""" An alternative elliptic function that is valid for m > 1."""
beta = np.arcsin(np.clip(np.sqrt(m) * np.sin(phi), 0, 1))
return np.sqrt(m) * ellipeinc(beta, 1 / m) + ((1 - m) / np.sqrt(m)) * ellipkinc(beta, 1 / m)
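# Quick sanity check (added for illustration, not part of the original module): the
# reciprocal-modulus form above should join continuously onto scipy's ellipeinc at m = 1.
if __name__ == "__main__":
    _phi = 0.7
    # E(phi | m) is continuous in m, so these two values should agree to ~1e-3
    print(ellipeinc(_phi, 0.999), ellipeinc_alt(_phi, 1.001))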
class DistributionFunction(ABC):
"""
Base class for phase space distribution of a DM spike surrounding a black
hole with an orbiting body. Child classes must implement the following:
Methods
- rho_init(): initial density function
- f_init() initial phase-space distribution function
Attributes
- r_sp: DM halo extent [pc]. Used for making grids for the calculation.
- IDstr_model: ID string used for file names.
"""
def __init__(self, m1: float = 1e3, m2: float = 1.0, mDM: float = 0):
self.m1 = m1 # [M_sun]
self.m2 = m2 # [M_sun]
self.mDM = mDM # [M_sun]
self.r_isco = 6.0 * G_N * m1 / c ** 2
# Initialise grid of r, eps and f(eps) and append an extra loose grid far away.
self.r_grid = np.geomspace(self.r_isco, 1e5 * self.r_isco, int(0.9 *N_GRID))
self.r_grid = np.append(
self.r_grid, np.geomspace(1.01 * self.r_grid[-1], 1e3 * self.r_sp, int(0.1*N_GRID))
)
self.r_grid = np.sort(self.r_grid)
self.eps_grid = self.psi(self.r_grid)
self.f_eps = self.f_init(self.eps_grid)
# Density of states
self.DoS = (
np.sqrt(2) * (np.pi * G_N * self.m1) ** 3 * self.eps_grid ** (-5/2)
)
# Define a string which specifies the model parameters
# and numerical parameters (for use in file names etc.)
self.IDstr_num = "lnLambda=%.1f" % (np.log(np.sqrt(m2/m1)),)
@abstractmethod
def rho_init(self, r):
""" The initial dark matter density [M_sun/pc^3] of the system at distance r from the
halo center.
Parameters:
- r : distance [pc] from center of spike.
"""
pass
@abstractmethod
def f_init(self, eps):
""" The initial phase-space distribution function at energy eps.
Parameters
- eps : float or np.array Energy per unit mass in (km/s)^2
"""
pass
def plotDF(self):
""" Plots the initial and current distribution function of the spike. """
plt.figure()
plt.loglog(self.eps_grid, self.f_init(self.eps_grid), "k--", label = "Initial DF")
plt.loglog(self.eps_grid, self.f_eps)
plt.ylabel(r"$f(\mathcal{E})$ [$M_\odot$ pc$^{-3}$ (km/s)$^{-3}$]")
plt.xlabel(r"$\mathcal{E} = \Psi(r) - \frac{1}{2}v^2$ [(km/s)$^2$]")
plt.legend()
plt.show()
return plt.gca()
def psi(self, r: float) -> float:
""" The gravitational potential [km^2/s^2] at distance r [pc]."""
return G_N *self.m1 /r # [km^2/s^2]
def v_max(self, r: float) -> float:
""" The maximum velocity [km/s] allowed for bound orbits in the system at position r [pc]."""
return np.sqrt(2 * self.psi(r)) # [km/s]
def rho(self, r: float, v_cut: float = -1) -> float:
""" Returns the local density [M_sun/pc^3] of the dark matter particles at position
r [pc] from the halo center, that move slower than v_cut [km/s].
Parameters:
- r: The distance from the dark matter halo center.
- v_cut : maximum speed to include in density calculation
(defaults to v_max if not specified)
"""
if v_cut < 0: v_cut = self.v_max(r)
v_cut = np.clip(v_cut, 0, self.v_max(r))
vlist = np.sqrt(np.linspace(0, v_cut ** 2, 20000))
# Interpolate the integrand onto the new array vlist.
flist = np.interp(self.psi(r) - 0.5 * vlist ** 2,
self.eps_grid[::-1], self.f_eps[::-1],
left = 0, right = 0,
)
integ = vlist ** 2 * flist
return 4 * np.pi *simpson(integ, vlist) # [M_sun/pc^3]
def averageVelocity(self, r: float) -> float:
""" Returns the local average velocity [km/s] <u> from the velocity distribution of the
dark matter particles at position r [pc] from the halo center.
"""
v_cut = self.v_max(r)
# Interpolate the integrand onto the new array vlist.
v_cut = np.clip(v_cut, 0, self.v_max(r))
vlist = np.sqrt(np.linspace(0, v_cut**2, 250))
flist = np.interp(self.psi(r) -0.5 *vlist **2,
self.eps_grid[::-1], self.f_eps[::-1],
left = 0, right = 0,
)
integ = vlist ** 3 * flist
return np.sqrt(np.trapz(integ, vlist) / np.trapz(vlist ** 2 * flist, vlist)) # [km/s]
def averageSquaredVelocity(self, r: float) -> float:
""" Returns the local average squared velocity [km/s] <u^2> (or root mean squared velocity) from the velocity distribution of the
dark matter particles at position r [pc] from the halo center.
"""
v_cut = self.v_max(r)
# Interpolate the integrand onto the new array vlist.
v_cut = np.clip(v_cut, 0, self.v_max(r))
vlist = np.sqrt(np.linspace(0, v_cut**2, 250))
flist = np.interp(self.psi(r) -0.5 *vlist **2,
self.eps_grid[::-1], self.f_eps[::-1],
left = 0, right = 0,
)
integ = vlist ** 4 * flist
return np.sqrt(np.trapz(integ, vlist) / np.trapz(vlist ** 2 * flist, vlist)) # [km/s]
def velocityDispersion(self, r: float) -> float:
""" Returns the local velocity dispersion [km/s] from the velocity distribution of the dark matter
particles at position r [pc] from the halo center.
"""
u2 = self.averageSquaredVelocity(r)
u = self.averageVelocity(r)
return np.sqrt(u2 -u**2) # [km/s]
def m(self) -> float:
""" The total mass [M_sun] of the binary system. """
return self.m1 +self.m2 # [M_sun]
def mu(self) -> float:
""" The reduced mass [M_sun] of the binary system. """
return self.m1 *self.m2 /self.m() # [M_sun]
def totalMass(self) -> float:
""" The total mass of dark matter particles in the halo. """
return simpson(-self.P_eps(), self.eps_grid)
def totalEnergy(self) -> float:
""" The total energy of the dark matter halo. """
return simpson(-self.P_eps() * self.eps_grid, self.eps_grid)
def b_90(self, r2: float, Delta_u: float) -> float:
""" The impact parameter [pc] at which dark matter particles are deflected at a 90 degree angle.
Delta_u relative velocity of the orbiting body and dark matter particles, usually set at u_orb
of the companion object m2.
"""
return G_N *(self.m2 +self.mDM) / (Delta_u ** 2) # [pc]
def b_min(self, r2: float, v_orb: float) -> float:
""" The minimum impact parameter [pc] is the radius of the companion m2. """
return self.R/pc_to_km if self.R != -1 else 6.0 * G_N * self.m2/ c ** 2 # [pc]
def b_max(self, r2: float, v_orb: float = -1) -> float:
""" The maximum impact parameter [pc] as calculated from gravitational force equivalance O(sqrt(q)).
Parameters:
- r2 is the separation [pc] of the two components.
- v_orb is the instant velocity [km/s] of the orbiting body. If not specified, defaults to circular orbital velocity.
"""
if v_orb == -1: v_orb = np.sqrt(G_N * (self.m1 + self.m2) / r2) # [km/s]
return np.sqrt(self.m2/self.m1) *r2 # [pc]
def Lambda(self, r2: float, v_orb: float = -1) -> float:
""" The coulomb logarithm of the dynamical friction force induced by the dark matter particles.
Parameters:
- r2 is the separation [pc] of the two components.
- v_orb is the instant velocity [km/s] of the orbiting body. If not specified, defaults to circular orbital velocity.
"""
if v_orb == -1: v_orb = np.sqrt(G_N * (self.m1 + self.m2) / r2) # [km/s]
b90 = self.b_90(r2, v_orb) # [pc]
return np.sqrt((self.b_max(r2, v_orb)**2 +b90**2)/(self.b_min(r2, v_orb)**2 +b90**2))
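# Note: this method returns the Coulomb-logarithm argument Lambda itself (not ln Lambda);
# the logarithm is taken where it is used, e.g. np.log(self.Lambda(r, v_orb)) in dEdt_DF().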
def eps_min(self, r2: float, v_orb: float) -> float:
""" The minimum energy for the average delta_eps calculation in calc_delta_eps()."""
return 2 * v_orb ** 2 / (1 + self.b_max(r2, v_orb) ** 2 / self.b_90(r2, v_orb) ** 2)
def eps_max(self, r2: float, v_orb: float) -> float:
return 2 * v_orb ** 2 / (1 + self.b_min(r2, v_orb) ** 2 / self.b_90(r2, v_orb) ** 2)
def df(self, r2: float, v_orb: float, v_cut: float = -1) -> np.array:
"""The change of the distribution function f(eps) during an orbit.
Parameters:
- r2 is the radial position [pc] of the perturbing body.
- v_orb is the orbital velocity [km/s] of the perturbing body.
- v_cut (optional), only scatter with particles slower than v_cut [km/s]
defaults to v_max(r) (i.e. all particles).
"""
df_minus = self.df_minus(r2, v_orb, v_cut, N_KICK)
df_plus = self.df_plus(r2, v_orb, v_cut, N_KICK)
# TODO: What is this meant for?
N_plus = 1 # np.trapz(self.DoS*f_plus, self.eps_grid)
N_minus = 1 # np.trapz(-self.DoS*f_minus, self.eps_grid)
return df_minus + df_plus *(N_minus/N_plus)
def dfdt(self, r2: float, v_orb: float, v_cut: float = -1) -> np.array:
"""Time derivative of the distribution function f(eps).
Parameters:
- r2 is the radial position [pc] of the perturbing body.
- v_orb is the orbital velocity [km/s] of the perturbing body.
- v_cut (optional), only scatter with particles slower than v_cut [km/s]
defaults to v_max(r) (i.e. all particles).
"""
T_orb = self.T_orb(r2) # [s]
return self.df(r2, v_orb, v_cut) /T_orb
def delta_f(self, r0: float, v_orb: float, dt: float, v_cut: float = -1) -> np.array:
"""[Deprecated] This shouldn't be used in new applications. TODO: Remove?
Change in f over a time-step dt where it is automatically
adjusted to prevent f_eps from becoming negative.
Parameters:
- r2 is the radial position [pc] of the perturbing body.
- v_orb is the orbital velocity [km/s] of the perturbing body.
- dt: time-step [s]
- v_cut (optional), only scatter with particles slower than v_cut [km/s]
defaults to v_max(r) (i.e. all particles).
"""
f_minus = self.dfdt_minus(r0, v_orb, v_cut, N_KICK) * dt
# Don't remove more particles than there are particles...
correction = np.clip(self.f_eps / (-f_minus + 1e-50), 0, 1)
f_minus = np.clip(f_minus, -self.f_eps, 0)
f_plus = self.dfdt_plus(r0, v_orb, v_cut, N_KICK, correction) * dt
return f_minus + f_plus
def P_delta_eps(self, r: float, v: float, delta_eps: float) -> float:
""" Calcuate PDF for delta_eps. """
norm = self.b_90(r, v) ** 2 / (self.b_max(r, v) ** 2 - self.b_min(r, v) ** 2)
return 2 * norm * v ** 2 / (delta_eps ** 2)
def P_eps(self):
"""Calculate the PDF d{P}/d{eps}"""
return (
np.sqrt(2)
* np.pi ** 3
* (G_N * self.m1) ** 3
* self.f_eps
/ self.eps_grid ** 2.5
)
def calc_delta_eps(self, r: float, v: float, n_kick: int = 1) -> list:
""" Calculate average delta_eps integrated over different bins (and the corresponding
fraction of particles which scatter with that delta_eps).
"""
eps_min = self.eps_min(r, v)
eps_max = self.eps_max(r, v)
norm = self.b_90(r, v) ** 2 / (self.b_max(r, v) ** 2 - self.b_min(r, v) ** 2)
eps_edges = np.linspace(eps_min, eps_max, n_kick + 1)
def F_norm(eps):
return -norm * 2 * v ** 2 / (eps)
def F_avg(eps):
return -norm * 2 * v ** 2 * np.log(eps)
frac = np.diff(F_norm(eps_edges))
eps_avg = np.diff(F_avg(eps_edges)) / frac
return eps_avg, frac
def dEdt_DF(self, r: float, v_orb: float = -1, v_cut: float = -1, average: bool = False) -> float:
"""Rate of change of energy due to DF (km/s)^2 s^-1 M_sun.
Parameters:
- r is the radial position of the perturbing body [pc]
- v_orb the velocity [km/s] of the body, when not given assume circular Keplerian orbits.
- v_cut (optional), only scatter with particles slower than v_cut [km/s]
defaults to v_max(r) (i.e. all particles)
- average determines whether to average over different radii
(average = False is default and should be correct).
"""
if v_orb < 0: v_orb = np.sqrt(G_N * (self.m1 + self.m2) / r) # [km/s]
if average:
warnings.warn(
"Setting 'average = True' is not necessarily the right thing to do..."
)
r_list = r + np.linspace(-1, 1, 3) * self.b_max(r, v_orb)
rho_list = np.array([self.rho(r1, v_cut) for r1 in r_list])
rho_eff = np.trapz(rho_list * r_list, r_list) / np.trapz(r_list, r_list)
else:
rho_eff = self.rho(r, v_cut)
return 4 *np.pi * G_N **2 * self.m2 *(self.m2 +self.mDM) * rho_eff * np.log(self.Lambda(r, v_orb)) / v_orb /pc_to_km # [km]
def E_orb(self, a: float) -> float:
""" The orbital energy of the binary system at semi-major axis [pc]. """
return -0.5 * G_N * (self.m1 + self.m2) / a
def T_orb(self, a: float) -> float:
""" The orbital period of the binary system at semi-major axis [pc]. """
return (2 * np.pi * np.sqrt(pc_to_km ** 2 * a ** 3 / (G_N * (self.m1 + self.m2))) ) # [s]
def interpolate_DF(self, eps_old, correction = 1):
""" Internal function for interpolating the DF on df_plus calculations. """
# Distribution of particles before they scatter
if hasattr(correction, "__len__"):
f_old = np.interp(
eps_old[::-1],
self.eps_grid[::-1],
self.f_eps[::-1] * correction[::-1],
left=0,
right=0,
)[::-1]
else:
f_old = np.interp(
eps_old[::-1], self.eps_grid[::-1], self.f_eps[::-1], left=0, right=0
)[::-1]
return f_old
def delta_eps_of_b(self, r2: float, v_orb: float, b: float) -> float:
""" The change of energy based on the impact parameter of the scattering. """
b90 = self.b_90(r2, v_orb) # [pc]
return -2 * v_orb ** 2 * (1 + b**2 / b90**2) ** -1
# ---------------------
# ----- df/dt ----
# ---------------------
def df_minus(self, r0: float, v_orb: float, v_cut: float = -1, n_kick: int = 1) -> np.array:
"""Particles to remove from the distribution function at energy E. """
if v_cut < 0: v_cut = self.v_max(r0)
df = np.zeros(N_GRID)
# Calculate sizes of kicks and corresponding weights for integration
if n_kick == 1: # Replace everything by the average if n_kick = 1
delta_eps_list = (
-2 * v_orb ** 2 * np.log(1 + self.Lambda(r0, v_orb) ** 2) / self.Lambda(r0, v_orb) ** 2,
)
frac_list = (1,)
else:
b_list = np.geomspace(self.b_min(r0, v_orb), self.b_max(r0, v_orb), n_kick)
delta_eps_list = self.delta_eps_of_b(r0, v_orb, b_list)
# Step size for trapezoidal integration
step = delta_eps_list[1:] - delta_eps_list[:-1]
step = np.append(step, 0)
step = np.append(0, step)
# Make sure that the integral is normalised correctly
renorm = np.trapz(self.P_delta_eps(r0, v_orb, delta_eps_list), delta_eps_list)
frac_list = 0.5 * (step[:-1] + step[1:]) / renorm
# Sum over the kicks
for delta_eps, b, frac in zip(delta_eps_list, b_list, frac_list):
# Define which energies are allowed to scatter
mask = (self.eps_grid > self.psi(r0) * (1 - b / r0) - 0.5 * v_cut ** 2) & (
self.eps_grid < self.psi(r0) * (1 + b / r0)
)
r_eps = G_N * self.m1 / self.eps_grid[mask]
r_cut = G_N * self.m1 / (self.eps_grid[mask] + 0.5 * v_cut ** 2)
L1 = np.minimum((r0 - r0 ** 2 / r_eps) / b, 0.999999)
alpha1 = np.arccos(L1)
L2 = np.maximum((r0 - r0 ** 2 / r_cut) / b, -0.999999)
alpha2 = np.arccos(L2)
m = (2 * b / r0) / (1 - (r0 / r_eps) + b / r0)
mask1 = (m <= 1) & (alpha2 > alpha1)
mask2 = (m > 1) & (alpha2 > alpha1)
N1 = np.zeros(len(m))
if np.any(mask1):
N1[mask1] = ellipe(m[mask1]) - ellipeinc(
(np.pi - alpha2[mask1]) / 2, m[mask1]
)
if np.any(mask2):
N1[mask2] = ellipeinc_alt((np.pi - alpha1[mask2]) / 2, m[mask2])
df[mask] += (
-frac
* self.f_eps[mask]
* (1 + b ** 2 / self.b_90(r0, v_orb) ** 2) ** 2
* np.sqrt(1 - r0 / r_eps + b / r0)
* N1
)
norm = (
2
* np.sqrt(2 * (self.psi(r0)))
* 4
* np.pi ** 2
* r0
* (self.b_90(r0, v_orb) ** 2 / (v_orb) ** 2)
)
result = norm * df / self.DoS
result[self.eps_grid >= 0.9999 *self.psi(self.r_isco)] *= 0
return result
def df_plus(self, r0: float, v_orb: float, v_cut: float = -1, n_kick: int = 1, correction = 1) -> np.array:
"""Particles to add back into distribution function from E - dE -> E. """
if v_cut < 0: v_cut = self.v_max(r0)
df = np.zeros(N_GRID)
# Calculate sizes of kicks and corresponding weights for integration
if n_kick == 1: # Replace everything by the average if n_kick = 1
delta_eps_list = (
-2 * v_orb ** 2 * np.log(1 + self.Lambda(r0, v_orb) ** 2) / self.Lambda(r0, v_orb) ** 2,
)
frac_list = (1,)
else:
b_list = np.geomspace(self.b_min(r0, v_orb), self.b_max(r0, v_orb), n_kick)
delta_eps_list = self.delta_eps_of_b(r0, v_orb, b_list)
# Step size for trapezoidal integration
step = delta_eps_list[1:] - delta_eps_list[:-1]
step = np.append(step, 0)
import numpy as np
import bethy_fapar as fapar
class photosynthesis():
def __init__(self):
'''
Class initialisation and setup of parameters
'''
# zero C in K
self.zeroC = 273.15
# gas constant J mol-1 K-1
self.R_gas = 8.314
# Minimum of maximum carboxylation rate [10^(-6) mol/(m^2 s)]
self.minOfMaxCarboxrate = 1e-12
# Minimum stomatal conductance [mol H2O /(m^2 s)]
self.minStomaConductance = 0.0
# oxygen concentration
self.Ox = 0.21 # mol(O2)mol(air)-1
# energy content of PAR quanta
self.EPAR = 220. # kJmol-1
# photon capture efficiency
self.alpha = 0.28
# maximum Michaelis-Menton values for CO2
self.KC0 = 460.e-6 # mol(CO2)mol(air)-1
# maximum Michaelis-Menton values for O2
self.KO0 = 330.e-3 # mol(O2)mol(air)-1
# activation energy for KC
self.EC = 59396. # J mol-1
# activation energy for KO
self.EO = 35948. # J mol-1
# activation energy for VCMAX
self.EV = 58520. # J mol-1
# activation energy for dark respiration
self.ER = 45000. # J mol-1
# Q10=2 (Collatz et al. 1992)
self.EK = 50967.
# ratio of dark respiration to PVM at 25 C
self.FRDC3 = 0.011
self.FRDC4 = 0.042
# scaling for GammaStar
self.GammaStarScale = 1.7e-6
# Effective quantum efficiency C4
self.ALC4 = 0.04
# Curvature parameter (C4)
self.Theta = 0.83
self.molarMassAir_kg = 28.97e-3
self.molarMassCO2_kg = 44.011e-3
# LAI limit used in N scaling
self.LaiLimit = 3.
def calc_nitrogen_scaling_factors(self,zlai,layer_bounds,declination,latitude):
'''
'''
factors = np.ones((layer_bounds.size,zlai.size))
cos_zenith_noon = np.cos(declination)*np.cos(latitude) \
+ np.sin(declination)*np.sin(latitude)
ww = np.where(cos_zenith_noon < 1e-3)
cos_zenith_noon[ww] = 1e-3
# Extinction factor
k12 = 0.5 / cos_zenith_noon
# Condition: LAI>LaiLimit
ww = np.where(zlai >= self.LaiLimit)
for i in range(layer_bounds.size):
factors[i,:] = np.exp(-k12 * layer_bounds[i] * zlai.flatten())
return factors
def assimilate(self,delta_time,mask,cos_zenith,declination,latitude,\
swdown, par, frac_par_direct, pressure,\
canopy_temp, soil_albedo, CO2_concentration_air,\
canopy_conductance, lai, waterLimitationFlag):
'''
'''
# Express radiation in mol(photons) / (m^2 s)
swdown_mol = swdown/self.EPAR
# soil reflectivity is set to soil albedo of the visible range
soil_reflectivity_par = soil_albedo
# canopy_boundaries_lai
canopy_boundaries_lai = np.arange(ncanopy)/float(ncanopy)
# calculate nitrogen scaling factors
nitrogen_scaling_factors = self.calc_nitrogen_scaling_factors(lai,\
canopy_boundaries_lai,\
declination,\
latitude)
(laiPerLayer,fAPAR) = fapar.faparl(mask,ncanopy,lai,soil_reflectivity_par,cos_zenith,frac_par_direct,\
canopy_boundaries_lai)
# Compute absorbed PAR per leaf area in canopy layer [units: (absorbed photons) / (m^2(leaf area) s)] from
# par and fraction of absorbed PAR (Epar is needed to convert radiation intensity from W/m^2 to mol/(m^2 s))
apar_acc = np.zeros_like(fAPAR)
lai_ = laiPerLayer*1.
ww = np.where(lai_ < 1.e-10)
lai_[ww] = 1.e-10
for icanopy in range(ncanopy):
apar_layer = (par/self.EPAR)*fAPAR[icanopy]/lai_[icanopy]
apar_acc += (par/self.EPAR)*fAPAR[icanopy]*delta_time
# Convert CO2 mass mixing ratio [kg/kg] to volume mixing ratio [mol/mol]
CO2_concentration_mol = CO2_concentration_air * self.molarMassAir_kg / self.molarMassCO2_kg
# estimate CO2 leaf conc
CO2_conc_leaf = self.FCI1C3*CO2_concentration_mol
self.photosynthesis(C3Flag,waterLimitationFlag,PAR,PIRRIN,P,T,CO2_concentration_mol,\
NSCL,ETransport,CarboxRate,Ci,Gs)
def photosynthesis(self,C3Flag,waterLimited,PAR,PIRRIN,P,T,Atm_co2_conc,\
NSCL,ETransport,CarboxRate,\
Ci,Gs):
'''
Farquhar et al. 1980 C3 photosynthesis
args:
C3Flag : True if C3, False for C4
waterLimited : flags to indicate water limited or not
PAR : Absorbed PAR mol(photons) m-2 s-1
PIRRIN : Total irradiance at the surface mol m-2 s-1
P : air pressure (Pa)
T : vegetation (leaf) temperature (K)
Atm_co2_conc : Atmospheric CO2 conc.
NSCL : Nitrogen scaling factor at maximum
carboxylation rate and maximum
electron transport rate
ETransport
: The maximum rate of electron transport
at 25 C for each PFT (mol(CO2) m-2 s-1)
CarboxRate
: The maximum carboxylation rate at 25 C
(micro mol(CO2) m-2 s-1)
Ci : CO2 concentration inside leaf mol(CO2) mol(air)-1
Gs : Stomatal conductance (use for water-limited)
Returns:
(A,Diagnostics) : A = gross assimilation
'''
# return None if no data
if C3Flag.size == 0:
return None
# work out which are C3 and C4
wC3 = np.where(C3Flag)
wC4 = np.where(np.logical_not(C3Flag))
# process C3
if wC3[0].size > 0:
(A3,C3diagnostics) = self.photosynthesisC3(PAR[wC3],PIRRIN[wC3],P[wC3],T[wC3],Atm_co2_conc[wC3],\
NSCL[wC3],ETransport[wC3],CarboxRate[wC3],\
Ci[wC3],Gs[wC3],waterLimited[wC3])
else:
A3 = np.array([])
C3diagnostics = {}
# process C4
if wC4[0].size > 0:
(A4,C4diagnostics) = self.photosynthesisC4(PAR[wC4],PIRRIN[wC4],P[wC4],T[wC4],Atm_co2_conc[wC4],\
NSCL[wC4],ETransport[wC4],CarboxRate[wC4],\
Ci[wC4],Gs[wC4],waterLimited[wC4])
else:
A4 = np.array([])
C4diagnostics = {}
# combine
A = np.zeros_like(C3Flag).astype(float)
A[C3Flag] = A3
A[wC4] = A4
self.Diagnostics = {}
keys = np.unique(np.array(list(C3diagnostics.keys()) + list(C4diagnostics.keys())))
for k in keys:
self.Diagnostics[k] = np.zeros_like(A)
try:
self.Diagnostics[k][wC3] = C3diagnostics[k]
except:
pass
try:
self.Diagnostics[k][wC4] = C4diagnostics[k]
except:
pass
self.Diagnostics['C3Flag'] = C3Flag
self.Diagnostics['waterLimited'] = waterLimited
return (A,self.Diagnostics)
def photosynthesisC4(self,PAR,PIRRIN,P,T,Atm_co2_conc,\
NSCL,ETransport,CarboxRate,\
Ci,Gs,waterLimited):
'''
Similar to C3 case, but
For C4 plants the Farquhar equations are replaced by the set of equations of
Collatz et al. 1992:
args:
PAR : Absorbed PAR mol(photons) m-2 s-1
PIRRIN : Total irradiance at the surface mol m-2 s-1
P : air pressure (Pa)
T : vegetation (leaf) temperature (K)
Atm_co2_conc : Atmospheric CO2 conc.
NSCL : Nitrogen scaling factor at maximum
carboxylation rate and maximum
electron transport rate
ETransport
: The maximum rate of electron transport
at 25 C for each PFT (mol(CO2) m-2 s-1)
CarboxRate
: The maximum carboxylation rate at 25 C
(micro mol(CO2) m-2 s-1)
Ci : CO2 concentration inside leaf mol(CO2) mol(air)-1
Gs : Stomatal conductance
waterLimited : flags for water limited or not
Returns:
(A,Diagnostics) : A = gross assimilation
'''
# T1 = 25 C in K
T1 = 25. + self.zeroC
# T0 is veg temperature relative to 25 C
T0 = T - T1
# TC is the temperature in C
TC = T - self.zeroC
# K is the PEPCase CO2 specificity instead of the electron transport capacity
# within C3 plants
K = ETransport * 1.e3 * NSCL \
* np.exp(self.EK * T0 / T1 / self.R_gas / T)
# VCMAX : : assume N content, therefore Rubisco is placed
# where most incoming light is
# NB .. this is a structural consideration
VCMAX = CarboxRate * NSCL * np.exp(self.EV * T0 / T1 / self.R_gas / T)
# dark respiration (mol(CO2)m-2s-1)
Rd = self.FRDC4 * CarboxRate * NSCL \
* np.exp(self.ER * T0 / T1 / self.R_gas / T) \
* highTInhibit(TC) \
* darkInhibit(PIRRIN)
# C4 gross photosynthesis at given Ci
J0 = (self.ALC4 * PAR + VCMAX) / 2. / self.Theta
Je = J0 - np.sqrt(J0*J0 - VCMAX * self.ALC4 * PAR / self.Theta)
Jc = np.zeros_like(Rd)
A = np.zeros_like(Rd)
waterLimit = np.where(waterLimited)
notWaterLimit = np.where(np.logical_not(waterLimited))
if notWaterLimit[0].size > 0:
Ci_ = Ci[notWaterLimit]
TC_ = TC[notWaterLimit]
Rd_ = Rd[notWaterLimit]
Atm_co2_conc_ = Atm_co2_conc[notWaterLimit]
P_ = P[notWaterLimit]
T_ = T[notWaterLimit]
K_ = K[notWaterLimit]
Je_ = Je[notWaterLimit]
Jc_ = K_ * Ci_
# assimilation is the minimum of Je and Jc
# with a high temperature inhibition
# mol(CO2)m-2s-1
A_ = Je_
ww = np.where(Jc_ < Je_)
A_[ww] = Jc_[ww]
A_ = A_ * highTInhibit(TC_)
# stomatal conductance
Gs_ = 1.6 * (A_-Rd_) * self.R_gas * T_/ (Atm_co2_conc_ - Ci_) / P_
ww = np.where(Gs_ < self.minStomaConductance)
Gs_[ww] = self.minStomaConductance
Gs[notWaterLimit] = Gs_
Jc[notWaterLimit] = Jc_
A[notWaterLimit] = A_
else:
# water limited, so Gs is defined and Ci must be calculated
Gs_ = Gs[waterLimit]
TC_ = TC[waterLimit]
Rd_ = Rd[waterLimit]
Atm_co2_conc_ = Atm_co2_conc[waterLimit]
P_ = P[waterLimit]
T_ = T[waterLimit]
K_ = K[waterLimit]
Je_ = Je[waterLimit]
G0 = Gs_ / 1.6 / self.R_gas / T_ * P_
Jc_ = (G0 * Atm_co2_conc_ + Rd_)/(1. + G0/K_)
ww = np.where(Jc_ < 0)
Jc_[ww] = 0.
# assimilation is the minimum of Je and Jc
# with a high temperature inhibition
# mol(CO2)m-2s-1
A_ = Je_
ww = np.where(Jc_ < Je_)
A_[ww] = Jc_[ww]
A_ = A_ * highTInhibit(TC_)
maxer1 = A_ - Rd_
maxer2 = G0
ww = np.where(G0<1e-6)
maxer2[ww] = 1e-6
maxer = maxer1/maxer2
ww = np.where(maxer < 0)
import numpy as np
from mpi4py import MPI
import os
from SIMP import TO_SIMP, make_Conn_matrix
import time
from keras.models import load_model
from bayesian_optimization import kriging, ExImp, corr_matrix
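# get_void builds a boolean mask of six circular regions (radius min(nely,nelx)/15) at fixed
# fractional positions of the design domain; these are passed to TO_SIMP as void regions,
# presumably kept empty during the topology optimization.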
def get_void(nely,nelx):
v=np.zeros((nely,nelx))
R=min(nely,nelx)/15
loc=np.array([[1/3, 1/4], [2/3, 1/4],[ 1/3, 1/2], [2/3, 1/2], [1/3 , 3/4], [2/3, 3/4]])
loc=loc*np.array([[nely,nelx]])
for i in range(nely):
for j in range(nelx):
v[i,j]=R-np.min(np.sqrt(np.sum((loc-np.array([[i+1,j+1]]))**2,1)));
v=v>0
return v
def evaluate_design(Z,Decoder,volfrac,Iar,cMat,void,opt_it,typ):
beta=0.05
epsilon_1=1
epsilon_2=0.25
nelx=90
nely=45
penal=3
E0=1
nu=0.3
max_move=0.25
X=Decoder.predict(Z)
if typ=='sdf':
X=np.clip(X+0.5,0,1)
(n,nely,nelx)=X.shape
avoid=np.zeros((1,nely,nelx))
C=np.zeros(n)
X_out=np.zeros(X.shape)
for i in range(n):
X_out[i,:,:], _ = TO_SIMP(X[i,:,:] , volfrac, penal, beta, epsilon_1, max_move, E0, nu, Iar, cMat, False, void, avoid, 0, opt_it)
## Enforce a design with sparse densities
X_out[i,:,:], C[i] = TO_SIMP(X_out[i,:,:], volfrac, penal, beta, epsilon_2, max_move, E0, nu, Iar, cMat, True , void, avoid, 0, 10)
return X_out,C
## Implement multiprocessing, with size processors, where rank is the one currently executing this file
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
## Load the decoded latent space over which optimization is to be performed
Decoder_model='Sample_data/edim=100_pre=1_AAE=1_surr=1_sdf_case=3_Decoder.h5'
Decoder=load_model(Decoder_model)
## Set type of encoding
typ=Decoder_model[-21:-18] ## Only works for case<10
## Set number of optimization steps in latent space
opt_it=25
## Get problem dimensionality
[nely,nelx]=Decoder.output_shape[1:]
## Set parameters needed for cost function (Compliance minimization)
volfrac=0.4
Iar, cMat= make_Conn_matrix(nelx,nely)
void=get_void(nely, nelx)
## Get the dimensionality of the latent space over which optimization is to be performed
edim=Decoder.input_shape[1]
## Set the parameters for differential evolution (needed for optimization of the acquisition function)
multiplyer=0.6
prob_change=0.9
## Perform the Bayesian optimization
## Set the number of initial samples
train_samples=10*edim
train_samples_perrank=int(train_samples/size)
train_samples=train_samples_perrank*size
## Initialize Gaussian process model parameters
p=np.zeros(edim)
theta=np.ones(edim)
## Set number of iterations
bayes_iteration=edim*3
comm.Barrier()
start_time=time.time()
## Generate the number of training samples and evaluate them
Z0_rank=np.random.rand(train_samples_perrank,edim)
X0_rank,C0_rank=evaluate_design(Z0_rank,Decoder,volfrac,Iar,cMat,void,opt_it,typ)
## Share the samples across processor cores/ranks
if rank==0:
Z0_rec=np.empty((size,train_samples_perrank,edim))
X0_rec=np.empty((size,train_samples_perrank,nely,nelx))
C0_rec=np.empty((size,train_samples_perrank))
else:
Z0_rec=None
X0_rec=None
C0_rec=None
comm.Barrier()
comm.Gather(Z0_rank,Z0_rec,root=0)
comm.Gather(X0_rank,X0_rec,root=0)
comm.Gather(C0_rank,C0_rec,root=0)
if rank==0:
Z0=Z0_rec.reshape((train_samples,edim))
X0=X0_rec.reshape((train_samples,nely,nelx))
C0=C0_rec.reshape((train_samples,1))
else:
Z0=None
X0=None
C0=None
Z0=comm.bcast(Z0,root=0)
X0=comm.bcast(X0,root=0)
C0=comm.bcast(C0,root=0)
## Start the iterative optimization process
for ib in range(bayes_iteration):
if rank==0:
print(' Iteration {}'.format(ib))
start_time_it=time.time()
start_time_its=time.time()
## Set the weight to focus on ...
if ib<bayes_iteration-100:
## ...exploration
weight_explore=10
else:
## ...exploitation
weight_explore=0
## Set number of optimization steps when generating Gaussian process model parameters
if ib==0:
k_iter=50
## Get Gaussian process model parameters
theta,p=kriging(Z0, C0,rank,size,[],k_iter)
else:
if np.mod(ib,100)==0:
k_iter=50
else:
k_iter=10
para_old=np.concatenate((theta,p-0.5),0)
## Get Gaussian process model parameters
theta,p=kriging(Z0, C0,rank,size,para_old,k_iter)
## Get the Gaussian process model correlation matrix for optimized parameters
K=corr_matrix(Z0, theta[:,np.newaxis], p[:,np.newaxis])[:,:,0]
## Get the inverse of the correlation matrix (adapt it when the matrix is singular)
inverse_failed=True
while inverse_failed:
try:
Kinv=np.linalg.inv(K)
inverse_failed=False
except np.linalg.LinAlgError:
K=K+np.identity(len(C0))*1e-4 # add a small jitter to the diagonal so the correlation matrix becomes invertible
stop_time_its=time.time()
if rank==0:
time_needed=stop_time_its-start_time_its
print(' Time needed for model training: {:10.1f}s'.format(time_needed))
start_time_its=time.time()
## Optimize acquisition function using differential evolution
EI_num_pop_perrank=int(np.ceil(2.5*edim/size))
EI_num_pop=size*EI_num_pop_perrank
## Initial generation is generated and evaluated
Zei_rank=np.random.rand(EI_num_pop_perrank,edim)
EI_rank=ExImp(Zei_rank, theta, p, Z0, C0, Kinv, weight_explore)
## Initial generation is shared over all ranks
if rank==0:
Zei_rec=np.empty((size,EI_num_pop_perrank,edim))
EI_rec=np.empty((size,EI_num_pop_perrank))
else:
Zei_rec=None
EI_rec=None
comm.Barrier()
comm.Gather(Zei_rank,Zei_rec,root=0)
comm.Gather(EI_rank,EI_rec,root=0)
if rank==0:
Zei=Zei_rec.reshape((EI_num_pop,edim))
EI=EI_rec.reshape(EI_num_pop)
else:
Zei=None
EI=None
Zei=comm.bcast(Zei,root=0)
EI=comm.bcast(EI,root=0)
loop_ei=0
loop_ei_max=500
## Generations are evolved
while loop_ei<loop_ei_max:
Zei_rank=Zei[rank*EI_num_pop_perrank:(rank+1)*EI_num_pop_perrank,:]
EI_rank=EI[rank*EI_num_pop_perrank:(rank+1)*EI_num_pop_perrank]
## Reproduction between different individuals from the population is performed
test_case=np.floor(np.random.rand(EI_num_pop_perrank,3)*(EI_num_pop-1e-7)).astype('int')
Za_rank=np.copy(Zei[test_case[:,0],:])
Zb_rank=np.copy(Zei[test_case[:,1],:])
Zc_rank=np.copy(Zei[test_case[:,2],:])
Zcom_rank=Za_rank+multiplyer*(Zb_rank-Zc_rank)
## Crossover between child and parent is performed
prob=np.random.rand(EI_num_pop_perrank,edim)
Zcom_rank[prob>prob_change]=np.copy(Zei_rank[prob>prob_change])
## Boundaries of design are enforced
Zcom_rank[Zcom_rank<0]=0
Zcom_rank[Zcom_rank>1]=1
## Selection between child (has to be evaluated first) and parent is performed
EI_compare=ExImp(Zcom_rank, theta, p, Z0, C0, Kinv, weight_explore)
EI_rank=np.minimum(EI_rank,EI_compare)
Zei_rank[EI_compare<=EI_rank,:]=Zcom_rank[EI_compare<=EI_rank,:]
## New population is shared between all ranks
if rank==0:
Zei_rec=np.empty((size,EI_num_pop_perrank,edim))
EI_rec=np.empty((size,EI_num_pop_perrank))
# This computes 1 / pi by tossing needles
import random
import numpy as np
import matplotlib.pyplot as plt
import time
from decimal import Decimal
N = int(input("How many needles would you like to toss? "))
start_time = time.time()
needle_to_line_ratio = 0.5
iter = np.arange(N)
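# The excerpt ends here; a minimal sketch of how the estimate could be completed follows
# (an assumed continuation, not the original code). With parallel lines one unit apart and
# needle length 0.5, the crossing probability is 2*0.5/pi = 1/pi, so the observed crossing
# fraction estimates 1/pi.
theta = np.random.uniform(0.0, np.pi, N)            # needle orientation
dist = np.random.uniform(0.0, 0.5, N)               # centre distance to the nearest line
crossings = np.sum(dist <= (needle_to_line_ratio / 2.0) * np.sin(theta))
inv_pi_estimate = crossings / N
print("Estimated 1/pi: %f (exact: %f)" % (inv_pi_estimate, 1.0 / np.pi))
print("Elapsed time: %.3f s" % (time.time() - start_time))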
# To import required modules:
import numpy as np
import time
import os
import sys
import matplotlib
import matplotlib.cm as cm #for color maps
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec #for specifying plot attributes
from matplotlib import ticker #for setting contour plots to log scale
import scipy.integrate #for numerical integration
import scipy.misc #for factorial function
from scipy.special import erf #error function, used in computing CDF of normal distribution
import scipy.interpolate #for interpolation functions
import corner #corner.py package for corner plots
#matplotlib.rc('text', usetex=True)
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))))
from src.functions_general import *
from src.functions_compare_kepler import *
from src.functions_load_sims import *
from src.functions_plot_catalogs import *
from src.functions_plot_params import *
savefigures = False
savefigures_directory = '/Users/hematthi/Documents/GradSchool/Research/ExoplanetsSysSim_Clusters/Figures/Model_Optimization/AMD_system/Split_stars/Singles_ecc/Params11_KS/durations_norm_circ_singles_multis_GF2020_KS/Best_models/GP_med/'
##### To load the underlying populations:
loadfiles_directory = '/Users/hematthi/Documents/GradSchool/Research/ACI/Simulated_Data/AMD_system/Split_stars/Singles_ecc/Params11_KS/Distribute_AMD_per_mass/durations_norm_circ_singles_multis_GF2020_KS/GP_med/' #Lognormal_mass_Earthlike_rocky/
run_number = ''
N_sim, cos_factor, P_min, P_max, radii_min, radii_max = read_targets_period_radius_bounds(loadfiles_directory + 'periods%s.out' % run_number)
param_vals_all = read_sim_params(loadfiles_directory + 'periods%s.out' % run_number)
sssp_per_sys, sssp = compute_summary_stats_from_cat_phys(file_name_path=loadfiles_directory, run_number=run_number, load_full_tables=True)
##### To load some mass-radius tables:
# NWG-2018 model:
MR_table_file = '../../data/MRpredict_table_weights3025_R1001_Q1001.txt'
with open(MR_table_file, 'r') as file:
lines = (line for line in file if not line.startswith('#'))
MR_table = np.genfromtxt(lines, names=True, delimiter=', ')
# Li Zeng models:
MR_earthlike_rocky = np.genfromtxt('../../data/MR_earthlike_rocky.txt', names=['mass','radius']) # mass and radius are in Earth units
MR_pure_iron = np.genfromtxt('../../data/MR_pure_iron.txt', names=['mass','radius']) # mass and radius are in Earth units
# To construct an interpolation function for each MR relation:
MR_NWG2018_interp = scipy.interpolate.interp1d(10.**MR_table['log_R'], 10.**MR_table['05'])
MR_earthlike_rocky_interp = scipy.interpolate.interp1d(MR_earthlike_rocky['radius'], MR_earthlike_rocky['mass'])
MR_pure_iron_interp = scipy.interpolate.interp1d(MR_pure_iron['radius'], MR_pure_iron['mass'])
# To find where the Earth-like rocky relation intersects with the NWG2018 mean relation (between 1.4-1.5 R_earth):
def diff_MR(R):
M_NWG2018 = MR_NWG2018_interp(R)
M_earthlike_rocky = MR_earthlike_rocky_interp(R)
return np.abs(M_NWG2018 - M_earthlike_rocky)
# The intersection is approximately 1.472 R_earth
radii_switch = 1.472
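# Illustration (added, not in the original script): the intersection quoted above can be
# located numerically from diff_MR with a bounded scalar minimisation.
from scipy.optimize import minimize_scalar
_res = minimize_scalar(diff_MR, bounds=(1.4, 1.5), method='bounded')
print('NWG2018 / Earth-like rocky intersection: %.3f R_earth' % _res.x)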
# IDEA 1: Normal distribution for rho centered around Earth-like rocky, with a sigma_rho that grows with radius
# To define sigma_rho such that log10(sigma_rho) is a linear function of radius:
rho_earthlike_rocky = rho_from_M_R(MR_earthlike_rocky['mass'], MR_earthlike_rocky['radius']) # mean density (g/cm^3) for Earth-like rocky as a function of radius
rho_pure_iron = rho_from_M_R(MR_pure_iron['mass'], MR_pure_iron['radius']) # mean density (g/cm^3) for pure iron as a function of radius
sigma_rho_at_radii_switch = 3. # std of mean density (g/cm^3) at radii_switch
sigma_rho_at_radii_min = 1. # std of mean density (g/cm^3) at radii_min
rho_radius_slope = (np.log10(sigma_rho_at_radii_switch)-np.log10(sigma_rho_at_radii_min)) / (radii_switch - radii_min) # dlog(rho)/dR; slope between radii_min and radii_switch in log(rho)
sigma_rho = 10.**( rho_radius_slope*(MR_earthlike_rocky['radius'] - radii_min) + np.log10(sigma_rho_at_radii_min) )
# IDEA 2: Lognormal distribution for mass centered around Earth-like rocky, with a sigma_log_M that grows with radius
# To define sigma_log_M as a linear function of radius:
sigma_log_M_at_radii_switch = 0.3 # std of log_M (Earth masses) at radii_switch
sigma_log_M_at_radii_min = 0.04 # std of log_M (Earth masses) at radii_min
sigma_log_M_radius_slope = (sigma_log_M_at_radii_switch - sigma_log_M_at_radii_min) / (radii_switch - radii_min)
sigma_log_M = sigma_log_M_radius_slope*(MR_earthlike_rocky['radius'] - radii_min) + sigma_log_M_at_radii_min
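# Example (added for illustration, not in the original script): drawing one mass for a given
# radius under IDEA 2, i.e. log10(M_p) ~ Normal(log10(M_earthlike_rocky(R_p)), sigma_log_M(R_p)).
# The log10 base and the interpolation of sigma_log_M onto R_p are assumptions.
def draw_mass_idea2(R_p):
    sigma_R = np.interp(R_p, MR_earthlike_rocky['radius'], sigma_log_M)
    log_M_mean = np.log10(MR_earthlike_rocky_interp(R_p))
    return 10.**np.random.normal(log_M_mean, sigma_R)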
##### To make mass-radius plots:
afs = 20 #axes labels font size
tfs = 20 #text labels font size
lfs = 16 #legend labels font size
bins = 100
# Density vs. radius for new model based on Li Zeng's Earth-like rocky:
fig = plt.figure(figsize=(8,8))
plot = GridSpec(4, 1, left=0.15, bottom=0.1, right=0.98, top=0.98, wspace=0, hspace=0)
ax = plt.subplot(plot[0,:]) # sigma_rho vs. radius
plt.plot(MR_earthlike_rocky['radius'], sigma_rho, color='orange', ls='-', lw=3, label=r'Linear $\log(\sigma_\rho)$ vs $R_p$')
plt.gca().set_yscale("log")
ax.tick_params(axis='both', labelsize=afs)
plt.xticks([])
plt.yticks([1., 2., 3., 4., 5.])
ax.yaxis.set_major_formatter(ticker.ScalarFormatter())
ax.yaxis.get_major_formatter().set_scientific(False)
ax.yaxis.get_major_formatter().set_useOffset(False)
plt.xlim([radii_min, radii_switch])
plt.ylim([0.9, 4.])
plt.ylabel(r'$\sigma_\rho$ ($g/cm^3$)', fontsize=tfs)
plt.legend(loc='upper left', bbox_to_anchor=(0.01,0.99), ncol=1, frameon=False, fontsize=lfs)
ax = plt.subplot(plot[1:,:]) # rho vs. radius
plt.plot(MR_pure_iron['radius'], rho_pure_iron, color='r', ls='--', lw=3, label='Pure iron')
plt.plot(MR_earthlike_rocky['radius'], rho_earthlike_rocky, color='orange', ls='--', lw=3, label='Earth-like rocky')
plt.fill_between(MR_earthlike_rocky['radius'], rho_earthlike_rocky - sigma_rho, rho_earthlike_rocky + sigma_rho, color='orange', alpha=0.5, label=r'Earth-like rocky $\pm \sigma_\rho$')
plt.fill_between(MR_earthlike_rocky['radius'], rho_earthlike_rocky - 2.*sigma_rho, rho_earthlike_rocky + 2.*sigma_rho, color='orange', alpha=0.3, label=r'Earth-like rocky $\pm 2\sigma_\rho$')
plt.fill_between(MR_earthlike_rocky['radius'], rho_earthlike_rocky - 3.*sigma_rho, rho_earthlike_rocky + 3.*sigma_rho, color='orange', alpha=0.1, label=r'Earth-like rocky $\pm 3\sigma_\rho$')
plt.axhline(y=1., color='c', lw=3, label='Water density (1 g/cm^3)')
plt.gca().set_yscale("log")
ax.tick_params(axis='both', labelsize=afs)
plt.minorticks_off()
plt.yticks([1., 2., 3., 4., 5., 7., 10., 15.])
ax.yaxis.set_minor_formatter(ticker.ScalarFormatter())
ax.yaxis.set_major_formatter(ticker.ScalarFormatter())
ax.yaxis.get_major_formatter().set_scientific(False)
ax.yaxis.get_major_formatter().set_useOffset(False)
plt.xlim([radii_min, radii_switch])
plt.ylim([0.9, 20.])
plt.xlabel(r'$R_p$ ($R_\oplus$)', fontsize=tfs)
plt.ylabel(r'$\rho$ ($g/cm^3$)', fontsize=tfs)
plt.legend(loc='lower right', bbox_to_anchor=(0.99,0.01), ncol=1, frameon=False, fontsize=lfs)
if savefigures:
plt.savefig(savefigures_directory + 'Density_radius.pdf')
plt.close()
plt.show()
# Mass vs. radius:
fig = plt.figure(figsize=(16,8))
plot = GridSpec(5, 5, left=0.1, bottom=0.1, right=0.98, top=0.98, wspace=0, hspace=0)
ax = plt.subplot(plot[1:,:4])
masses_all = sssp_per_sys['mass_all'][sssp_per_sys['mass_all'] > 0.]
radii_all = sssp_per_sys['radii_all'][sssp_per_sys['radii_all'] > 0.]
corner.hist2d(np.log10(radii_all), np.log10(masses_all), bins=50, plot_density=True, contour_kwargs={'colors': ['0.6','0.4','0.2','0']}, data_kwargs={'color': 'k'})
plt.plot(MR_table['log_R'], MR_table['05'], '-', color='g', label='Mean prediction (NWG2018)')
plt.fill_between(MR_table['log_R'], MR_table['016'], MR_table['084'], color='g', alpha=0.5, label=r'16%-84% (NWG2018)')
plt.plot(MR_table['log_R'], np.log10(M_from_R_rho(10.**MR_table['log_R'], rho=5.51)), color='b', label='Earth density (5.51 g/cm^3)')
plt.plot(MR_table['log_R'], np.log10(M_from_R_rho(10.**MR_table['log_R'], rho=3.9)), color='m', label='Mars density (3.9 g/cm^3)')
plt.plot(MR_table['log_R'], np.log10(M_from_R_rho(10.**MR_table['log_R'], rho=1.)), color='c', label='Water density (1 g/cm^3)')
plt.plot(MR_table['log_R'], np.log10(M_from_R_rho(10.**MR_table['log_R'], rho=7.9)), color='r', label='Iron density (7.9 g/cm^3)')
plt.plot(MR_table['log_R'], np.log10(M_from_R_rho(10.**MR_table['log_R'], rho=100.)), color='k', label='100 g/cm^3')
plt.plot(np.log10(MR_earthlike_rocky['radius']), np.log10(MR_earthlike_rocky['mass']), color='orange', ls='--', lw=3, label='Earth-like rocky')
#plt.fill_between(np.log10(MR_earthlike_rocky['radius']), np.log10(M_from_R_rho(MR_earthlike_rocky['radius'], rho=rho_earthlike_rocky-sigma_rho)), np.log10(M_from_R_rho(MR_earthlike_rocky['radius'], rho=rho_earthlike_rocky+sigma_rho)), color='orange', alpha=0.5, label=r'16%-84% ($\rho \sim \mathcal{N}(\rho_{\rm Earthlike\:rocky}, \sigma_\rho(R_p))$)') #label=r'$\rho \sim \mathcal{N}(\rho_{\rm Earthlike\:rocky}, 10^{[\frac{d\log\rho}{dR_p}(R_p - 0.5) + \log{\rho_0}]})$'
plt.fill_between(np.log10(MR_earthlike_rocky['radius']), np.log10(MR_earthlike_rocky['mass']) - sigma_log_M, np.log10(MR_earthlike_rocky['mass']) + sigma_log_M, color='orange', alpha=0.5, label=r'16%-84% ($\log{M_p} \sim \mathcal{N}(M_{p,\rm Earthlike\:rocky}, \sigma_{\log{M_p}})$)')
plt.plot(np.log10(MR_pure_iron['radius']), np.log10(MR_pure_iron['mass']), color='r', ls='--', lw=3, label='Pure iron')
#plt.axvline(x=np.log10(0.7), color='k', ls='--', lw=3)
plt.axvline(x=np.log10(radii_switch), color='k', ls='--', lw=3)
ax.tick_params(axis='both', labelsize=afs)
xtick_vals = np.array([0.5, 1., 2., 4., 10.])
ytick_vals = np.array([1e-1, 1., 10., 1e2])
plt.xticks(np.log10(xtick_vals), xtick_vals)
plt.yticks(np.log10(ytick_vals), ytick_vals)
plt.xlim([np.log10(radii_min), np.log10(radii_max)])
plt.ylim([np.log10(0.07), 2.])
plt.xlabel(r'$R_p$ ($R_\oplus$)', fontsize=tfs)
plt.ylabel(r'$M_p$ ($M_\oplus$)', fontsize=tfs)
plt.legend(loc='lower right', bbox_to_anchor=(0.99,0.01), ncol=1, frameon=False, fontsize=lfs)
ax = plt.subplot(plot[0,:4]) # top histogram
plt.hist(radii_all, bins=np.logspace(np.log10(radii_min), np.log10(radii_max), bins+1), histtype='step', color='k', ls='-', label=r'All')
#plt.axvline(x=0.7, color='k', ls='--', lw=3)
plt.axvline(x=radii_switch, color='k', ls='--', lw=3)
plt.gca().set_xscale("log")
plt.xlim([radii_min, radii_max])
plt.xticks([])
plt.yticks([])
plt.legend(loc='upper right', bbox_to_anchor=(0.99,0.99), ncol=1, frameon=False, fontsize=lfs)
ax = plt.subplot(plot[1:,4]) # side histogram
plt.hist(masses_all, bins=np.logspace(np.log10(0.07), 2., bins+1), histtype='step', orientation='horizontal', color='k', ls='-', label='All')
radii_cut = radii_switch
plt.hist(masses_all[radii_all > radii_cut], bins=np.logspace(np.log10(0.07), 2., bins+1), histtype='step', orientation='horizontal', color='b', ls='-', label=r'$R_p > %s R_\oplus$' % radii_cut)
plt.hist(masses_all[radii_all < radii_cut], bins=np.logspace(np.log10(0.07), 2., bins+1), histtype='step', orientation='horizontal', color='r', ls='-', label=r'$R_p < %s R_\oplus$' % radii_cut)
#!/usr/bin/env python3
"""
Investigate DSC data.
Created on Fri Sep 13 12:44:01 2019
@author: slevy
"""
import dsc_extract_physio
import nibabel as nib
import numpy as np
import os
import matplotlib.pyplot as plt
import scipy.signal
import scipy.stats
import pydicom
from matplotlib import cm
from lmfit.models import GaussianModel
from datetime import datetime
import warnings
def extract_signal_within_roi(image, mask):
if len(image.shape) > 3:
nrep = image.shape[3]
s_along_reps = np.zeros((nrep))
s_along_reps_by_slice = np.zeros((nrep, image.shape[2]))
for i_rep in range(nrep):
img_rep_i = image[:, :, :, i_rep]
s_along_reps[i_rep] = np.mean(img_rep_i[mask > 0])
for i_z in range(image.shape[2]):
s_along_reps_by_slice[i_rep, i_z] = np.mean(img_rep_i[mask[:, :, i_z] > 0, i_z])
return s_along_reps, s_along_reps_by_slice
else:
s_whole_mask = np.mean(image[mask > 0])
s_by_slice = np.zeros((image.shape[2]))
for i_z in range(image.shape[2]):
s_by_slice[i_z] = np.mean(image[mask[:, :, i_z] > 0, i_z])
return s_whole_mask, s_by_slice
# def detect_outliers(signal, time):
#
# # thresholds for detection
# sd_t = np.std(signal[1:]) # first point is always outlier
# mean_baseline = np.mean(signal[0, 1:12])
#
#
# # find outliers =================================================================================
# signal_reptimes = np.vstack((s_along_reps, reps_acqtime))
# signal_reptimes_outliers = np.zeros((2, 1))
# signal_reptimes_outliers[:, 0] = signal_reptimes[:, 0] # save the first point as outlier because it is always corrupted in those data
# signal_reptimes_without_outliers = signal_reptimes[:, 1:] # remove the first point which is always corrupted with this sequence
#
# # if above 3 standard-deviation it is an outlier
# idx_outliers = np.where(np.abs(signal_reptimes_without_outliers[0, :] - mean_baseline) >= 3*sd_t) # find indexes of outliers
# signal_reptimes_outliers = np.hstack((signal_reptimes_outliers, signal_reptimes_without_outliers[:, idx_outliers[0]])) # save the detected outliers
# signal_reptimes_without_outliers = np.delete(signal_reptimes_without_outliers, idx_outliers, axis=1) # remove the outliers
# # by slice
# s_along_reps_by_slice = np.delete(s_along_reps_by_slice, 0, axis=0) # first point is always outlier
# sd_t_by_slice = np.std(s_along_reps_by_slice, axis=0) # temporal SD for each slice
# s_along_reps_by_slice_without_outliers = [] # [[signal, acqtimes], [,], [,] ]
# for i_z in range(dsc.shape[2]):
# idx_outliers_z_i = np.where(np.abs(s_along_reps_by_slice[:, i_z] - np.mean(s_along_reps_by_slice[0:11, i_z])) >= 3 * sd_t_by_slice[i_z]) # find indexes of outliers
# s_along_reps_by_slice_without_outliers.append([np.delete(s_along_reps_by_slice[:, i_z], idx_outliers_z_i), np.delete(signal_reptimes[1, 1:], idx_outliers_z_i)])
#
# return idx_outliers, signal_without_outliers, signal_outliers, time_without_outliers_time_outliers
def smooth_signal(signal, baseline_nb=10, windowLength=23, outPlotFname=''):
"""
Smooth signal.
:param signal: MRI signal, already regridded to a regular sampling
:param baseline_nb: number of baseline points used to replace the first (outlier) point
:param windowLength: window length (in points) of the Savitzky-Golay filter
:param outPlotFname: if provided, save a plot comparing the original and smoothed signals
:return: smoothed signal
"""
# first point is always an outlier (and a NaN actually because of the TReff normalization)
# --> replace it by the mean signal at baseline
signal[0] = np.mean(signal[1:baseline_nb])
# # interpolate signal on regular grid
# t_regular_sampling = np.linspace(np.min(time), np.max(time), increase_res_factor * len(time))
# signal_interp = np.interp(t_regular_sampling, time, signal)
# replace
# signal_interp_smoothed = scipy.signal.savgol_filter(signal_interp, window_length=25, polyorder=3)
signal_smoothed = scipy.signal.savgol_filter(signal, window_length=windowLength, polyorder=5, mode='constant', cval=signal[0])
if outPlotFname:
# plot results
fig, ((ax1)) = plt.subplots(1, 1, figsize=(20, 9.5))
ax1.set_title('Final signal smoothing')
ax1.set_xlabel('Points')
ax1.plot(np.arange(signal.size), signal, label='original signal', color='black', lw=0.3, marker='+')
ax1.plot(np.arange(signal.size), signal_smoothed, label='smoothed signal', color='tab:blue', lw=0.3, marker='o', fillstyle='none')
ax1.legend()
ax1.grid()
fig.savefig(outPlotFname)
plt.close()
return signal_smoothed
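# Minimal illustration (added, not part of the original module) of smooth_signal on a
# synthetic bolus-like curve; the curve shape and noise level are arbitrary.
if __name__ == "__main__":
    _t = np.arange(200)
    _synthetic = 100. - 30. * np.exp(-0.5 * ((_t - 80.) / 10.) ** 2) + np.random.normal(0., 2., 200)
    _smoothed = smooth_signal(_synthetic.copy(), baseline_nb=10, windowLength=23)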
def smoothlyCropSignal(mriSignalRegrid, firstPassStartRepRegrid, firstPassEndRepRegrid, injRepRegrid, outPlotFname=''):
"""
    :param mriSignalRegrid: MRI signal regridded to a regular temporal sampling
    :param firstPassStartRepRegrid: index (on the regridded time axis) of the first pass start
    :param firstPassEndRepRegrid: index (on the regridded time axis) of the first pass end
    :param injRepRegrid: index (on the regridded time axis) of the contrast agent injection
    :param outPlotFname: if non-empty, path where a plot of the cropping result is saved
    :return: mriSignalCropSmooth: signal cropped before first pass start and after first pass end with smooth transitions
             mriSignalCropEndSmooth_forAIF: signal cropped before the injection and after the middle of the first pass
             (start + (end - start)/2) with smooth transitions, to be used for AIF detection
"""
# calculate the baseline before and after contrast agent first pass
baselineBefore = np.mean(mriSignalRegrid[0:firstPassStartRepRegrid])
baselineAfter = np.mean(mriSignalRegrid[firstPassEndRepRegrid:-1])
# replace them in original signal
mriSignalCrop = np.copy(mriSignalRegrid)
mriSignalCrop[0:firstPassStartRepRegrid] = baselineBefore
mriSignalCrop[firstPassEndRepRegrid:-1] = baselineAfter
# crop larger for AIF detection
mriSignalCropEnd_forAIF = np.copy(mriSignalRegrid)
firstPassMiddleRep = int(np.ceil(firstPassStartRepRegrid + (firstPassEndRepRegrid - firstPassStartRepRegrid)/2))
mriSignalCropEnd_forAIF[0:injRepRegrid] = baselineBefore
mriSignalCropEnd_forAIF[firstPassMiddleRep:-1] = baselineAfter
# smooth whole signal to avoid sharp transitions
mriSignalCropSmooth = scipy.signal.savgol_filter(mriSignalCrop, window_length=25, polyorder=3, mode='nearest')
mriSignalCropEndSmooth_forAIF = scipy.signal.savgol_filter(mriSignalCropEnd_forAIF, window_length=25, polyorder=3, mode='nearest')
if outPlotFname:
# plot results
fig, ((ax1, ax2)) = plt.subplots(2, 1, figsize=(20, 9.5))
ax1.set_title('Final smooth & crop of signal')
ax1.set_xlabel('Points')
ax1.plot(np.arange(mriSignalRegrid.size), mriSignalRegrid, label='original signal', color='black', lw=0.7, marker='+')
ax1.plot(np.arange(mriSignalRegrid.size), mriSignalCrop, label='cropped signal', color='tab:blue', lw=0.7, marker='.')
ax1.plot(np.arange(mriSignalRegrid.size), mriSignalCropSmooth, label='smoothly cropped signal', color='tab:red', lw=0.7, marker='.')
ax1.axvline(x=firstPassStartRepRegrid, label='first pass start', color='green', lw=1)
ax1.axvline(x=firstPassEndRepRegrid, label='first pass end', color='red', lw=1)
ax1.legend()
ax1.grid()
ax2.set_title('Final smooth & crop of signal for AIF detection')
ax2.set_xlabel('Points')
ax2.plot(np.arange(mriSignalRegrid.size), mriSignalRegrid, label='original signal', color='black', lw=0.7, marker='+')
ax2.plot(np.arange(mriSignalRegrid.size), mriSignalCropEnd_forAIF, label='cropped signal', color='tab:blue', lw=0.7, marker='.')
ax2.plot(np.arange(mriSignalRegrid.size), mriSignalCropEndSmooth_forAIF, label='smoothly cropped signal', color='tab:red', lw=0.7, marker='.')
ax2.axvline(x=firstPassStartRepRegrid, label='first pass start', color='green', lw=1)
ax2.axvline(x=firstPassEndRepRegrid, label='first pass end', color='red', lw=1)
ax2.axvline(x=firstPassMiddleRep, label='first pass middle', color='orange', lw=1)
ax2.legend()
ax2.grid()
fig.savefig(outPlotFname)
plt.close()
return mriSignalCropSmooth, mriSignalCropEndSmooth_forAIF
def plot_signal_vs_TReff(effTR, signal, time, ofname=''):
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(20, 9.7))
ax1.set_xlabel('Effective TR (ms)')
ax1.set_ylabel('Signal')
ax1.set_title("Signal vs effective TR: (Pearson\'s R, p-value)={}".format(tuple(np.round(scipy.stats.pearsonr(effTR[1:], signal[1:]), decimals=4))))
ax1.grid(which='both')
ax1.plot(effTR, signal, linewidth=0, marker='+', markersize=5.0)
ax2.set_xlabel('$1 - e^{-TR_{eff}/T_{1}} (TR_{eff}\\ in\\ ms)$')
ax2.set_ylabel('Signal')
pearsonr, pval = scipy.stats.pearsonr(1 - np.exp(-effTR[1:]/1251.0), signal[1:])
ax2.set_title("Signal vs $1 - e^{-TR_{eff}/T_{1}}$: (Pearson\'s R, p-value)=(%.4f, %.4f)" % (pearsonr, pval))
ax2.grid(which='both')
ax2.plot(1 - np.exp(-effTR/1251.0), signal, linewidth=0, marker='+', markersize=5.0)
ax3.set_xlabel('Time (ms)')
ax3.set_ylabel('Signal')
ax3.set_title("Signal and effective TR vs time")
ax3.grid(which='both')
ax3.plot(time/1000, signal, linewidth=1.0, marker='+', markersize=7.0)
ax3_effTR = ax3.twinx()
ax3_effTR.plot(time/1000, effTR, linewidth=1.0, marker='+', markersize=7.0, color='orange')
ax3_effTR.tick_params(axis='y', labelcolor='orange')
ax3_exp_effTR = ax3.twinx()
ax3_exp_effTR.plot(time/1000, 1 - np.exp(-effTR/1251.0), linewidth=1.0, marker='+', markersize=7.0, color='green')
ax3_exp_effTR.tick_params(axis='y', labelcolor='green')
ax4.set_xlabel('Time (ms)')
ax4.set_ylabel('Signal', color='green')
ax4.set_title("Signal vs time")
ax4.grid(which='both')
signal_norm_exp = np.divide(signal, (1 - np.exp(-effTR/1251.0)))
ax4.plot(time/1000, signal_norm_exp, linewidth=1, marker='+', markersize=7.0, color='green', label='norm by $1 - e^{-TR_{eff}/T_{1}}$: COV='+str(round(100*np.std(signal_norm_exp[1:])/np.mean(signal_norm_exp[1:]), 2))+'%')
ax4.tick_params(axis='y', color='green')
ax4.legend(loc="lower left")
ax4_effTR = ax4.twinx()
signal_norm_TR = np.divide(signal, effTR)
ax4_effTR.plot(time/1000, signal_norm_TR, linewidth=1, marker='+', markersize=7.0, color='orange', label='norm by $TR_{eff}$: COV='+str(round(100*np.std(signal_norm_TR[1:])/np.mean(signal_norm_TR[1:]), 2))+'%')
ax4_effTR.set_ylabel('Signal', color='orange')
ax4_effTR.tick_params(axis='y', color='orange')
ax4_effTR.legend(loc="lower right")
ax4_rawsignal = ax4.twinx()
ax4_rawsignal.plot(time/1000, signal, linewidth=1, marker='+', markersize=7.0, color='blue', label='raw signal: COV='+str(round(100*np.std(signal[1:])/np.mean(signal[1:]), 2))+'%')
ax4_rawsignal.set_ylabel('Signal', color='blue')
ax4_rawsignal.tick_params(axis='y', color='blue')
ax4_rawsignal.legend(loc="upper right")
plt.show(block=True)
if ofname:
fig.savefig(ofname+'_signal_vs_TReff.png')
def plot_signal_vs_time(time, signal, time_interp, signal_interp, signal_smoothed, signal_by_slice_smoothed, ofname='', baseline_nb=50):
fig, (axtime, axtime_norm) = plt.subplots(1, 2, figsize=(20, 9.5))
plt.subplots_adjust(wspace=0.2, left=0.05, right=0.95)
axtime.set_xlabel('Time (s)')
axtime.set_ylabel('Signal')
axtime.grid(which='both')
axtime.plot(time/1000., signal, marker='+', linewidth=0.2, color='black', label='raw signal')
# baseline and injection
axtime.axvline(x=time[baseline_nb+1]/1000, linestyle='-', label='injection', color='red', linewidth=1.0)
axtime.axhline(y=np.mean(signal[0:baseline_nb]), linestyle='-', label='baseline', color='gray', alpha=0.7, linewidth=3.0)
axtime.legend()
# display repetition numbers on top x-axis
time_ticks_locs = axtime.get_xticks()
reps_nb_interp_to_time_ticks_locs = np.interp(time_ticks_locs, time / 1000, range(len(time)))
axreps = axtime.twiny()
axreps.plot(time/1000., signal, marker='', linewidth=0) # fake plot to get the same x-axis
axreps.set_xticklabels(np.round(reps_nb_interp_to_time_ticks_locs, decimals=0).astype(int))
axreps.set_xlabel('Repetition number')
# add effective TR
axTR = axtime.twinx()
axTR.plot(time / 1000, np.append(np.nan, np.diff(time)), color='orange', linewidth=0.8, label='effective TR')
axTR.set_ylabel('Effective TR (ms)', color='orange')
axTR.tick_params(axis='y', labelcolor='orange')
# plot normalized signal
axtime_norm.set_xlabel('Time (s)')
axtime_norm.set_ylabel('Signal')
axtime_norm.grid(which='both')
axtime_norm.plot(time_interp/1000., signal_interp, linestyle='-', color='gray', alpha=0.7, label='S$_{inter}$')
axtime_norm.plot(time_interp/1000., signal_smoothed, linestyle='-', color='green', label='S$_{smoothed}$')
# plt.plot(signal_reptimes_outliers[1, :]/1000., signal_reptimes_outliers[0, :], marker='+', linewidth=0., color='red', label='outliers')
# plot by slice
colors = [cm.jet(slc) for slc in np.linspace(0.0, 1.0, signal_by_slice_smoothed.shape[1])]
for iz, color in enumerate(colors):
axtime_norm.plot(time/1000., signal_by_slice_smoothed[:, iz], label="z="+str(iz), color=color, lw=1, marker='.', ms=2.)
axtime_norm.axvline(x=time[baseline_nb+1]/1000, linestyle='-', label='injection', color='red', linewidth=1.0)
axtime_norm.legend()
# # plot physio on the same graph but different scale
# ax_physio = plt.gca().twinx()
# ax_physio.plot(time/1000., physio_values, marker=None, color='cyan', label='pulseOx')
if ofname:
fig.savefig(ofname+'_signal_vs_time.png')
def extract_acqtime_and_physio(log_fname, nrep_nii, physioplot_out_fname=''):
# pulseOx ----------------------------
if os.path.exists(log_fname+'.puls'):
time_puls, puls_values, epi_acqtime_puls, epi_event_puls, acq_window_puls = dsc_extract_physio.read_physiolog(log_fname+'.puls', sampling_period=20) # extract physio signal
reps_table_puls, slice_table_puls = dsc_extract_physio.sort_event_times(epi_acqtime_puls, epi_event_puls) # sort event times
if physioplot_out_fname:
dsc_extract_physio.plot_physio(time_puls, puls_values, epi_acqtime_puls, reps_table_puls, acq_window_puls, physioplot_out_fname+'_pulseOx') # plot physio signal
# calculate acquisition time for each rep
nrep_pulseOxLog = np.sum(reps_table_puls[:, 1])
if nrep_nii != nrep_pulseOxLog:
            raise ValueError('Number of repetitions in image is different from the number of repetitions recorded in pulseOx physiolog.')
reps_acqtime_pulseOx = np.squeeze(np.mean(slice_table_puls[np.where(reps_table_puls[:, 1] == 1), :], axis=2))
else:
reps_acqtime_pulseOx = 900*np.arange(0, nrep_nii)
time_puls = np.linspace(np.min(reps_acqtime_pulseOx), np.max(reps_acqtime_pulseOx), int((np.max(reps_acqtime_pulseOx) - np.min(reps_acqtime_pulseOx))/20000))
puls_values = None
# respiration ----------------------------
if os.path.exists(log_fname+'.resp'):
time_resp, resp_values, epi_acqtime_resp, epi_event_resp, acq_window_resp = dsc_extract_physio.read_physiolog(log_fname+'.resp', sampling_period=20) # extract physio signal
reps_table_resp, slice_table_resp = dsc_extract_physio.sort_event_times(epi_acqtime_resp, epi_event_resp) # sort event times
if physioplot_out_fname:
dsc_extract_physio.plot_physio(time_resp, resp_values, epi_acqtime_resp, reps_table_resp, acq_window_resp, physioplot_out_fname+'_resp') # plot physio signal
# calculate acquisition time for each rep
nrep_respLog = np.sum(reps_table_resp[:, 1])
if nrep_nii != nrep_respLog:
            raise ValueError('Number of repetitions in image is different from the number of repetitions recorded in respiration physiolog.')
reps_acqtime_resp = np.squeeze(np.mean(slice_table_resp[np.where(reps_table_resp[:, 1] == 1), :], axis=2))
else:
reps_acqtime_resp = 900*np.arange(0, nrep_nii)
time_resp = np.linspace(np.min(reps_acqtime_resp), np.max(reps_acqtime_resp), int((np.max(reps_acqtime_resp) - np.min(reps_acqtime_resp))/20000))
resp_values = None
return reps_acqtime_pulseOx, time_puls, puls_values, reps_acqtime_resp, time_resp, resp_values
def extract_acqtime_and_physio_by_slice(log_fname, nSlices, nAcqs, acqTime_firstImg, TR=1000):
"""
    :param log_fname: path prefix of the physiolog files (without the .puls/.resp extension)
    :param nSlices: number of slices per repetition
    :param nAcqs: number of acquisitions (repetitions) in the image series
    :param acqTime_firstImg: acquisition time of the first image (used for 'CMRR'-type physiologs)
    :param TR: repetition time in ms (used as a fallback when no physiolog is found)
    :return: repsAcqTime: (1+nSlices) x nAcqs x (PulseOx, Resp) acquisition times; row 0 is the mean across slices (SC), rows 1..nSlices are per-slice times
             timePhysio: N_physio_points x (PulseOx, Resp)
             valuesPhysio: N_physio_points x (PulseOx, Resp)
    """
repsAcqTime = np.zeros((1+nSlices, nAcqs, 2))
# pulseOx ----------------------------
if os.path.exists(log_fname+'.puls'):
print('Processing pulseOx log: '+log_fname+'.puls')
if 'slr' in os.path.basename(log_fname):
print('\t[\'slr\'-type physiolog]')
time_puls, puls_values, epi_acqtime_puls, epi_event_puls, acq_window_puls = dsc_extract_physio.read_physiolog(log_fname+'.puls', sampling_period=20) # extract physio signal
reps_table_puls, slices_table_puls = dsc_extract_physio.sort_event_times(epi_acqtime_puls, epi_event_puls) # sort event times
nrep_pulseOxLog = np.sum(reps_table_puls[:, 1])
if nAcqs != nrep_pulseOxLog:
                raise ValueError('Number of repetitions in image is different from the number of repetitions recorded in pulseOx physiolog.')
# get acquisition time for each slice
repsAcqTime[1:, :, 0] = np.squeeze(slices_table_puls[np.where(reps_table_puls[:, 1] == 1), :]).T
else:
print('\t[\'CMRR\'-type physiolog]')
time_puls, trigger_start_times_puls, trigger_end_times_puls, puls_values, acq_window_puls, acqStartTime_puls = dsc_extract_physio.read_physiolog_cmrr(log_fname+'.puls')
triggerStartTimes_imgOnly_puls = dsc_extract_physio.extract_acqTimes_cmrr(trigger_start_times_puls, acqTime_firstImg, acqStartTime_puls, trigger_end_times_puls)
repsAcqTime[1:, :, 0] = np.tile(triggerStartTimes_imgOnly_puls, (nSlices, 1)) + np.tile(TR/nSlices * np.arange(0, nSlices), (nAcqs, 1)).T
else:
print('\nNo log found for pulseOx.')
repsAcqTime[1:, :, 0] = TR*np.tile(np.arange(0, nAcqs), (nSlices, 1)) + np.tile(TR/nSlices*np.arange(0, nSlices), (nAcqs, 1)).T
time_puls = np.arange(np.min(repsAcqTime), np.max(repsAcqTime), step=20)
puls_values = None
# take the mean acquisition time across slices for the whole rep (SC)
    repsAcqTime[0, :, 0] = np.mean(repsAcqTime[1:, :, 0], axis=0)
# respiration ----------------------------
if os.path.exists(log_fname+'.resp'):
print('Processing respiration log: '+log_fname+'.resp')
if 'slr' in os.path.basename(log_fname):
print('\t[\'slr\'-type physiolog]')
time_resp, resp_values, epi_acqtime_resp, epi_event_resp, acq_window_resp = dsc_extract_physio.read_physiolog(log_fname+'.resp', sampling_period=20) # extract physio signal
reps_table_resp, slices_table_resp = dsc_extract_physio.sort_event_times(epi_acqtime_resp, epi_event_resp) # sort event times
nrep_respLog = np.sum(reps_table_resp[:, 1])
if nAcqs != nrep_respLog:
                raise ValueError('Number of repetitions in image is different from the number of repetitions recorded in respiration physiolog.')
# get acquisition time for each slice
repsAcqTime[1:, :, 1] = np.squeeze(slices_table_resp[np.where(reps_table_resp[:, 1] == 1), :]).T
else:
print('\t[\'CMRR\'-type physiolog]')
time_resp, trigger_start_times_resp, trigger_end_times_resp, resp_values, acq_window_resp, acqStartTime_resp = dsc_extract_physio.read_physiolog_cmrr(log_fname+'.resp')
else:
print('\nNo log found for respiration.\n')
repsAcqTime[1:, :, 1] = TR*np.tile(np.arange(0, nAcqs), (nSlices, 1)) + np.tile(TR/nSlices*np.arange(0, nSlices), (nAcqs, 1)).T
time_resp = np.arange(np.min(repsAcqTime), np.max(repsAcqTime), step=20)
resp_values = None
# take the mean acquisition time across slices for the whole rep (SC)
    repsAcqTime[0, :, 1] = np.mean(repsAcqTime[1:, :, 1], axis=0)
# merge the two physiological signal into one array each (for time and physio values)
if time_puls.size > time_resp.size:
time_resp = np.hstack((time_resp, time_puls[time_resp.size:]))
resp_values = np.pad(resp_values, (0, puls_values.size - resp_values.size), 'reflect')
elif time_puls.size < time_resp.size:
time_puls = np.hstack((time_puls, time_resp[time_puls.size:]))
puls_values = np.pad(puls_values, (0, resp_values.size - puls_values.size), 'reflect')
timePhysio = np.vstack((time_puls, time_resp)).T
valuesPhysio = np.vstack((puls_values, resp_values)).T
return repsAcqTime, timePhysio, valuesPhysio
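# --- Worked sketch (illustration only) of the fallback acquisition-time layout used above when no
# physiolog is found: repsAcqTime[1+s, r] = TR*r + (TR/nSlices)*s. The TR, nSlices and nAcqs values
# below are arbitrary examples, not taken from the original acquisition.
def _demo_fallback_acqtimes(TR=1000.0, nSlices=3, nAcqs=4):
    import numpy as np
    acqTimes = TR * np.tile(np.arange(0, nAcqs), (nSlices, 1)) + np.tile(TR / nSlices * np.arange(0, nSlices), (nAcqs, 1)).T
    # acqTimes[s, r] is the acquisition time (in ms) of slice s at repetition r,
    # e.g. acqTimes[1, 2] == 2*1000 + 1000/3, i.e. about 2333.3 ms
    return acqTimes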
def plot_pulseOx_and_resp(pulseTime, pulseVal, pulseAcqTimes, respTime, respVal, respAcqTime, ofname=''):
fig, ((ax1)) = plt.subplots(1, 1, figsize=(20, 9.5))
ax1.plot(pulseTime, pulseVal, color='red', label='PulseOx signal')
ax1.plot(respTime, respVal, color='blue', label='Respiration signal')
for acqtime in pulseAcqTimes:
ax1.axvline(x=acqtime, ymin=0, ymax=.5, color='red', lw=0.8, label='reps' if np.where(pulseAcqTimes==acqtime)[0][0] == 0 else "_nolegend_")
for acqtime in respAcqTime:
ax1.axvline(x=acqtime, ymin=.5, ymax=1, color='blue', lw=0.8, label='reps' if np.where(respAcqTime==acqtime)[0][0] == 0 else "_nolegend_")
ax1.legend()
ax1.grid()
fig.show()
if ofname:
ax1.set_title('Saved to: ' + ofname + '.png')
fig.savefig(ofname+'.png')
plt.close()
def plot_signal_vs_resp(respTime, respSignal, mriTime, mriSignal, ofname=''):
# interpolate respiration signal to MRI signal sampling
respSignalSampledToMRISignal = np.interp(mriTime, respTime, respSignal)
# remove points where respiration signal is saturated
mriSignal_noRespSat = np.delete(mriSignal, np.where((respSignalSampledToMRISignal == 0) | (respSignalSampledToMRISignal == 4095)))
respSignal_noRespSat = np.delete(respSignalSampledToMRISignal, np.where((respSignalSampledToMRISignal == 0) | (respSignalSampledToMRISignal == 4095)))
mriTime_noRespSat = np.delete(mriTime, np.where((respSignalSampledToMRISignal == 0) | (respSignalSampledToMRISignal == 4095)))
# interpolate MRI signal to respiration signal sampling
mriSignalSampledToRespSignal = np.interp(respTime, mriTime, mriSignal)
mriSignalSampledToRespSignal = mriSignalSampledToRespSignal[np.abs(respTime - np.min(mriTime)).argmin():np.abs(respTime - np.max(mriTime)).argmin()]
respTimeCropToMRI = respTime[np.abs(respTime - np.min(mriTime)).argmin():np.abs(respTime - np.max(mriTime)).argmin()]
respSignalCropToMRI = respSignal[np.abs(respTime - np.min(mriTime)).argmin():np.abs(respTime - np.max(mriTime)).argmin()]
# remove points where respiration signal is saturated
mriSignalOverSampled_noRespSat = np.delete(mriSignalSampledToRespSignal, np.where((respSignalCropToMRI == 0) | (respSignalCropToMRI == 4095)))
respSignalCropToMRI_noRespSat = np.delete(respSignalCropToMRI, np.where((respSignalCropToMRI == 0) | (respSignalCropToMRI == 4095)))
respTimeCropToMRI_noRespSat = np.delete(respTimeCropToMRI, np.where((respSignalCropToMRI == 0) | (respSignalCropToMRI == 4095)))
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(20, 9.7))
plt.subplots_adjust(wspace=0.3, left=0.05, right=0.95, hspace=0.3, bottom=0.05, top=0.95)
ax1.set_title("Signal vs time")
ax1.set_xlabel('Time (ms)')
ax1.set_ylabel('Signal', color='green')
ax1.grid(which='both')
ax1.plot(mriTime/1000, mriSignal, linewidth=1, marker='+', markersize=7.0, color='green', label='$S_{MRI}$: COV='+str(round(100*np.std(mriSignal)/np.mean(mriSignal), 2))+'%')
ax1.plot(respTimeCropToMRI/1000, mriSignalSampledToRespSignal, linewidth=1.2, marker=None, color='gray', label='$S_{MRI} interp$: COV='+str(round(100*np.std(mriSignalSampledToRespSignal)/np.mean(mriSignalSampledToRespSignal), 2))+'%')
ax1.tick_params(axis='y', labelcolor='green')
ax1.legend(loc="lower left")
ax1_resp = ax1.twinx()
ax1_resp.set_ylabel('Signal')
ax1_resp.grid(which='both')
ax1_resp.plot(respTime/1000, respSignal, linewidth=1, marker=None, color='blue', label='$S_{resp}$: COV=' + str(round(100 * np.std(respSignal) / np.mean(respSignal), 2)) + '%')
ax1_resp.plot(mriTime/1000, respSignalSampledToMRISignal, linewidth=0, marker='+', color='red', label='$S_{resp}$: COV=' + str(round(100 * np.std(respSignalSampledToMRISignal) / np.mean(respSignalSampledToMRISignal), 2)) + '%')
ax1_resp.plot(mriTime_noRespSat/1000, respSignal_noRespSat, linewidth=0, marker='+', color='blue', label='$S_{resp}$ no sat')
ax1_resp.tick_params(axis='y', labelcolor='blue')
ax1_resp.legend(loc="lower right")
ax2.set_title("MRI signal vs Respiration signal: (Pearson\'s R, p-value)={}".format(tuple(np.round(scipy.stats.pearsonr(mriSignalOverSampled_noRespSat, respSignalCropToMRI_noRespSat), decimals=4))))
ax2.set_xlabel('Respiration signal')
ax2.set_ylabel('MRI signal (interpolated to respiration sampling)')
ax2.grid(which='both')
# ax2.plot(respSignalSampledToMRISignal, mriSignal, linewidth=0, marker='+', markersize=7.0, color='tab:red', label='all points')
# ax2.plot(respSignal_noRespSat, mriSignal_noRespSat, linewidth=0, marker='+', markersize=7.0, color='tab:blue', label='without respiration signal saturation')
# ax2.plot(respSignalCropToMRI, mriSignalSampledToRespSignal, linewidth=0, marker='+', markersize=7.0, color='tab:orange', label='all points')
ax2.plot(respSignalCropToMRI_noRespSat, mriSignalOverSampled_noRespSat, linewidth=0, marker='+', markersize=7.0, color='tab:green', label='without respiration signal saturation')
ax2.legend()
ax3.set_title("Signal vs time interpolated to respiration sampling") # --------------------------------------------
ax3.set_xlabel('Time (ms)')
ax3.set_ylabel('Signal', color='green')
ax3.grid(which='both')
ax3.plot(respTimeCropToMRI/1000, mriSignalSampledToRespSignal, linewidth=0, marker='.', markersize=3.0, color='tab:red', label='$S_{MRI} interp to resp$')
ax3.plot(respTimeCropToMRI_noRespSat/1000, mriSignalOverSampled_noRespSat, linewidth=0, marker='.', markersize=3.0, color='green', label='$S_{MRI} interp to resp NO RESP SAT$')
ax3.tick_params(axis='y', labelcolor='green')
ax3.legend(loc="lower left")
ax3_resp = ax3.twinx()
ax3_resp.set_ylabel('Signal')
ax3_resp.plot(respTimeCropToMRI/1000, respSignalCropToMRI, linewidth=0, marker='.', markersize=3.0, color='tab:red', label='$S_{resp}$ crop')
ax3_resp.plot(respTimeCropToMRI_noRespSat/1000, respSignalCropToMRI_noRespSat, linewidth=0, marker='.', markersize=3.0, color='blue', label='$S_{resp}$ NO RESP SAT')
ax3_resp.tick_params(axis='y', labelcolor='blue')
ax3_resp.legend(loc="lower right")
ax3_respPeriod = ax3.twinx()
respSignalMax, respSignalMin = peakdet(respSignalCropToMRI, 300)
respPeriod = np.append(np.nan, np.diff(respTimeCropToMRI[respSignalMax[:, 0]]))/1000
ax3_respPeriod.plot(respTimeCropToMRI[respSignalMax[:, 0]]/1000, respPeriod, linewidth=3.0, marker='+', markersize=10, color='tab:pink', label='Resp period')
ax3_respPeriod.tick_params(axis='y', labelcolor='tab:pink')
    ax3_respPeriod.set_ylabel('Resp period in s (mean = '+str(round(np.mean(respPeriod[1:]), 2))+' ['+str(np.min(respPeriod[1:]))+', '+str(np.max(respPeriod[1:]))+'])', color='tab:pink')
for tPeak in respTimeCropToMRI[respSignalMax[:, 0]]/1000:
ax3_respPeriod.axvline(x=tPeak, linestyle='-', color='tab:pink', linewidth=1.0)
ax3_corr = ax3.twinx()
ax3_corr.plot(respTimeCropToMRI_noRespSat/1000, scipy.signal.correlate(mriSignalOverSampled_noRespSat, respSignalCropToMRI_noRespSat, mode='same', method='direct'), linewidth=1, marker=None, markersize=0, color='tab:orange', label='Cross-corr')
ax3_corr.legend(loc="upper right")
ax4.set_title("FFT") # --------------------------------------------------------------------------------------------
# respSignal_FFT = np.fft.fft((respSignalCropToMRI - np.mean(respSignalCropToMRI))/np.std(respSignalCropToMRI))
# mriSignal_FFT = np.fft.fft((mriSignalSampledToRespSignal - np.mean(mriSignalSampledToRespSignal))/np.std(mriSignalSampledToRespSignal))
# freq = np.fft.fftfreq(respTimeCropToMRI.size, d=respTimeCropToMRI[1]-respTimeCropToMRI[0]) # in MHz
# idx_f0 = np.where(freq == 0)[0]
# idx_ascending_freq = np.argsort(freq)
freqResMRI, respSignalResMRI_FFT = fft_warpper(mriTime, respSignalSampledToMRISignal, increase_res_factor=5)
freqResMRI, mriSignalResMRI_FFT = fft_warpper(mriTime, mriSignal, increase_res_factor=5)
ax4.set_xlabel('Frequency (Hz)')
ax4.set_ylabel('Signal')
ax4.grid(which='both')
# ax4.plot(freq[idx_ascending_freq]*1000, np.abs(respSignal_FFT[idx_ascending_freq]), linewidth=0.9, marker='.', markersize=0, color='black', label='$S_{resp}$')
# ax4.plot(freq[idx_ascending_freq]*1000, np.abs(mriSignal_FFT[idx_ascending_freq]), linewidth=0.9, marker='.', markersize=0, color='green', label='$S_{MRI}\ interp\ to\ resp$')
ax4.plot(freqResMRI*1000, respSignalResMRI_FFT, label='$S_{resp}\ res\ MRI$', linewidth=0.9, marker='+', markersize=0, color='black')
ax4.plot(freqResMRI*1000, mriSignalResMRI_FFT, label='$S_{MRI}\ res\ MRI$', linewidth=0.9, marker='+', markersize=0, color='green')
ax4.axvspan(xmin=1/ | np.min(respPeriod[1:]) | numpy.min |
# docking.py
#import necessary modules.
import krpc
import time
import numpy as np
def acquire_target(conn,vessel,target):
# conn is a krpc.connect() object, vessel is a vessel object, target is a string.
# vessel_list is a list of all vessels currently in use.
vessel_list = conn.space_center.vessels
# n is the number of vessels currently in use.
n = len(vessel_list)
# the loop cycles through all vessels in the list, and if the vessel's name matches the target string provided, it sets it as the target vessel.
for i in range(0,n):
vessel_name = vessel_list[i].name
if vessel_name == target:
conn.space_center.target_vessel = vessel_list[i]
return None
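# --- Hedged usage sketch for acquire_target (illustration only): it assumes a running KSP instance with
# the kRPC server enabled, and the vessel name 'Target Station' is a placeholder, not from the original mission.
def _demo_acquire_target():
    conn = krpc.connect(name='docking demo')
    vessel = conn.space_center.active_vessel
    acquire_target(conn, vessel, 'Target Station')
    return conn.space_center.target_vessel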
def orbit_finder(conn,vessel):
# IMPORTANT: The more circular the vessel's initial orbit is, the more accurate the orbit finder will be.
# conn is a krpc.connect() object, vessel is a vessel object.
while True:
# retrieves the target vessel as a vessel object.
target = conn.space_center.target_vessel
# finds the period of the current orbit.
vessel_period = vessel.orbit.period
# creates an array of times ranging from 0 to the vessel's period to be used to test where best to fire engines for a Hohmann transfer.
times = np.linspace(0,vessel_period,1000)
# retrieves the current time.
current_time = conn.space_center.ut
# retrieves the vessel's current speed.
current_v = vessel.orbit.speed
#retrieves the current orbital radius.
vessel_radius = vessel.orbit.radius
# the Hohmann transfer orbit used to intercept the target will have a semi-major axis of the average of the original vessel orbit and the target orbit.
needed_a = (vessel_radius + target.orbit.radius)/2
# retrieves the gravitational constant.
G = conn.space_center.g
# retrieves the mass of the body the vessel is orbiting.
M = vessel.orbit.body.mass
# plugs in the previously defined variables into the vis-viva equation to find the needed velocity.
needed_v = np.sqrt(G*M*(2/vessel_radius - 1/needed_a))
# finds the delta V needed for the maneuver.
delta_v = needed_v - current_v
# the program loops over each time in the times array and creates a maneuver node at that time with the needed delta V.
# Using the orbit created by the node, it then checks how close the target and vessel will be when the vessel is at apoapsis (since that is when the vessel will cross the target's orbit).
# If the distance is close enough, the node is returned to be used as a variable in later programs.
# If the distance is not close enough, the node is deleted and the program moves on to the next time.
for i in times:
maneuver_time = current_time + i
node = vessel.control.add_node(maneuver_time, prograde=delta_v)
possible_orbit = node.orbit
time_to_pred_apoapsis = possible_orbit.period / 2
node_position = np.array(node.position(vessel.orbit.body.reference_frame))
            node_unit_vector = node_position / np.linalg.norm(node_position)
possible_apoapsis = possible_orbit.apoapsis
vessel_position_at_apoapsis = possible_apoapsis * -1 * node_unit_vector
target_position_at_apoapsis = target.orbit.position_at(maneuver_time+time_to_pred_apoapsis,target.orbit.body.reference_frame)
            dist = np.linalg.norm(vessel_position_at_apoapsis - np.array(target_position_at_apoapsis))
if dist < 1700:
return node
else:
vessel.control.remove_nodes()
# If no maneuvers result in a close enough distance, the program warps the vessel forward in time, and then restarts the search.
# Since the vessel has fast-forwarded, new times will be available to check.
conn.space_center.rails_warp_factor = 4
time.sleep(3)
conn.space_center.rails_warp_factor = 0
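# --- Worked sketch (illustration only) of the vis-viva step used in orbit_finder: for a circular orbit of
# radius r1 and a coplanar target orbit of radius r2, the transfer ellipse has a = (r1 + r2)/2 and the
# required speed at r1 is sqrt(mu*(2/r1 - 1/a)). The radii and mu below are assumed, Kerbin-like values.
def _demo_hohmann_delta_v(r1=750e3, r2=850e3, mu=3.5316e12):
    import numpy as np
    a = (r1 + r2) / 2.0
    v_circular = np.sqrt(mu / r1)
    v_transfer = np.sqrt(mu * (2.0 / r1 - 1.0 / a))
    return v_transfer - v_circular  # prograde delta-v (m/s) for the first burn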
def first_slowdown(conn,vessel):
# conn is a krpc.connect() object, vessel is a vessel object.
# retrieves the target vessel as a vessel object.
target = conn.space_center.target_vessel
# initializes a reference frame centered on the target. This makes it easy to measure how far the vessel is from the target and to get the velocity relative to the target.
target_ref = target.orbital_reference_frame
# sets the autopilot reference frame to the target-centered reference frame.
vessel.auto_pilot.reference_frame = target_ref
# engages the autopilot.
vessel.auto_pilot.engage()
while True:
# returns the velocity of the vessel relative to the target as a tuple.
velocity_vector = vessel.flight(target_ref).velocity
# using the velocity vector, it sets the direction the autopilot should point the vessel to retrograde.
vessel.auto_pilot.target_direction = (-velocity_vector[0],-velocity_vector[1],-velocity_vector[2])
# retrieves the current time.
current_time = conn.space_center.ut
# returns the position of the vessel relative to the target as a tuple.
current_position = vessel.orbit.position_at(current_time,target_ref)
# finds the current distance by finding the magnitude of the current position.
current_distance = np.linalg.norm(current_position)
# when the vessel is 15000 meters away, the engines turn on to slow the vessel relative to the target.
# once the vessel's relative speed is less than 40 m/s, the engines turn off and this function exits.
if current_distance < 15000:
conn.space_center.rails_warp_factor = 0
vessel.control.throttle = 1
if vessel.flight(target_ref).speed < 40:
vessel.control.throttle = 0
return None
# when the distance is greater than 30000 meters, the vessel warps to save time. Then it goes back to normal time flow.
elif current_distance > 30000:
conn.space_center.rails_warp_factor = 4
else:
conn.space_center.rails_warp_factor = 0
def zeroing_inclination(conn,vessel):
# conn is a krpc.connect() object, vessel is a vessel object.
# retrieves the target vessel as a vessel object.
target = conn.space_center.target_vessel
# sets the autopilot reference frame to the vessel's orbital reference frame (so that the normal/anti-normal directions are basis vectors).
vessel.auto_pilot.reference_frame = vessel.orbital_reference_frame
# engages the autopilot.
vessel.auto_pilot.engage()
while True:
# finds the relative incline of the target (in radians).
incline = vessel.orbit.relative_inclination(target)
# finds the angular separation from the periapsis to the ascending node (in radians).
an_location = vessel.orbit.true_anomaly_at_an(target)
# finds the angular separation between the vessel and the periapsis (in radians).
vessel_location = vessel.orbit.true_anomaly
# when the incline is small enough, the engines should turn off and the function will exit.
if abs(incline) < 0.0005:
vessel.control.throttle = 0
return None
# if the inclination is positive, the vessel points in the anti-normal direction. If the inclination is negative, the vessel points in the normal direction.
if incline > 0:
vessel.auto_pilot.target_direction = (0,0,-1)
else:
vessel.auto_pilot.target_direction = (0,0,1)
# if the angular separation of the ascending node and the vessel is low, the engines should fire. Otherwise, the engines should not fire.
if abs(an_location-vessel_location) < 0.05:
vessel.control.throttle = 1
else:
vessel.control.throttle = 0
# controls if the vessel should warp forward in time depending on how close the ascending node and the vessel are (to save time).
if abs(an_location-vessel_location) > np.pi / 12:
conn.space_center.rails_warp_factor = 4
else:
conn.space_center.rails_warp_factor = 0
def execute_transfer_burn(conn,vessel,node):
# conn is a krpc.connect() object, vessel is a vessel object, and node is a node object.
# arrived is a boolean that will ensure that if the vessel gets to the maneuver node, it will not stop the engines until the maneuver is complete.
arrived = False
while True:
# sets the reference frame to the vessel's orbital reference frame so that the velocity vector is a basis vector.
vessel.auto_pilot.reference_frame = vessel.orbital_reference_frame
# engages the autopilot.
vessel.auto_pilot.engage()
# sets the target direction to prograde.
vessel.auto_pilot.target_direction = (0,1,0)
# if the vessel has ever been 20 seconds from the node, fire the engines.
if node.time_to < 20 or arrived == True:
arrived = True
vessel.control.throttle = 1
# once the vessel orbit's apoapsis is higher than the node orbit's apoapsis, the burn is complete and the engines turn off, the node is deleted, and this function exits.
if node.orbit.apoapsis_altitude < vessel.orbit.apoapsis_altitude:
vessel.control.throttle = 0
node.remove()
return None
# determines if the vessel should warp forward in time to save time.
if node.time_to > 400:
conn.space_center.rails_warp_factor = 4
else:
conn.space_center.rails_warp_factor = 0
def second_slowdown(conn,vessel):
# conn is a krpc.connect() object, vessel is a vessel object.
# retrieves the target vessel as a vessel object.
target = conn.space_center.target_vessel
# initializes a reference frame centered on the target. This makes it easy to measure how far the vessel is from the target and to get the velocity relative to the target.
target_ref = target.orbital_reference_frame
# sets the autopilot reference frame to the target-centered reference frame.
vessel.auto_pilot.reference_frame = target_ref
# engages the autopilot.
vessel.auto_pilot.engage()
while True:
# returns the velocity of the vessel relative to the target as a tuple.
velocity_vector = vessel.flight(target_ref).velocity
# using the velocity vector, it sets the direction the autopilot should point the vessel to prograde.
vessel.auto_pilot.target_direction = (velocity_vector[0],velocity_vector[1],velocity_vector[2])
# waits until the vessel is pointed in the right direction.
vessel.auto_pilot.wait()
# retrieves the current time.
current_time = conn.space_center.ut
# returns the position of the vessel relative to the target as a tuple.
current_position = vessel.orbit.position_at(current_time,target_ref)
# finds the current distance by finding the magnitude of the current position.
current_distance = np.linalg.norm(current_position)
# turns on RCS thrusters.
vessel.control.rcs = True
# sets the RCS thrusters to fire retrograde.
vessel.control.forward = -1
# once the vessel's relative speed is less than 10 m/s, the RCS turns off, the autopilot is disengaged, and the function exits.
if vessel.flight(target_ref).speed < 10:
vessel.control.rcs = False
vessel.auto_pilot.disengage()
return None
def third_slowdown(conn,vessel):
# conn is a krpc.connect() object, vessel is a vessel object.
# retrieves the target vessel as a vessel object.
target = conn.space_center.target_vessel
# initializes a reference frame centered on the target. This makes it easy to measure how far the vessel is from the target and to get the velocity relative to the target.
target_ref = target.orbital_reference_frame
# sets the autopilot reference frame to the target-centered reference frame.
vessel.auto_pilot.reference_frame = target_ref
# engages the autopilot.
vessel.auto_pilot.engage()
# initializes the list that will store the differences between the optimal dot product and the actual dot product.
differences = []
# initializes the list that will store the responses the system has tried to maximize the dot product.
responses = []
# initializes the list that will store the distances the vessel is from the target. Only is useful in the rare case that the vessel starts moving away from the vessel.
distances = []
# retrieves the current time.
current_time = conn.space_center.ut
# returns the position of the vessel relative to the target as a tuple.
current_position = vessel.orbit.position_at(current_time,target_ref)
# finds the current distance by finding the magnitude of the current position.
current_distance = np.linalg.norm(current_position)
# current_position is a vector from the target to the vessel, so the negative of it is a vector from the vessel to the target (the "target" symbol on the navball).
# this sets the autopilot to point right at the target.
vessel.auto_pilot.target_direction = (-current_position[0],-current_position[1],-current_position[2])
# waits until the autopilot is pointing in the right direction.
vessel.auto_pilot.wait()
# activates RCS thrusters.
vessel.control.rcs = True
while True:
# retrieves the current time.
current_time = conn.space_center.ut
# returns the position of the vessel relative to the target as a tuple.
current_position = vessel.orbit.position_at(current_time,target_ref)
# finds the current distance by finding the magnitude of the current position.
current_distance = np.linalg.norm(current_position)
# appends the current distance to the distances list.
distances.append(current_distance)
# current_position is a vector from the target to the vessel, so the negative of it is a vector from the vessel to the target (the "target" symbol on the navball).
# this sets the autopilot to point right at the target.
vessel.auto_pilot.target_direction = (-current_position[0],-current_position[1],-current_position[2])
# if the speed is less than 10 m/s and the distance is greater than 1200 meters, the RCS thrusts prograde.
# if the current distance is less than 1200 meters and the speed is greater than 2 m/s, the RCS thrusts retrograde.
# otherwise, the RCS does not fire forward nor backward.
if vessel.flight(target_ref).speed < 10 and current_distance > 1200:
vessel.control.forward = 1
elif current_distance < 1200 and vessel.flight(target_ref).speed > 2:
vessel.control.forward = -1
else:
vessel.control.forward = 0
# finds the current relative velocity of the vessel.
current_velocity = vessel.flight(target_ref).velocity
# calculates the dot product between the velocity vector and the target vector.
dot_prod = np.dot([current_velocity[0],current_velocity[1],current_velocity[2]],[-current_position[0],-current_position[1],-current_position[2]])
# returns the magnitude of the current velocity.
current_speed = vessel.flight(target_ref).speed
# calculates the maximum dot product based on the magnitudes of the velocity and target vectors.
optimal_dot = current_speed * current_distance
# finds the difference between the optimal and actual dot.
difference = optimal_dot - dot_prod
# appends this difference to the differences list.
differences.append(difference)
# initializes the list of actions the vessel can take to align the velocity vector with the target vector.
corrective_actions = ['up','down','left','right']
# once the length of differences is greater than 10, if the difference has decreased, the current action is set to what the most recent action was.
# If the difference has increased, it randomly selects another action to take as the current action.
# The current action is then appended to the responses list.
if len(differences) > 10:
if differences[-1] < differences[-2]:
current_action = responses[-1]
else:
alternatives = []
for action in corrective_actions:
if action != responses[-1]:
alternatives.append(action)
current_action = np.random.choice(alternatives)
responses.append(current_action)
else:
# if the length of differences is less than 10, it picks random actions no matter what and appends it to the responses list.
current_action = np.random.choice(corrective_actions)
responses.append(current_action)
# based on the distance, this determines the zone around the target vector where the velocity vector is considered "aligned enough".
# this angle is increased when the vessel is closer because the lower speed means the thrusters have greater effect on the velocity vector.
if current_distance < 1200:
min_angle = 6
else:
min_angle = 3
# the minimum dot product sets the cut off for if a dot product is considered "good enough".
# Without this, the RCS would continually fire throughout the flight, which is inefficient.
minimum_dot = optimal_dot * np.cos(np.deg2rad(min_angle))
# if the vectors are not very closely aligned, the vessel will use RCS according to the last response in the response list.
if dot_prod < minimum_dot:
if responses[-1] == 'up':
vessel.control.up = 1
vessel.control.right = 0
elif responses[-1] == 'down':
vessel.control.up = -1
vessel.control.right = 0
elif responses[-1] == 'left':
vessel.control.right = -1
vessel.control.up = 0
elif responses[-1] == 'right':
vessel.control.right = 1
vessel.control.up = 0
else:
# if the vectors are closely aligned, the up/down and left/right RCS is turned off.
vessel.control.up = 0
vessel.control.right = 0
# if the vessel is less than 180 meters from the target, the RCS is deactivated and this function exits.
# this happens at 180 meters because in the final approach, the target docking port is selected.
# the target port can only be selected within 200 meters because that is when the target vessel's part list is loaded.
if current_distance < 180:
vessel.control.rcs = False
return None
# the time.sleep() functions allow the program to fire the RCS long enough to determine if the change in velocity was beneficial.
# when closer, the sleep time is decreased because lower velocities mean it takes less time to produce a significant change in velocity.
if current_distance < 1200:
time.sleep(0.5)
# in rare cases, the vessel could start slowing down and eventually start moving away from the target.
# this ensures that the speed never goes lower than 1.5 m/s.
if current_speed < 1.5:
vessel.control.forward = 1
else:
vessel.control.forward = 0
else:
time.sleep(1)
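# --- Worked sketch (illustration only) of the alignment test used in third_slowdown: the dot product of
# the relative velocity with the vessel-to-target vector is compared against its maximum |v|*|d|, and the
# mis-alignment angle follows from arccos(dot/(|v|*|d|)). The two vectors below are arbitrary examples.
def _demo_alignment_angle(velocity=(4.0, 1.0, 0.0), to_target=(10.0, 0.0, 0.0)):
    import numpy as np
    v = np.asarray(velocity)
    d = np.asarray(to_target)
    dot_prod = np.dot(v, d)
    optimal_dot = np.linalg.norm(v) * np.linalg.norm(d)
    return np.rad2deg(np.arccos(dot_prod / optimal_dot))  # about 14 degrees for the default vectors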
def final_approach(conn,vessel,vessel_port,tar_port):
# conn is a krpc.connect() object, vessel is a vessel object, vessel_port is a string, tar_port is a string.
# retrieves a list of the docking ports on the vessel where the title of the port matches the vessel_port string.
vessel_port_list = vessel.parts.with_title(vessel_port)
# cycles through the docking port list and selects one that is not already docked.
# it sets this port to the controlling port, and makes sure the port's shield (if applicable) is deactivated.
for port in vessel_port_list:
if port.docking_port.state != conn.space_center.DockingPortState.docked:
vessel.parts.controlling = port
port.docking_port.shielded = False
controlling_port = port.docking_port
# retrieves the target vessel as a vessel object.
target = conn.space_center.target_vessel
# retrieves a list of all the docking ports on the target with the same title as the tar_port string.
target_port_list = target.parts.with_title(tar_port)
# cycles through the target port list and selects one that is not already docked.
# it sets this port as the target port, and deactivates the port's shield (if applicable).
for port in target_port_list:
if port.docking_port.state != conn.space_center.DockingPortState.docked:
conn.space_center.target_docking_port = port.docking_port
port.docking_port.shielded = False
# retrieves the target port as a part object (for ease of use later).
target_port = conn.space_center.target_docking_port
# creates a reference frame with the origin centered on the target port and the y-axis pointing directly perpendicular to the port.
target_ref = target_port.part.reference_frame
# sets the autopilot's reference frame to this new reference frame.
vessel.auto_pilot.reference_frame = target_ref
# engages the autopilot.
vessel.auto_pilot.engage()
# sets the vessel to point so that its controlling port is parallel to the target port.
vessel.auto_pilot.target_direction = (0,-1,0)
# sets the vessel's roll to 0.
vessel.auto_pilot.target_roll = 0
# waits until the vessel is pointed in the right direction.
vessel.auto_pilot.wait()
# initializes the list that will store the differences between the optimal dot product and the actual dot product.
differences = []
# initializes the list that will store the responses the system has tried to maximize the dot product.
responses = []
while True:
# if the docking was successful, the function exits.
if controlling_port.state == conn.space_center.DockingPortState.docked:
return None
# activates the RCS.
vessel.control.rcs = True
# finds the position of the controlling port relative to the target port as a tuple.
vessel_position = controlling_port.position(target_ref)
# sets the vessel's roll to 0.
vessel.auto_pilot.target_roll = 0
# sets the vessel to point so that its controlling port is parallel to the target port.
vessel.auto_pilot.target_direction = (0,-1,0)
# finds the current speed of the vessel relative to the target.
current_speed = vessel.flight(target_ref).speed
# finds the relative velocity of the vessel as a tuple.
rel_velocity = vessel.flight(target_ref).velocity
# sets the target vector to the negative of the vector extending from the target to the vessel (so this vector is from the vessel to the target).
target_dir = (-vessel_position[0],-vessel_position[1],-vessel_position[2])
# finds the magnitude of the target vector.
target_dir_mag = np.linalg.norm(target_dir)
# finds the dot product of the velocity vector and the target vector.
dot_prod = np.dot(rel_velocity,target_dir)
# finds the maximum dot product based on the magnitudes of the vectors.
optimal_dot = current_speed * target_dir_mag
# finds the difference between the optimal and actual dot.
difference = optimal_dot - dot_prod
# appends the difference to the differences list.
differences.append(difference)
# initializes the list of actions the vessel can take to align the velocity vector with the target vector.
corrective_actions = ['up','down','left','right']
# once the length of differences is greater than 3, if the difference has decreased, the current action is set to what the most recent action was.
# If the difference has increased, it randomly selects another action to take as the current action.
# The current action is then appended to the responses list.
if len(differences) > 3:
if differences[-1] < differences[-2]:
current_action = responses[-1]
else:
alternatives = []
for action in corrective_actions:
if action != responses[-1]:
alternatives.append(action)
current_action = np.random.choice(alternatives)
responses.append(current_action)
else:
# if the length of differences is less than 3, it picks random actions no matter what and appends it to the responses list.
current_action = np.random.choice(corrective_actions)
responses.append(current_action)
# determines at what angle the vectors are considered "close enough". This becomes less as the vessel gets closer to the target, with the minimum being 2 degrees.
min_angle = 2 + target_dir_mag * 0.025
# finds the minimum dot product that is considered "good enough".
minimum_dot = optimal_dot * np.cos(np.deg2rad(min_angle))
# determines the dot product of the target vector with the vector pointing into and perpendicular to the target port.
# this measurement is used to determine if the vessel is actually coming in backward to the target port, so that can be fixed.
target_heading_dot = np.dot(target_dir,(0,-1,0))
# the vessel should only try to dock if it is coming in at less than 60 degrees from the normal of the port.
target_heading_dot_cutoff = target_dir_mag *np.cos( | np.deg2rad(60) | numpy.deg2rad |
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 10 15:34:09 2020
@author: scott
"""
import numpy as np
from matplotlib import pyplot as plt
from EC_MS import Chip, Chem
chip = Chip()
print("design length = " + str(chip.l_cap * 1e3) + " mm")
T0 = 273.15
T_vec = np.linspace(0, 100, 101) + T0
fig, ax = plt.subplots()
ax.set_xlabel("Temperature / [deg C]")
ax.set_ylabel("capilarry flux / [nmol/s]")
p_colors = [(1e5, "k"), (2e5, "b"), (2.5e5, "g"), (3e5, "r")]
for p, color in p_colors:
n_dot_vec = | np.array([]) | numpy.array |
import numpy as np
from numpy import testing
import pytest
from unittest import TestCase
import pickle
from .basis import Basis
from .grid import Grid
def test_init_0():
x = np.linspace(0, 1, 10, endpoint=False)
y = np.linspace(0, 1, 20, endpoint=False)
z = np.linspace(0, 1, 30, endpoint=False)
xx, yy, zz = np.meshgrid(x, y, z, indexing='ij')
    data = xx ** 2 + yy ** 2 + zz ** 2
c = Grid(
Basis.orthorhombic((1, 1, 1)),
(x, y, z),
data,
)
assert len(c.coordinates) == 3
testing.assert_equal(c.coordinates[0], x)
testing.assert_equal(c.coordinates[1], y)
testing.assert_equal(c.coordinates[2], z)
testing.assert_equal(c.values.shape, (10, 20, 30))
def test_init_fail_0():
x = np.linspace(0, 1, 10, endpoint=False)
y = np.linspace(0, 1, 10, endpoint=False)
z = np.linspace(0, 1, 10, endpoint=False)
xx, yy, zz = np.meshgrid(x, y, z, indexing='ij')
data = xx ** 2 + yy ** 2 + zz ** 2
basis = Basis.orthorhombic((1, 1, 1))
with pytest.raises(ValueError):
Grid(basis, (x, y), data)
with pytest.raises(ValueError):
Grid(basis, (x, y, z, z), data)
with pytest.raises(ValueError):
Grid(basis, ((x, x), (y, y), (z, z)), data)
with pytest.raises(ValueError):
Grid(basis, (x, y, z), data[0])
def test_init_1():
x = np.linspace(0, 1, 10, endpoint=False)
y = np.linspace(0, 1, 20, endpoint=False)
z = np.linspace(0, 1, 30, endpoint=False)
xx, yy, zz = np.meshgrid(x, y, z, indexing='ij')
data = xx ** 2 + yy ** 2 + zz ** 2
with pytest.raises(ValueError):
Grid(
Basis.orthorhombic((1, 1, 1)),
(x, x, x),
data,
)
class GridTest(TestCase):
@classmethod
def setUpClass(cls):
x = np.linspace(0, 1, 2, endpoint=False)
y = np.linspace(0, 1, 3, endpoint=False)
z = np.linspace(0, 1, 4, endpoint=False)
xx, yy, zz = np.meshgrid(x, y, z, indexing='ij')
data = xx ** 2 + yy ** 2 + zz ** 2
cls.empty = Basis.orthorhombic((1, 2, 3))
cls.grid = Grid(cls.empty, (x, y, z), data)
def test_pickle(self):
c = pickle.loads(pickle.dumps(self.grid))
testing.assert_equal(c.vectors, self.grid.vectors)
testing.assert_equal(c.coordinates, self.grid.coordinates)
testing.assert_equal(c.values, self.grid.values)
def test_eq(self):
assert self.grid == self.grid.copy()
assert self.grid == self.grid.copy(coordinates=self.grid.coordinates)
assert self.grid != self.grid.copy(coordinates=tuple(i + 3.14 for i in self.grid.coordinates))
assert self.grid != self.grid.copy(values=self.grid.values + 3.14)
def test_round(self):
g = self.grid.rounded(1)
testing.assert_equal(g.vectors, [(1, 0, 0), (0, 2, 0), (0, 0, 3)])
| testing.assert_equal(g.coordinates, [(0, .5), (0, .3, .7), (0, .2, .5, .8)]) | numpy.testing.assert_equal |
# -*- coding: utf-8 -*-
"""
@author: <NAME>
@python_version: 3.6.4
"""
from scipy import signal as scisig
import numpy as np
import scipy.fftpack as scifft
import math
# ARTIFACTS REMOVAL
def remove_current_pulse_artifacts(sig, markers, window, n_draws, return_artifacts=False):
"""Remove current pulse artifacts from one-dimensional signal based on artifacts occurences represented
by one-dimensional markers signal. Current pulse artifacts removal is performed in following steps:
1. Extract current pulse artifacts from 'sig' based on 'markers' which contains ones and zeros, whereas ones
indicate current pulse artifact occurences.
2. Extraction is performed around the artifact occurence in accordance to range described by 'window'.
3. Extracted artifacts are stored in two-dimensional numpy.ndarray.
4. We draw from stored artifacts 'n_draws' without repetition and average them in order to get averaged
representation of current pulse artifact.
5. We substract this averaged artifact representation from the first occurence of the artifact in 'sig'.
6. We now repeat steps 4 and 5 for all next subsequent artifact occurences in 'sig'.
Parameters
----------
sig : 1D numpy.ndarray
One-dimensional signal with the occurences of current pulse artifacts.
markers : 1D numpy.ndarray
One-dimensional signal consisted of ones and zeros, where ones correspond to the exact sample occurences
of current pulse artifact in 'sig'. That's why 'markers'.size' must equal to 'sig.size'.
window : list of int of length 2
List consisted of two values describing sample range (window) of the current pulse artifact around
its occurence.
n_draws : int
Number of draws from the collection of stored artifacts. Must be >= 1.
return_artifacts : boolean
If True, beside of cleared signal, function will return also collection of the stored artifacts. Default value
is False.
Returns
-------
cleared : 1D numpy.ndarray
Cleared signal.
artifacts : 2D numpy.ndarray
Collection of the stored artifacts.
"""
if (isinstance(sig, np.ndarray) and sig.ndim == 1 and isinstance(markers, np.ndarray) and markers.ndim == 1
and ndarray_contains_only(markers, np.array([0, 1])) and sig.size == markers.size and len(window) in range(1, 3)
and list_is_int(window) and isinstance(n_draws, int) and n_draws >= 1):
# Extract artifacts.
artifacts = []
iterator = 0
for marker in markers:
if marker == 1:
artifacts.append(sig[iterator-window[0]:iterator+window[1]])
iterator += 1
artifacts = np.asarray(artifacts)
# Remove artifacts from the signal.
iterator = 0
for marker in markers:
if marker == 1:
if n_draws > np.shape(artifacts)[0]:
n_draws = np.shape(artifacts)[0]
random_artifact_indices = np.random.choice(np.arange(np.shape(artifacts)[0]), n_draws, replace=False)
avg_artifact = np.mean(np.take(artifacts, random_artifact_indices, axis=0), axis=0)
sig[iterator-window[0]:iterator+window[1]] -= avg_artifact
iterator += 1
cleared = sig
# Return cleared signal and extracted artifacts.
if return_artifacts:
return cleared, artifacts
else:
return cleared
else:
raise ValueError("Inappropriate type or value of one of the arguments. Please read carefully function docstring.")
# EXPLORATION AND MARKING
def mark_photodiode_changes(sig, threshold, wait_n_samples, direction='left-to-right'):
"""Create one-dimensional array of zeros and ones, where ones indicate places where photodiode signal exceeds some
specific threshold value. This one-dimensional array is the same length as photodiode signal.
Parameters
----------
sig : 1D numpy.ndarray
Photodiode signal.
threshold : float
Threshold value above which photodiode signal will be marked.
wait_n_samples : int
Wait n samples after last marker before trying to put next marker. Must be >= 0.
direction : str
Direction in which photodiode signal course will be analyzed and marked. There are three directions, ie.
'left-to-right', 'right-to-left', 'both'. In case of 'both' photodiode signal course will be first analyzed
'left-to-right' and than 'right-to-left'. Default value is 'left-to-right'.
Returns
-------
markers : 1D numpy.ndarray
Array of zeros and ones, where ones are markers.
"""
    if (isinstance(sig, np.ndarray) and sig.ndim == 1 and isinstance(threshold, float)
            and isinstance(wait_n_samples, int) and wait_n_samples >= 0
            and direction in ['left-to-right', 'right-to-left', 'both']):
if direction == 'left-to-right':
markers = np.zeros(len(sig))
wait_until_next_mark = wait_n_samples
iterator = 0
for sample in sig:
if sample > threshold and wait_until_next_mark >= wait_n_samples:
markers[iterator] = 1
wait_until_next_mark = 0
iterator += 1
wait_until_next_mark += 1
return markers
elif direction == 'right-to-left':
markers = np.zeros(len(sig))
iterator = len(sig)-1
wait_until_next_mark = wait_n_samples
for sample in reversed(sig):
if sample > threshold and wait_until_next_mark >= wait_n_samples:
markers[iterator] = 1
wait_until_next_mark = 0
iterator -= 1
wait_until_next_mark += 1
return markers
else:
markers_left_to_right = mark_photodiode_changes(sig, threshold, wait_n_samples, direction='left-to-right')
markers_right_to_left = mark_photodiode_changes(sig, threshold, wait_n_samples, direction='right-to-left')
markers = markers_left_to_right + markers_right_to_left
return markers
else:
raise ValueError("Inappropriate type, shape or value of one of the arguments. Please read carefully function docstring.")
# FILTERING, SMOOTHING, UP- AND DOWNSAMPLING
def downsample(sig, d_factor):
"""Downsample one-dimensional signal with the use of reshaping.
Parameters
----------
sig : 1D numpy.ndarray
One-dimensional signal for downsampling.
d_factor : int, range(1, inf)
Downsampling factor. Must be higher than 0.
Returns
-------
d_sig : 1D numpy.ndarray
        One-dimensional signal downsampled linearly by a factor equal to 'd_factor'.
"""
if (isinstance(sig, np.ndarray) and sig.ndim == 1 and isinstance(d_factor, int) and d_factor >= 1):
d_sig = sig.reshape(-1, d_factor).mean(axis=1)
return d_sig
else:
raise ValueError("Inappropriate type, shape or value of one of the arguments. Please read carefully function docstring.")
def filtfilt_butterworth(sig, sf, cf, order=1, btype='bandpass'):
"""Two-sided Butterworth filter.
Parameters
----------
sig : numpy.ndarray
Signal to filter.
sf : float
Signal sampling frequecy (number of samples per second).
cf : float | list of float of length 2
Filter frequencies. When using btype 'lowpass' or 'highpass' use single float. When using btype 'bandstop'
or 'bandpass' use list of float of length 2.
order : int in range of 1-5.
Order of the filter. Default value is 1.
btype : str
One of the four filter types, ie. 'lowpass', 'highpass', 'bandstop', 'bandpass'. Default value is 'bandpass'.
Returns
-------
filtered : numpy.ndarray
Filtered sig.
"""
    if (isinstance(sig, np.ndarray) and isinstance(sf, float) and sf > 0 and isinstance(order, int)
            and order in range(1, 6) and btype in ['lowpass', 'highpass', 'bandstop', 'bandpass']
            and ((btype in ['lowpass', 'highpass'] and isinstance(cf, float))
                 or (btype in ['bandstop', 'bandpass'] and isinstance(cf, list) and len(cf) == 2))):
if btype == 'highpass' or btype == 'lowpass':
b, a = scisig.butter(order, Wn=cf / (0.5 * sf), btype=btype, analog=0, output='ba')
return scisig.filtfilt(b, a, sig)
elif btype == 'bandstop' or btype == 'bandpass':
b, a = scisig.butter(order, Wn=(cf[0] / (0.5 * sf), cf[1] / (0.5 * sf)), btype=btype, analog=0, output='ba')
return scisig.filtfilt(b, a, sig)
else:
raise ValueError("Inappropriate type or value of one of the arguments. Please read carefully function docstring.")
def upsample(sig, i_factor):
"""Upsample one-dimensional signal with the use of linear interpolation.
Parameters
----------
sig : 1D numpy.ndarray
One-dimensional signal for interpolation.
i_factor : int, range(1, inf)
Interpolation factor. Must be higher than 0.
Returns
-------
i_sig : 1D numpy.ndarray
        One-dimensional signal interpolated linearly by a factor equal to 'i_factor'.
"""
if (isinstance(sig, np.ndarray) and sig.ndim == 1 and isinstance(i_factor, int) and i_factor >= 1):
x = np.linspace(0, sig.size, sig.size)
y = sig
i_x = np.linspace(0, sig.size, sig.size * i_factor)
i_y = np.interp(i_x, x, y)
i_sig = i_y
return i_sig
else:
raise ValueError("Inappropriate type, shape or value of one of the arguments. Please read carefully function docstring.")
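# Illustrative usage sketch for upsample (not part of the original source; values are hypothetical).
# The interpolated signal contains i_factor times as many samples as the input:
#     >>> i_sig = upsample(np.array([0., 1., 0., -1.]), 4)
#     >>> i_sig.size
#     16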
# SIGNAL CREATION
def create_sawtooth_pulse(freq, sf, amp, first_peak='positive'):
"""Create one-period sawtooth pulse.
Parameters
----------
freq : float
Frequency of the pulse wave in Hz. Must be > 0.
sf : int
Sampling frequency of the pulse (number of samples per second). Must be > 0.
amp : float
Amplitude of the pulse in microamperes (uA). Must be > 0.
first_peak : str
Polarity of the first pulse hillock. Available options: 'positive', 'negative'. Default value is 'positive'.
Returns
-------
pulse : 1D numpy.ndarray
One-period sawtooth pulse.
"""
if (isinstance(freq, float) and freq > 0 and isinstance(sf, int) and sf > 0 and isinstance(amp, float)
and amp > 0 and first_peak in ['positive', 'negative']):
duration = 1 / freq
time_scale = np.arange(0, duration, 1 / sf)
pulse = scisig.sawtooth(2 * np.pi * freq * time_scale) * (amp / 2)
if first_peak == 'negative':
pulse *= -1
return pulse
else:
raise ValueError("Inappriopriate type or value of one of the arguments. Please read carefully function docstring.")
def create_sin_pulse(freq, sf, amp, first_peak='positive'):
"""Create one-period sinusoidal pulse.
Parameters
----------
freq : float
Frequency of the pulse wave in Hz. Must be > 0.
sf : int
Sampling frequency of the pulse (number of samples per second). Must be > 0.
amp : float
        Amplitude of the pulse in microamperes (uA). Must be > 0.
first_peak : str
Polarity of the first pulse hillock. Available options: 'positive', 'negative'. Default value is 'positive'.
Returns
-------
pulse : 1D numpy.ndarray
One-period sinusoidal pulse.
"""
if (isinstance(freq, float) and freq > 0 and isinstance(sf, int) and sf > 0 and isinstance(amp, float)
and amp > 0 and first_peak in ['positive', 'negative']):
duration = 1 / freq
time_scale = np.arange(0, duration, 1 / sf)
pulse = np.sin(2 * np.pi * freq * time_scale) * (amp / 2)
if first_peak == 'negative':
pulse *= -1
return pulse
else:
raise ValueError("Inappriopriate type or value of one of the arguments. Please read carefully function docstring.")
def create_square_pulse(freq, sf, amp, first_peak='positive'):
"""Create one-period square pulse.
Parameters
----------
freq : float
Frequency of the pulse wave in Hz. Must be > 0.
sf : int
Sampling frequency of the pulse (number of samples per second). Must be > 0.
amp : float
Amplitude of the pulse in microamperes (uA). Must be > 0.
first_peak : str
Polarity of the first pulse hillock. Available options: 'positive', 'negative'. Default value is 'positive'.
Returns
-------
pulse : 1D numpy.ndarray
One-period squarewave pulse.
"""
if (isinstance(freq, float) and freq > 0 and isinstance(sf, int) and sf > 0 and isinstance(amp, float)
and amp > 0 and first_peak in ['positive', 'negative']):
duration = 1 / freq
time_scale = np.arange(0, duration, 1 / sf)
pulse = scisig.square(2 * np.pi * freq * time_scale) * (amp / 2)
if first_peak == 'negative':
pulse *= -1
return pulse
else:
raise ValueError("Inappropriate type or value of one of the arguments. Please read carefully function docstring.")
def create_alternating_signal(duration, sf, freq, amp, s_type='sinusoidal', first_peak='positive'):
"""Create one-dimensional alternating signal using sawtooth, sinusoidal or square wave.
Parameters
----------
duration : float
Duration of the signal in seconds. Must be > 0.
sf : int
Sampling frequency of the pulse (number of samples per second). Must be > 0.
freq : float
Frequency of the signal in Hz.
amp : float
        Amplitude of the pulse in microamperes (uA). Must be > 0.
s_type : str
        Type of the wave used in the signal creation. Available types: 'sawtooth', 'sinusoidal', 'square'.
Default value is 'sinusoidal'.
first_peak : str
Polarity of the first pulse hillock. Available options: 'positive', 'negative'. Default value is 'positive'.
Returns
-------
sig : 1D numpy.ndarray
Created one-dimensional alternating signal.
"""
if (isinstance(duration, float) and duration > 0 and isinstance(sf, int) and sf > 0 and isinstance(freq, float)
and freq > 0 and isinstance(amp, float) and amp > 0 and s_type in ['sawtooth', 'sinusoidal', 'square']
and first_peak in ['positive', 'negative'] and duration * sf >= 1):
temp_sig = []
pulse_time_in_s = 1.0 / freq
n_pulses = int(math.ceil(duration / pulse_time_in_s))
if s_type == 'sawtooth':
for i in np.arange(n_pulses):
pulse = create_sawtooth_pulse(freq, sf, amp, first_peak=first_peak)
temp_sig.append(pulse)
elif s_type == 'sinusoidal':
for i in np.arange(n_pulses):
pulse = create_sin_pulse(freq, sf, amp, first_peak=first_peak)
temp_sig.append(pulse)
else:
for i in np.arange(n_pulses):
pulse = create_square_pulse(freq, sf, amp, first_peak=first_peak)
temp_sig.append(pulse)
temp_sig = np.asarray(temp_sig).reshape(-1)
sig = np.zeros(int(np.around(duration * sf, decimals=0)))
sig = temp_sig[:sig.size]
return sig
else:
raise ValueError("Inappropriate type or value of one of the arguments. Please read carefully function docstring.")
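# Illustrative usage sketch for create_alternating_signal (not part of the original source; the
# parameter values are hypothetical). One second of a 5 Hz sine wave sampled at 500 Hz yields
# exactly duration * sf samples:
#     >>> sig = create_alternating_signal(1.0, 500, 5.0, 10.0)
#     >>> sig.size
#     500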
# SIMPLE CALCULATIONS
def z_score(x, avg, sd):
"""Calculate z-score.
Parameters
----------
x : float
        Variable to be standardized.
avg : float
Average from population.
sd : float
Standard deviation from population.
Returns
-------
z : float
Z-score.
"""
return (x - avg) / sd
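# Illustrative worked example for z_score (not part of the original source; the numbers are made up):
# a value of 430.0 in a population with mean 500.0 and SD 50.0 lies 1.4 standard deviations below the mean:
#     >>> z_score(430.0, 500.0, 50.0)
#     -1.4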
def create_time_scale(n_samples, sf, unit='s'):
"""Create one-dimensional time scale.
Parameters
----------
n_samples : int
Total number of samples in the signal for which time scale has to be created.
sf : int
Sampling frequency of the signal, ie. number of samples per second.
unit : str
Time unit in which time scale has to be expressed. Available units: hours 'h', minutes 'min', seconds 's',
milliseconds 'ms', microseconds 'us', nanoseconds 'ns'. Default value is 's'.
Returns
-------
time_scale : 1D np.ndarray
One-dimensional time scale with values expressed in a specific time unit.
"""
if (isinstance(n_samples, int) and isinstance(sf, int) and unit in ['h', 'min', 's', 'ms', 'us', 'ns']):
unit_convertion = {'h':3600, 'min':60, 's':1, 'ms':0.001, 'us':0.000001, 'ns':0.000000001}
total_time_in_unit = (n_samples / sf) / unit_convertion[unit]
dt = (1 / sf) / unit_convertion[unit]
time_scale = np.arange(0, total_time_in_unit, dt)
return time_scale
else:
raise ValueError("Innapriopriate type or value of one of the arguments. Please read carefully function docstring.")
def list_is_int(list_of_ints):
"""Check whether given list contains only int values.
Parameters
----------
list_of_ints : list
List of presumably only int values.
Returns
-------
verdict : boolean
        Return True if 'list_of_ints' contains only int values. Otherwise, return False.
"""
if (isinstance(list_of_ints, list) and len(list_of_ints) > 0):
for item in list_of_ints:
if not isinstance(item, int):
return False
return True
else:
raise ValueError("Inappropriate type or size of the argument.")
def ndarray_contains_only(ndarray, values):
"""Check whether numpy.ndarray contains only some specific values.
Parameters
----------
ndarray : numpy.ndarray
One-dimensional array.
values : 1D numpy.ndarray
One-dimensional array with values to check whether they occur in 'ndarray'.
Returns
-------
verdict : boolean
Return True, if 'ndarray' contains only 'values'. Otherwise, return False.
"""
if (isinstance(ndarray, np.ndarray) and ndarray.ndim == 1 and isinstance(values, np.ndarray) and values.ndim == 1):
mask = np.isin(ndarray, values)
matches = np.sum(mask)
if matches != ndarray.size:
return False
else:
return True
else:
raise ValueError("Inappropriate type or shape of the argument.")
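# Illustrative usage sketch for ndarray_contains_only (not part of the original source; the arrays
# are hypothetical), e.g. to check that a trigger channel only holds expected event codes:
#     >>> ndarray_contains_only(np.array([0, 1, 1, 0]), np.array([0, 1]))
#     True
#     >>> ndarray_contains_only(np.array([0, 1, 2]), np.array([0, 1]))
#     False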
# TRANSFORMATIONS AND CORRECTIONS
def baseline_correction(sig, b_window, c_window, b_type='absolute'):
"""Perform baseline correction on a given one-dimensional signal.
Parameters
----------
sig : 1D numpy.ndarray
One-dimensional signal for which baseline correction has to be performed.
b_window : list of int of length 2
Range of the 'sig' samples from which baseline should be calculated. Minimum and maximum range
is [0, sig.size-1].
c_window : list of int of length 2
Range of the 'sig' samples which should be baseline-corrected. Minimum and maximum range is [0, sig.size-1].
b_type : str
Type of baseline. Available options: 'absolute', 'relative', 'relchange', 'decibel' (based on
        http://bjornherrmann.com/baseline_correction.html). Default value is 'absolute'. In the formulas below, 'X' is the signal
        and 'B' is the baseline calculated as mean(sig[b_window[0]:b_window[1]]):
1. 'absolute' - absolute baseline, range of possible values: [-inf, inf], calculated as X - B;
2. 'relative' - relative baseline, range of possible values: [0, inf], calculated as X / B;
3. 'relchange' - relative change baseline, range of possible values: [-1, inf], calculated as (X - B) / B;
4. 'decibel' - decibel baseline (defined only for power), range of possible values: [-inf, inf], calculated as
10 * log10(X / B).
Returns
-------
corrected : numpy.ndarray
Baseline-corrected signal.
"""
if (isinstance(sig, np.ndarray) and sig.ndim == 1 and isinstance(b_window, list) and list_is_int(b_window)
and len(b_window) in range(1, 3) and isinstance(c_window, list) and list_is_int(c_window)
and len(c_window) in range(1, 3) and b_type in ['absolute', 'relative', 'relchange', 'decibel']):
baseline = np.mean(sig[b_window[0]:b_window[1]])
if b_type == 'absolute':
sig[c_window[0]:c_window[1]] -= baseline
elif b_type == 'relative':
sig[c_window[0]:c_window[1]] /= baseline
elif b_type == 'relchange':
sig[c_window[0]:c_window[1]] = (sig[c_window[0]:c_window[1]] - baseline) / baseline
else:
            sig[c_window[0]:c_window[1]] = 10 * np.log10(sig[c_window[0]:c_window[1]] / baseline)
        return sig
    else:
        raise ValueError("Inappropriate type, shape or value of one of the arguments. Please read carefully function docstring.")
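# Illustrative usage sketch for baseline_correction (not part of the original source; the signal and
# window values are hypothetical). The correction is applied in place, here subtracting the mean of
# the first 100 samples from the whole trial:
#     >>> trial = np.random.randn(1000) + 5.0
#     >>> _ = baseline_correction(trial, [0, 100], [0, 1000], b_type='absolute')
#     >>> abs(float(np.mean(trial[:100]))) < 1e-12
#     True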
#!/usr/bin/python
from __future__ import division
from __future__ import print_function
import sys
import os
import re
import datetime
import zipfile
import tempfile
import argparse
import math
import warnings
import json
import csv
import numpy as np
import scipy.stats as scp
from lxml import etree as et
def get_rdml_lib_version():
"""Return the version string of the RDML library.
Returns:
The version string of the RDML library.
"""
return "1.0.0"
class NpEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
elif isinstance(obj, np.bool_):
return bool(obj)
elif isinstance(obj, np.ndarray):
return obj.tolist()
else:
return super(NpEncoder, self).default(obj)
class RdmlError(Exception):
"""Basic exception for errors raised by the RDML-Python library"""
def __init__(self, message):
Exception.__init__(self, message)
pass
class secondError(RdmlError):
"""Just to have, not used yet"""
def __init__(self, message):
RdmlError.__init__(self, message)
pass
def _get_first_child(base, tag):
"""Get a child element of the base node with a given tag.
Args:
base: The base node element. (lxml node)
tag: Child elements group tag used to select the elements. (string)
Returns:
The first child lxml node element found or None.
"""
for node in base:
if node.tag.replace("{http://www.rdml.org}", "") == tag:
return node
return None
def _get_first_child_text(base, tag):
"""Get a child element of the base node with a given tag.
Args:
base: The base node element. (lxml node)
tag: Child elements group tag used to select the elements. (string)
Returns:
The text of first child node element found or an empty string.
"""
for node in base:
if node.tag.replace("{http://www.rdml.org}", "") == tag:
return node.text
return ""
def _get_first_child_bool(base, tag, triple=True):
"""Get a child element of the base node with a given tag.
Args:
base: The base node element. (lxml node)
tag: Child elements group tag used to select the elements. (string)
        triple: If True, None is returned when the tag is not found; if False, False is returned.
    Returns:
        The bool value of the tag, or None if triple is True and the tag is not found.
"""
for node in base:
if node.tag.replace("{http://www.rdml.org}", "") == tag:
return _string_to_bool(node.text, triple)
if triple is False:
return False
else:
return None
def _get_step_sort_nr(elem):
"""Get the number of the step eg. for sorting.
Args:
elem: The node element. (lxml node)
Returns:
        The int value of the step node nr.
"""
if elem is None:
raise RdmlError('A step element must be provided for sorting.')
ret = _get_first_child_text(elem, "nr")
if ret == "":
raise RdmlError('A step element must have a \"nr\" element for sorting.')
return int(ret)
def _sort_list_int(elem):
"""Get the first element of the array as int. for sorting.
Args:
elem: The 2d list
Returns:
        The int value of the first list element.
"""
return int(elem[0])
def _sort_list_float(elem):
"""Get the first element of the array as float. for sorting.
Args:
elem: The 2d list
Returns:
        The float value of the first list element.
"""
return float(elem[0])
def _sort_list_digital_PCR(elem):
"""Get the first column of the list as int. for sorting.
Args:
elem: The list
Returns:
        A tuple with the int value of the first column and the string value of the fifth column.
"""
arr = elem.split("\t")
return int(arr[0]), arr[4]
def _string_to_bool(value, triple=True):
"""Translates a string into bool value or None.
Args:
value: The string value to evaluate. (string)
        triple: If True, None is returned for an empty value; if False, False is returned.
    Returns:
        The bool value of the string, or None if triple is True and the value is empty.
"""
if value is None or value == "":
if triple is True:
return None
else:
return False
if type(value) is bool:
return value
if type(value) is int:
if value != 0:
return True
else:
return False
if type(value) is str:
if value.lower() in ['false', '0', 'f', '-', 'n', 'no']:
return False
else:
return True
return
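# Illustrative behaviour sketch for _string_to_bool (not part of the original source):
#     >>> _string_to_bool("no"), _string_to_bool("true"), _string_to_bool(0)
#     (False, True, False)
#     >>> _string_to_bool("", triple=True) is None
#     True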
def _value_to_booldic(value):
"""Translates a string, list or dic to a dictionary with true/false.
Args:
value: The string value to evaluate. (string)
Returns:
The a bool value of tag or if triple is True None.
"""
ret = {}
if type(value) is str:
ret[value] = True
if type(value) is list:
for ele in value:
ret[ele] = True
if type(value) is dict:
for key, val in value.items():
ret[key] = _string_to_bool(val, triple=False)
return ret
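# Illustrative behaviour sketch for _value_to_booldic (not part of the original source; the ids are
# hypothetical). Strings and list entries are mapped to True, dictionary values are parsed as bool:
#     >>> _value_to_booldic(["s1", "s2"])
#     {'s1': True, 's2': True}
#     >>> _value_to_booldic({"s1": "false", "s2": "yes"})
#     {'s1': False, 's2': True}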
def _get_first_child_by_pos_or_id(base, tag, by_id, by_pos):
"""Get a child element of the base node with a given tag and position or id.
Args:
base: The base node element. (lxml node)
tag: Child elements group tag used to select the elements. (string)
by_id: The unique id to search for. (string)
by_pos: The position of the element in the list (int)
Returns:
The child node element found or raise error.
"""
if by_id is None and by_pos is None:
raise RdmlError('Either an ' + tag + ' id or a position must be provided.')
if by_id is not None and by_pos is not None:
raise RdmlError('Only an ' + tag + ' id or a position can be provided.')
allChildren = _get_all_children(base, tag)
if by_id is not None:
for node in allChildren:
if node.get('id') == by_id:
return node
raise RdmlError('The ' + tag + ' id: ' + by_id + ' was not found in RDML file.')
if by_pos is not None:
if by_pos < 0 or by_pos > len(allChildren) - 1:
            raise RdmlError('The ' + tag + ' position ' + str(by_pos) + ' is out of range.')
return allChildren[by_pos]
def _add_first_child_to_dic(base, dic, opt, tag):
"""Adds the first child element with a given tag to a dictionary.
Args:
base: The base node element. (lxml node)
dic: The dictionary to add the element to (dictionary)
        opt: If False and the tag is not found in base, the tag is still added to the dictionary with an empty string. (bool)
tag: Child elements group tag used to select the elements. (string)
Returns:
The dictionary with the added element.
"""
for node in base:
if node.tag.replace("{http://www.rdml.org}", "") == tag:
dic[tag] = node.text
return dic
if not opt:
dic[tag] = ""
return dic
def _get_all_children(base, tag):
"""Get a list of all child elements with a given tag.
Args:
base: The base node element. (lxml node)
tag: Child elements group tag used to select the elements. (string)
Returns:
A list with all child node elements found or an empty list.
"""
ret = []
for node in base:
if node.tag.replace("{http://www.rdml.org}", "") == tag:
ret.append(node)
return ret
def _get_all_children_id(base, tag):
"""Get a list of ids of all child elements with a given tag.
Args:
base: The base node element. (lxml node)
tag: Child elements group tag used to select the elements. (string)
Returns:
A list with all child id strings found or an empty list.
"""
ret = []
for node in base:
if node.tag.replace("{http://www.rdml.org}", "") == tag:
ret.append(node.get('id'))
return ret
def _get_number_of_children(base, tag):
"""Count all child elements with a given tag.
Args:
base: The base node element. (lxml node)
tag: Child elements group tag used to select the elements. (string)
Returns:
A int number of the found child elements with the id.
"""
counter = 0
for node in base:
if node.tag.replace("{http://www.rdml.org}", "") == tag:
counter += 1
return counter
def _check_unique_id(base, tag, id):
"""Find all child elements with a given group and check if the id is already used.
Args:
base: The base node element. (lxml node)
tag: Child elements group tag used to select the elements. (string)
id: The unique id to search for. (string)
Returns:
False if the id is already used, True if not.
"""
for node in base:
if node.tag.replace("{http://www.rdml.org}", "") == tag:
if node.get('id') == id:
return False
return True
def _create_new_element(base, tag, id):
"""Create a new element with a given tag and id.
Args:
base: The base node element. (lxml node)
tag: Child elements group tag. (string)
id: The unique id of the new element. (string)
Returns:
        The new lxml node element with the given tag and id.
"""
if id is None or id == "":
raise RdmlError('An ' + tag + ' id must be provided.')
if not _check_unique_id(base, tag, id):
raise RdmlError('The ' + tag + ' id "' + id + '" must be unique.')
return et.Element(tag, id=id)
def _add_new_subelement(base, basetag, tag, text, opt):
"""Create a new element with a given tag and id.
Args:
base: The base node element. (lxml node)
basetag: Child elements group tag. (string)
tag: Child elements own tag, to be created. (string)
text: The text content of the new element. (string)
opt: If true, the element is optional (Bool)
Returns:
Nothing, the base lxml element is modified.
"""
if opt is False:
if text is None or text == "":
raise RdmlError('An ' + basetag + ' ' + tag + ' must be provided.')
et.SubElement(base, tag).text = text
else:
if text is not None and text != "":
et.SubElement(base, tag).text = text
def _change_subelement(base, tag, xmlkeys, value, opt, vtype, id_as_element=False):
"""Change the value of the element with a given tag.
Args:
base: The base node element. (lxml node)
tag: Child elements own tag, to be created. (string)
xmlkeys: The list of possible keys in the right order for xml (list strings)
value: The text content of the new element.
opt: If true, the element is optional (Bool)
        vtype: The type of the value ("string", "int", "float" or "bool")
id_as_element: If true, handle tag "id" as element, else as attribute
Returns:
Nothing, the base lxml element is modified.
"""
# Todo validate values with vtype
goodVal = value
if vtype == "bool":
ev = _string_to_bool(value, triple=True)
if ev is None or ev == "":
goodVal = ""
else:
if ev:
goodVal = "true"
else:
goodVal = "false"
if opt is False:
if goodVal is None or goodVal == "":
raise RdmlError('A value for ' + tag + ' must be provided.')
if tag == "id" and id_as_element is False:
if base.get('id') != goodVal:
par = base.getparent()
groupTag = base.tag.replace("{http://www.rdml.org}", "")
if not _check_unique_id(par, groupTag, goodVal):
raise RdmlError('The ' + groupTag + ' id "' + goodVal + '" is not unique.')
base.attrib['id'] = goodVal
return
    # Check if the tag already exists
elem = _get_first_child(base, tag)
if elem is not None:
if goodVal is None or goodVal == "":
base.remove(elem)
else:
elem.text = goodVal
else:
if goodVal is not None and goodVal != "":
new_node = et.Element(tag)
new_node.text = goodVal
place = _get_tag_pos(base, tag, xmlkeys, 0)
base.insert(place, new_node)
def _get_or_create_subelement(base, tag, xmlkeys):
"""Get element with a given tag, if not present, create it.
Args:
base: The base node element. (lxml node)
tag: Child elements own tag, to be created. (string)
xmlkeys: The list of possible keys in the right order for xml (list strings)
Returns:
The node element with the tag.
"""
    # Check if the tag already exists
if _get_first_child(base, tag) is None:
new_node = et.Element(tag)
place = _get_tag_pos(base, tag, xmlkeys, 0)
base.insert(place, new_node)
return _get_first_child(base, tag)
def _remove_irrelevant_subelement(base, tag):
"""If element with a given tag has no children, remove it.
Args:
base: The base node element. (lxml node)
tag: Child elements own tag, to be created. (string)
Returns:
The node element with the tag.
"""
    # Check if the tag already exists
elem = _get_first_child(base, tag)
if elem is None:
return
if len(elem) == 0:
base.remove(elem)
def _move_subelement(base, tag, id, xmlkeys, position):
"""Change the value of the element with a given tag.
Args:
base: The base node element. (lxml node)
tag: The id to search for. (string)
id: The unique id of the new element. (string)
xmlkeys: The list of possible keys in the right order for xml (list strings)
position: the new position of the element (int)
Returns:
Nothing, the base lxml element is modified.
"""
pos = _get_tag_pos(base, tag, xmlkeys, position)
ele = _get_first_child_by_pos_or_id(base, tag, id, None)
base.insert(pos, ele)
def _move_subelement_pos(base, tag, oldpos, xmlkeys, position):
"""Change the value of the element with a given tag.
Args:
base: The base node element. (lxml node)
tag: The id to search for. (string)
oldpos: The unique id of the new element. (string)
xmlkeys: The list of possible keys in the right order for xml (list strings)
position: the new position of the element (int)
Returns:
Nothing, the base lxml element is modified.
"""
pos = _get_tag_pos(base, tag, xmlkeys, position)
ele = _get_first_child_by_pos_or_id(base, tag, None, oldpos)
base.insert(pos, ele)
def _get_tag_pos(base, tag, xmlkeys, pos):
"""Returns a position were to add a subelement with the given tag inc. pos offset.
Args:
base: The base node element. (lxml node)
tag: The id to search for. (string)
xmlkeys: The list of possible keys in the right order for xml (list strings)
pos: The position relative to the tag elements (int)
Returns:
The int number of were to add the element with the tag.
"""
count = _get_number_of_children(base, tag)
offset = pos
if pos is None or pos < 0:
offset = 0
pos = 0
if pos > count:
offset = count
return _get_first_tag_pos(base, tag, xmlkeys) + offset
def _get_first_tag_pos(base, tag, xmlkeys):
"""Returns a position were to add a subelement with the given tag.
Args:
base: The base node element. (lxml node)
tag: The id to search for. (string)
xmlkeys: The list of possible keys in the right order for xml (list strings)
Returns:
The int number of were to add the element with the tag.
"""
listrest = xmlkeys[xmlkeys.index(tag):]
counter = 0
for node in base:
if node.tag.replace("{http://www.rdml.org}", "") in listrest:
return counter
counter += 1
return counter
def _writeFileInRDML(rdmlName, fileName, data):
"""Writes a file in the RDML zip, even if it existed before.
Args:
rdmlName: The name of the RDML zip file
fileName: The name of the file to write into the zip
data: The data string of the file
Returns:
Nothing, modifies the RDML file.
"""
needRewrite = False
if os.path.isfile(rdmlName):
with zipfile.ZipFile(rdmlName, 'r') as RDMLin:
for item in RDMLin.infolist():
if item.filename == fileName:
needRewrite = True
if needRewrite:
tempFolder, tempName = tempfile.mkstemp(dir=os.path.dirname(rdmlName))
os.close(tempFolder)
# copy everything except the filename
with zipfile.ZipFile(rdmlName, 'r') as RDMLin:
with zipfile.ZipFile(tempName, mode='w', compression=zipfile.ZIP_DEFLATED) as RDMLout:
RDMLout.comment = RDMLin.comment
for item in RDMLin.infolist():
if item.filename != fileName:
RDMLout.writestr(item, RDMLin.read(item.filename))
if data != "":
RDMLout.writestr(fileName, data)
os.remove(rdmlName)
os.rename(tempName, rdmlName)
else:
with zipfile.ZipFile(rdmlName, mode='a', compression=zipfile.ZIP_DEFLATED) as RDMLout:
RDMLout.writestr(fileName, data)
def _lrp_linReg(xIn, yUse):
"""A function which calculates the slope or the intercept by linear regression.
Args:
xIn: The numpy array of the cycles
yUse: The numpy array that contains the fluorescence
Returns:
An array with the slope and intercept.
"""
counts = np.ones(yUse.shape)
xUse = xIn.copy()
xUse[np.isnan(yUse)] = 0
counts[np.isnan(yUse)] = 0
cycSqared = xUse * xUse
cycFluor = xUse * yUse
sumCyc = np.nansum(xUse, axis=1)
sumFluor = np.nansum(yUse, axis=1)
sumCycSquared = np.nansum(cycSqared, axis=1)
sumCycFluor = np.nansum(cycFluor, axis=1)
n = np.nansum(counts, axis=1)
ssx = sumCycSquared - (sumCyc * sumCyc) / n
sxy = sumCycFluor - (sumCyc * sumFluor) / n
slope = sxy / ssx
intercept = (sumFluor / n) - slope * (sumCyc / n)
return [slope, intercept]
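# Illustrative sketch of what _lrp_linReg computes (not part of the original source; the arrays are
# hypothetical). Each row is fitted independently, so rows following fluor = 2 * cycle + 1 yield
# slope 2 and intercept 1:
#     >>> cyc = np.tile(np.arange(1.0, 6.0), (2, 1))
#     >>> slope, intercept = _lrp_linReg(cyc, 2.0 * cyc + 1.0)
#     >>> np.round(slope, 6), np.round(intercept, 6)
#     (array([2., 2.]), array([1., 1.]))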
def _lrp_findStopCyc(fluor, aRow):
"""Find the stop cycle of the log lin phase in fluor.
Args:
fluor: The array with the fluorescence values
aRow: The row to work on
Returns:
An int with the stop cycle.
"""
# Take care of nan values
validTwoLessCyc = 3 # Cycles so +1 to array
while (validTwoLessCyc <= fluor.shape[1] and
(np.isnan(fluor[aRow, validTwoLessCyc - 1]) or
np.isnan(fluor[aRow, validTwoLessCyc - 2]) or
np.isnan(fluor[aRow, validTwoLessCyc - 3]))):
validTwoLessCyc += 1
# First and Second Derivative values calculation
fluorShift = np.roll(fluor[aRow], 1, axis=0) # Shift to right - real position is -0.5
fluorShift[0] = np.nan
firstDerivative = fluor[aRow] - fluorShift
if np.isfinite(firstDerivative).any():
FDMaxCyc = np.nanargmax(firstDerivative, axis=0) + 1 # Cycles so +1 to array
else:
return fluor.shape[1]
firstDerivativeShift = np.roll(firstDerivative, -1, axis=0) # Shift to left
firstDerivativeShift[-1] = np.nan
secondDerivative = firstDerivativeShift - firstDerivative
if FDMaxCyc + 2 <= fluor.shape[1]:
# Only add two cycles if there is an increase without nan
if (not np.isnan(fluor[aRow, FDMaxCyc - 1]) and
not np.isnan(fluor[aRow, FDMaxCyc]) and
not np.isnan(fluor[aRow, FDMaxCyc + 1]) and
fluor[aRow, FDMaxCyc + 1] > fluor[aRow, FDMaxCyc] > fluor[aRow, FDMaxCyc - 1]):
FDMaxCyc += 2
else:
FDMaxCyc = fluor.shape[1]
maxMeanSD = 0.0
stopCyc = fluor.shape[1]
for cycInRange in range(validTwoLessCyc, FDMaxCyc):
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
tempMeanSD = np.mean(secondDerivative[cycInRange - 2: cycInRange + 1], axis=0)
# The > 0.000000000001 is to avoid float differences to the pascal version
if not np.isnan(tempMeanSD) and (tempMeanSD - maxMeanSD) > 0.000000000001:
maxMeanSD = tempMeanSD
stopCyc = cycInRange
if stopCyc + 2 >= fluor.shape[1]:
stopCyc = fluor.shape[1]
return stopCyc
def _lrp_findStartCyc(fluor, aRow, stopCyc):
"""A function which finds the start cycle of the log lin phase in fluor.
Args:
fluor: The array with the fluorescence values
aRow: The row to work on
stopCyc: The stop cycle
Returns:
An array [int, int] with the start cycle and the fixed start cycle.
"""
startCyc = stopCyc - 1
# startCyc might be NaN, so shift it to the first value
firstNotNaN = 1 # Cycles so +1 to array
while np.isnan(fluor[aRow, firstNotNaN - 1]) and firstNotNaN < startCyc:
firstNotNaN += 1
while startCyc > firstNotNaN and np.isnan(fluor[aRow, startCyc - 1]):
startCyc -= 1
# As long as there are no NaN and new values are increasing
while (startCyc > firstNotNaN and
not np.isnan(fluor[aRow, startCyc - 2]) and
fluor[aRow, startCyc - 2] <= fluor[aRow, startCyc - 1]):
startCyc -= 1
startCycFix = startCyc
if (not np.isnan(fluor[aRow, startCyc]) and
not np.isnan(fluor[aRow, startCyc - 1]) and
not np.isnan(fluor[aRow, stopCyc - 1]) and
not np.isnan(fluor[aRow, stopCyc - 2])):
startStep = np.log10(fluor[aRow, startCyc]) - np.log10(fluor[aRow, startCyc - 1])
stopStep = np.log10(fluor[aRow, stopCyc - 1]) - np.log10(fluor[aRow, stopCyc - 2])
if startStep > 1.1 * stopStep:
startCycFix += 1
return [startCyc, startCycFix]
def _lrp_testSlopes(fluor, aRow, stopCyc, startCycFix):
"""Splits the values and calculates a slope for the upper and the lower half.
Args:
fluor: The array with the fluorescence values
aRow: The row to work on
stopCyc: The stop cycle
startCycFix: The start cycle
Returns:
An array with [slopelow, slopehigh].
"""
# Both start with full range
loopStart = [startCycFix[aRow], stopCyc[aRow]]
loopStop = [startCycFix[aRow], stopCyc[aRow]]
# Now find the center ignoring nan
while True:
loopStart[1] -= 1
loopStop[0] += 1
while (loopStart[1] - loopStop[0]) > 1 and np.isnan(fluor[aRow, loopStart[1] - 1]):
loopStart[1] -= 1
while (loopStart[1] - loopStop[0]) > 1 and np.isnan(fluor[aRow, loopStop[1] - 1]):
loopStop[0] += 1
if (loopStart[1] - loopStop[0]) <= 1:
break
# basic regression per group
ssx = [0, 0]
sxy = [0, 0]
slope = [0, 0]
for j in range(0, 2):
sumx = 0.0
sumy = 0.0
sumx2 = 0.0
sumxy = 0.0
nincl = 0.0
for i in range(loopStart[j], loopStop[j] + 1):
if not np.isnan(fluor[aRow, i - 1]):
sumx += i
sumy += np.log10(fluor[aRow, i - 1])
sumx2 += i * i
sumxy += i * np.log10(fluor[aRow, i - 1])
nincl += 1
ssx[j] = sumx2 - sumx * sumx / nincl
sxy[j] = sumxy - sumx * sumy / nincl
slope[j] = sxy[j] / ssx[j]
return [slope[0], slope[1]]
def _lrp_lastCycMeanMax(fluor, vecSkipSample, vecNoPlateau):
"""A function which calculates the mean of the max fluor in the last ten cycles.
Args:
fluor: The array with the fluorescence values
vecSkipSample: Skip the sample
vecNoPlateau: Sample has no plateau
Returns:
        A float with the max mean.
"""
maxFlour = np.nanmax(fluor[:, -11:], axis=1)
maxFlour[vecSkipSample] = np.nan
maxFlour[vecNoPlateau] = np.nan
# Ignore all nan slices, to fix them below
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
maxMean = np.nanmean(maxFlour)
if np.isnan(maxMean):
maxMean = np.nanmax(maxFlour)
return maxMean
def _lrp_meanPcrEff(tarGroup, vecTarget, pcrEff, vecSkipSample, vecNoPlateau, vecShortLogLin):
"""A function which calculates the mean efficiency of the selected target group excluding bad ones.
Args:
tarGroup: The target number
vecTarget: The vector with the targets numbers
pcrEff: The array with the PCR efficiencies
vecSkipSample: Skip the sample
vecNoPlateau: True if there is no plateau
vecShortLogLin: True indicates a short log lin phase
Returns:
An array with [meanPcrEff, pcrEffVar].
"""
cnt = 0
sumEff = 0.0
sumEff2 = 0.0
for j in range(0, len(pcrEff)):
if tarGroup is None or tarGroup == vecTarget[j]:
if (not (vecSkipSample[j] or vecNoPlateau[j] or vecShortLogLin[j])) and pcrEff[j] > 1.0:
cnt += 1
sumEff += pcrEff[j]
sumEff2 += pcrEff[j] * pcrEff[j]
if cnt > 1:
meanPcrEff = sumEff / cnt
pcrEffVar = (sumEff2 - (sumEff * sumEff) / cnt) / (cnt - 1)
else:
meanPcrEff = 1.0
pcrEffVar = 100
return [meanPcrEff, pcrEffVar]
def _lrp_startStopInWindow(fluor, aRow, upWin, lowWin):
"""Find the start and the stop of the part of the curve which is inside the window.
Args:
fluor: The array with the fluorescence values
aRow: The row to work on
upWin: The upper limit of the window
lowWin: The lower limit of the window
Returns:
The int startWinCyc, stopWinCyc and the bool notInWindow.
"""
startWinCyc = 0
stopWinCyc = 0
# Find the stopCyc and the startCyc cycle of the log lin phase
stopCyc = _lrp_findStopCyc(fluor, aRow)
[startCyc, startCycFix] = _lrp_findStartCyc(fluor, aRow, stopCyc)
if np.isfinite(fluor[aRow, startCycFix - 1:]).any():
stopMaxCyc = np.nanargmax(fluor[aRow, startCycFix - 1:]) + startCycFix
else:
return startCyc, startCyc, True
    # notInWindow is True if the relevant part of the curve lies outside the window
if fluor[aRow, startCyc - 1] > upWin or fluor[aRow, stopMaxCyc - 1] < lowWin:
notInWindow = True
if fluor[aRow, startCyc - 1] > upWin:
startWinCyc = startCyc
stopWinCyc = startCyc
if fluor[aRow, stopMaxCyc - 1] < lowWin:
startWinCyc = stopMaxCyc
stopWinCyc = stopMaxCyc
else:
notInWindow = False
# look for stopWinCyc
if fluor[aRow, stopMaxCyc - 1] < upWin:
stopWinCyc = stopMaxCyc
else:
for i in range(stopMaxCyc, startCyc, -1):
if fluor[aRow, i - 1] > upWin > fluor[aRow, i - 2]:
stopWinCyc = i - 1
# look for startWinCyc
if fluor[aRow, startCycFix - 1] > lowWin:
startWinCyc = startCycFix
else:
for i in range(stopMaxCyc, startCyc, -1):
if fluor[aRow, i - 1] > lowWin > fluor[aRow, i - 2]:
startWinCyc = i
return startWinCyc, stopWinCyc, notInWindow
def _lrp_paramInWindow(fluor, aRow, upWin, lowWin):
"""Calculates slope, nNull, PCR efficiency and mean x/y for the curve part in the window.
Args:
fluor: The array with the fluorescence values
aRow: The row to work on
upWin: The upper limit of the window
lowWin: The lower limit of the window
Returns:
The calculated values: indMeanX, indMeanY, pcrEff, nnulls, ninclu, correl.
"""
startWinCyc, stopWinCyc, notInWindow = _lrp_startStopInWindow(fluor, aRow, upWin, lowWin)
sumx = 0.0
sumy = 0.0
sumx2 = 0.0
sumy2 = 0.0
sumxy = 0.0
nincl = 0.0
ssx = 0.0
ssy = 0.0
sxy = 0.0
for i in range(startWinCyc, stopWinCyc + 1):
fluorSamp = fluor[aRow, i - 1]
if not np.isnan(fluorSamp):
logFluorSamp = np.log10(fluorSamp)
sumx += i
sumy += logFluorSamp
sumx2 += i * i
sumy2 += logFluorSamp * logFluorSamp
sumxy += i * logFluorSamp
nincl += 1
if nincl > 1:
ssx = sumx2 - sumx * sumx / nincl
ssy = sumy2 - sumy * sumy / nincl
sxy = sumxy - sumx * sumy / nincl
if ssx > 0.0 and ssy > 0.0 and nincl > 0.0:
cslope = sxy / ssx
cinterc = sumy / nincl - cslope * sumx / nincl
correl = sxy / np.sqrt(ssx * ssy)
indMeanX = sumx / nincl
indMeanY = sumy / nincl
pcrEff = np.power(10, cslope)
nnulls = np.power(10, cinterc)
else:
correl = np.nan
indMeanX = np.nan
indMeanY = np.nan
pcrEff = np.nan
nnulls = np.nan
if notInWindow:
ninclu = 0
else:
ninclu = stopWinCyc - startWinCyc + 1
return indMeanX, indMeanY, pcrEff, nnulls, ninclu, correl
def _lrp_allParamInWindow(fluor, tarGroup, vecTarget, indMeanX, indMeanY, pcrEff, nnulls, ninclu, correl, upWin, lowWin, vecNoAmplification, vecBaselineError):
"""A function which calculates the mean of the max fluor in the last ten cycles.
Args:
fluor: The array with the fluorescence values
tarGroup: The target number
vecTarget: The vector with the targets numbers
indMeanX: The vector with the x mean position
indMeanY: The vector with the y mean position
pcrEff: The array with the PCR efficiencies
nnulls: The array with the calculated nnulls
ninclu: The array with the calculated ninclu
correl: The array with the calculated correl
upWin: The upper limit of the window
lowWin: The lower limit of the window
        vecNoAmplification: True if there is an amplification error
vecBaselineError: True if there is a baseline error
Returns:
An array with [indMeanX, indMeanY, pcrEff, nnulls, ninclu, correl].
"""
for row in range(0, fluor.shape[0]):
if tarGroup is None or tarGroup == vecTarget[row]:
if not (vecNoAmplification[row] or vecBaselineError[row]):
if tarGroup is None:
indMeanX[row], indMeanY[row], pcrEff[row], nnulls[row], ninclu[row], correl[row] = _lrp_paramInWindow(fluor, row, upWin[0], lowWin[0])
else:
indMeanX[row], indMeanY[row], pcrEff[row], nnulls[row], ninclu[row], correl[row] = _lrp_paramInWindow(fluor, row, upWin[tarGroup], lowWin[tarGroup])
else:
correl[row] = np.nan
indMeanX[row] = np.nan
indMeanY[row] = np.nan
pcrEff[row] = np.nan
nnulls[row] = np.nan
ninclu[row] = 0
return indMeanX, indMeanY, pcrEff, nnulls, ninclu, correl
def _lrp_meanStopFluor(fluor, tarGroup, vecTarget, stopCyc, vecSkipSample, vecNoPlateau):
"""Return the mean of the stop fluor or the max fluor if all rows have no plateau.
Args:
fluor: The array with the fluorescence values
tarGroup: The target number
vecTarget: The vector with the targets numbers
stopCyc: The vector with the stop cycle of the log lin phase
vecSkipSample: Skip the sample
vecNoPlateau: True if there is no plateau
Returns:
The meanMax fluorescence.
"""
meanMax = 0.0
maxFluor = 0.0000001
cnt = 0
if tarGroup is None:
for aRow in range(0, fluor.shape[0]):
if not vecSkipSample[aRow]:
if not vecNoPlateau[aRow]:
cnt += 1
meanMax += fluor[aRow, stopCyc[aRow] - 1]
else:
for i in range(0, fluor.shape[1]):
if fluor[aRow, i] > maxFluor:
maxFluor = fluor[aRow, i]
else:
for aRow in range(0, fluor.shape[0]):
if tarGroup == vecTarget[aRow] and not vecSkipSample[aRow]:
if not vecNoPlateau[aRow]:
cnt += 1
meanMax += fluor[aRow, stopCyc[aRow] - 1]
else:
for i in range(0, fluor.shape[1]):
if fluor[aRow, i] > maxFluor:
maxFluor = fluor[aRow, i]
if cnt > 0:
meanMax = meanMax / cnt
else:
meanMax = maxFluor
return meanMax
def _lrp_maxStartFluor(fluor, tarGroup, vecTarget, startCyc, vecSkipSample):
"""Return the maximum of the start fluorescence
Args:
fluor: The array with the fluorescence values
tarGroup: The target number
vecTarget: The vector with the targets numbers
startCyc: The vector with the start cycle of the log lin phase
vecSkipSample: Skip the sample
Returns:
The maxStart fluorescence.
"""
maxStart = -10.0
if tarGroup is None:
for aRow in range(0, fluor.shape[0]):
if not vecSkipSample[aRow]:
if fluor[aRow, startCyc[aRow] - 1] > maxStart:
maxStart = fluor[aRow, startCyc[aRow] - 1]
else:
for aRow in range(0, fluor.shape[0]):
if tarGroup == vecTarget[aRow] and not vecSkipSample[aRow]:
if fluor[aRow, startCyc[aRow] - 1] > maxStart:
maxStart = fluor[aRow, startCyc[aRow] - 1]
return 0.999 * maxStart
def _lrp_setLogWin(tarGroup, newUpWin, foldWidth, upWin, lowWin, maxFluorTotal, minFluorTotal):
"""Sets a new window and ensures its within the total fluorescence values.
Args:
tarGroup: The target number
newUpWin: The new upper window
foldWidth: The foldWith to the lower window
upWin: The upper window fluorescence
lowWin: The lower window fluorescence
maxFluorTotal: The maximum fluorescence over all rows
minFluorTotal: The minimum fluorescence over all rows
Returns:
        The updated upWin and lowWin arrays.
"""
# No rounding needed, only present for exact identical output with Pascal version
tempUpWin = np.power(10, np.round(1000 * newUpWin) / 1000)
tempLowWin = np.power(10, np.round(1000 * (newUpWin - foldWidth)) / 1000)
tempUpWin = np.minimum(tempUpWin, maxFluorTotal)
tempUpWin = np.maximum(tempUpWin, minFluorTotal)
tempLowWin = np.minimum(tempLowWin, maxFluorTotal)
tempLowWin = np.maximum(tempLowWin, minFluorTotal)
if tarGroup is None:
upWin[0] = tempUpWin
lowWin[0] = tempLowWin
else:
upWin[tarGroup] = tempUpWin
lowWin[tarGroup] = tempLowWin
return upWin, lowWin
def _lrp_logStepStop(fluor, tarGroup, vecTarget, stopCyc, vecSkipSample, vecNoPlateau):
"""Calculates the log of the fluorescence increase at the stop cycle.
Args:
fluor: The array with the fluorescence values
tarGroup: The target number
vecTarget: The vector with the targets numbers
stopCyc: The vector with the stop cycle of the log lin phase
vecSkipSample: True if row should be skipped
vecNoPlateau: True if there is no plateau
Returns:
        A float with the mean log10 fluorescence step at the stop cycle.
"""
cnt = 0
step = 0.0
for aRow in range(0, fluor.shape[0]):
if (tarGroup is None or tarGroup == vecTarget[aRow]) and not (vecSkipSample[aRow] or vecNoPlateau[aRow]):
cnt += 1
step += np.log10(fluor[aRow, stopCyc[aRow] - 1]) - np.log10(fluor[aRow, stopCyc[aRow] - 2])
if cnt > 0:
step = step / cnt
else:
step = np.log10(1.8)
return step
def _lrp_setWoL(fluor, tarGroup, vecTarget, pointsInWoL, indMeanX, indMeanY, pcrEff, nNulls, nInclu, correl,
upWin, lowWin, maxFluorTotal, minFluorTotal, stopCyc, startCyc, threshold,
vecNoAmplification, vecBaselineError, vecSkipSample, vecNoPlateau, vecShortLogLin, vecIsUsedInWoL):
"""Find the window with the lowest variation in PCR efficiency and calculate its values.
Args:
fluor: The array with the fluorescence values
tarGroup: The target number
vecTarget: The vector with the targets numbers
pointsInWoL: The number of points in the window
indMeanX: The vector with the x mean position
indMeanY: The vector with the y mean position
pcrEff: The array with the PCR efficiencies
nNulls: The array with the calculated nNulls
nInclu: The array with the calculated nInclu
correl: The array with the calculated correl
upWin: The upper limit of the window
lowWin: The lower limit of the window
maxFluorTotal: The maximum fluorescence over all rows
minFluorTotal: The minimum fluorescence over all rows
stopCyc: The vector with the stop cycle of the log lin phase
startCyc: The vector with the start cycle of the log lin phase
threshold: The threshold fluorescence
        vecNoAmplification: True if there is an amplification error
vecBaselineError: True if there is a baseline error
vecSkipSample: Skip the sample
vecNoPlateau: True if there is no plateau
vecShortLogLin: True indicates a short log lin phase
vecIsUsedInWoL: True if used in the WoL
Returns:
The values indMeanX, indMeanY, pcrEff, nNulls, nInclu, correl, upWin, lowWin, threshold, vecIsUsedInWoL.
"""
skipGroup = False
stepSize = 0.2 # was 0.5, smaller steps help in finding WoL
# Keep 60 calculated results
memVarEff = np.zeros(60, dtype=np.float64)
memUpWin = np.zeros(60, dtype=np.float64)
memFoldWidth = np.zeros(60, dtype=np.float64)
maxFluorWin = _lrp_meanStopFluor(fluor, tarGroup, vecTarget, stopCyc, vecSkipSample, vecNoPlateau)
if maxFluorWin > 0.0:
maxFluorWin = np.log10(maxFluorWin)
else:
skipGroup = True
minFluorLim = _lrp_maxStartFluor(fluor, tarGroup, vecTarget, startCyc, vecSkipSample)
if minFluorLim > 0.0:
minFluorLim = np.log10(minFluorLim)
else:
skipGroup = True
checkMeanEff = 1.0
if not skipGroup:
foldWidth = pointsInWoL * _lrp_logStepStop(fluor, tarGroup, vecTarget, stopCyc, vecSkipSample, vecNoPlateau)
upWin, lowWin = _lrp_setLogWin(tarGroup, maxFluorWin, foldWidth, upWin, lowWin, maxFluorTotal, minFluorTotal)
_unused, _unused2, checkPcrEff, _unused3, _unused4, _unused5 = _lrp_allParamInWindow(fluor, tarGroup, vecTarget,
indMeanX, indMeanY, pcrEff,
nNulls, nInclu, correl,
upWin, lowWin,
vecNoAmplification,
vecBaselineError)
[checkMeanEff, _unused] = _lrp_meanPcrEff(tarGroup, vecTarget, checkPcrEff,
vecSkipSample, vecNoPlateau, vecShortLogLin)
if checkMeanEff < 1.001:
skipGroup = True
if skipGroup:
if tarGroup is None:
threshold[0] = (0.5 * np.round(1000 * upWin[0]) / 1000)
else:
threshold[tarGroup] = (0.5 * np.round(1000 * upWin[tarGroup]) / 1000)
if not skipGroup:
foldWidth = np.log10(np.power(checkMeanEff, pointsInWoL))
counter = -1
maxVarEff = 0.0
maxVarEffStep = -1
lastUpWin = 2 + maxFluorWin
while True:
counter += 1
step = np.log10(checkMeanEff)
newUpWin = maxFluorWin - counter * stepSize * step
if newUpWin < lastUpWin:
upWin, lowWin = _lrp_setLogWin(tarGroup, newUpWin, foldWidth, upWin, lowWin, maxFluorTotal, minFluorTotal)
_unused, _unused2, checkPcrEff, _unused3, _unused4, _unused5 = _lrp_allParamInWindow(fluor, tarGroup,
vecTarget, indMeanX,
indMeanY, pcrEff,
nNulls, nInclu,
correl,
upWin, lowWin,
vecNoAmplification,
vecBaselineError)
[checkMeanEff, _unused] = _lrp_meanPcrEff(tarGroup, vecTarget, checkPcrEff,
vecSkipSample, vecNoPlateau, vecShortLogLin)
foldWidth = np.log10(np.power(checkMeanEff, pointsInWoL))
if foldWidth < 0.5:
foldWidth = 0.5 # to avoid width = 0 above stopCyc
upWin, lowWin = _lrp_setLogWin(tarGroup, newUpWin, foldWidth, upWin, lowWin, maxFluorTotal, minFluorTotal)
_unused, _unused2, checkPcrEff, _unused3, _unused4, _unused5 = _lrp_allParamInWindow(fluor, tarGroup,
vecTarget, indMeanX,
indMeanY, pcrEff,
nNulls, nInclu,
correl,
upWin, lowWin,
vecNoAmplification,
vecBaselineError)
[checkMeanEff, checkVarEff] = _lrp_meanPcrEff(tarGroup, vecTarget, checkPcrEff,
vecSkipSample, vecNoPlateau, vecShortLogLin)
if checkVarEff > 0.0:
memVarEff[counter] = np.sqrt(checkVarEff) / checkMeanEff
else:
memVarEff[counter] = 0.0
if checkVarEff > maxVarEff:
maxVarEff = checkVarEff
maxVarEffStep = counter
memUpWin[counter] = newUpWin
memFoldWidth[counter] = foldWidth
lastUpWin = newUpWin
else:
checkVarEff = 0.0
if counter >= 60 or newUpWin - foldWidth / (pointsInWoL / 2.0) < minFluorLim or checkVarEff < 0.00000000001:
break
# corrections: start
if checkVarEff < 0.00000000001:
counter -= 1 # remove window with vareff was 0.0
validSteps = -1
while True:
validSteps += 1
if memVarEff[validSteps] < 0.000001:
break
validSteps -= 1 # i = number of valid steps
minSmooth = memVarEff[0]
minStep = 0 # default top window
# next 3 if conditions on i: added to correct smoothing
if validSteps == 0:
minStep = 0
if 0 < validSteps < 4:
n = -1
while True:
n += 1
if memVarEff[n] < minSmooth:
minSmooth = memVarEff[n]
minStep = n
if n == validSteps:
break
if validSteps >= 4:
n = 0
while True:
n += 1
smoothVar = 0.0
for m in range(n - 1, n + 2):
smoothVar = smoothVar + memVarEff[m]
smoothVar = smoothVar / 3.0
if smoothVar < minSmooth:
minSmooth = smoothVar
minStep = n
if n >= validSteps - 1 or n > maxVarEffStep:
break
# corrections: stop
# Calculate the final values again
upWin, lowWin = _lrp_setLogWin(tarGroup, memUpWin[minStep], memFoldWidth[minStep],
upWin, lowWin, maxFluorTotal, minFluorTotal)
if tarGroup is None:
threshold[0] = (0.5 * np.round(1000 * upWin[0]) / 1000)
else:
threshold[tarGroup] = (0.5 * np.round(1000 * upWin[tarGroup]) / 1000)
indMeanX, indMeanY, pcrEff, nNulls, nInclu, correl = _lrp_allParamInWindow(fluor, tarGroup, vecTarget,
indMeanX, indMeanY, pcrEff, nNulls,
nInclu, correl, upWin, lowWin,
vecNoAmplification, vecBaselineError)
for aRow in range(0, len(pcrEff)):
if tarGroup is None or tarGroup == vecTarget[aRow]:
if (not (vecSkipSample[aRow] or vecNoPlateau[aRow] or vecShortLogLin[aRow])) and pcrEff[aRow] > 1.0:
vecIsUsedInWoL[aRow] = True
else:
vecIsUsedInWoL[aRow] = False
return indMeanX, indMeanY, pcrEff, nNulls, nInclu, correl, upWin, lowWin, threshold, vecIsUsedInWoL
def _lrp_assignNoPlateau(fluor, tarGroup, vecTarget, pointsInWoL, indMeanX, indMeanY, pcrEff, nNulls, nInclu, correl,
upWin, lowWin, maxFluorTotal, minFluorTotal, stopCyc, startCyc, threshold,
vecNoAmplification, vecBaselineError, vecSkipSample, vecNoPlateau, vecShortLogLin, vecIsUsedInWoL):
"""Assign no plateau again and possibly recalculate WoL if new no plateau was found.
Args:
fluor: The array with the fluorescence values
tarGroup: The target number
vecTarget: The vector with the targets numbers
pointsInWoL: The number of points in the window
indMeanX: The vector with the x mean position
indMeanY: The vector with the y mean position
pcrEff: The array with the PCR efficiencies
nNulls: The array with the calculated nNulls
nInclu: The array with the calculated nInclu
correl: The array with the calculated correl
upWin: The upper limit of the window
lowWin: The lower limit of the window
maxFluorTotal: The maximum fluorescence over all rows
minFluorTotal: The minimum fluorescence over all rows
stopCyc: The vector with the stop cycle of the log lin phase
startCyc: The vector with the start cycle of the log lin phase
threshold: The threshold fluorescence
        vecNoAmplification: True if there is an amplification error
vecBaselineError: True if there is a baseline error
vecSkipSample: Skip the sample
vecNoPlateau: True if there is no plateau
vecShortLogLin: True indicates a short log lin phase
vecIsUsedInWoL: True if used in the WoL
Returns:
The values indMeanX, indMeanY, pcrEff, nNulls, nInclu, correl, upWin, lowWin, threshold, vecIsUsedInWoL, vecNoPlateau.
"""
newNoPlateau = False
for aRow in range(0, fluor.shape[0]):
if (tarGroup is None or tarGroup == vecTarget[aRow]) and not (vecNoAmplification[aRow] or
vecBaselineError[aRow] or
vecNoPlateau[aRow]):
expectedFluor = nNulls[aRow] * np.power(pcrEff[aRow], fluor.shape[1])
if expectedFluor / fluor[aRow, fluor.shape[1] - 1] < 5:
newNoPlateau = True
vecNoPlateau[aRow] = True
if newNoPlateau:
indMeanX, indMeanY, pcrEff, nNulls, nInclu, correl, upWin, lowWin, threshold, vecIsUsedInWoL = _lrp_setWoL(fluor, tarGroup, vecTarget,
pointsInWoL, indMeanX, indMeanY, pcrEff,
nNulls, nInclu, correl, upWin,
lowWin, maxFluorTotal, minFluorTotal,
stopCyc, startCyc, threshold,
vecNoAmplification,
vecBaselineError,
vecSkipSample, vecNoPlateau,
vecShortLogLin, vecIsUsedInWoL)
return indMeanX, indMeanY, pcrEff, nNulls, nInclu, correl, upWin, lowWin, threshold, vecIsUsedInWoL, vecNoPlateau
def _lrp_removeOutlier(data, vecNoPlateau, alpha=0.05):
"""A function which calculates the skewness and Grubbs test to identify outliers ignoring nan.
Args:
data: The numpy array with the data
vecNoPlateau: The vector of samples without plateau.
        alpha: The significance level (default 0.05)
    Returns:
        A bool array with the removed outliers set to True.
"""
oData = np.copy(data)
oLogic = np.zeros(data.shape, dtype=np.bool_)
loopOn = True
while loopOn:
count = np.count_nonzero(~np.isnan(oData))
if count < 3:
loopOn = False
else:
mean = np.nanmean(oData)
std = np.nanstd(oData, ddof=1)
skewness = scp.skew(oData, bias=False, nan_policy='omit')
skewness_SE = np.sqrt((6 * count * (count - 1)) / ((count - 2) * (count + 1) * (count + 3)))
skewness_t = np.abs(skewness) / skewness_SE
skewness_P = scp.t.sf(skewness_t, df=np.power(10, 10)) * 2
if skewness_P < alpha / 2.0:
# It's skewed!
grubbs_t = scp.t.ppf(1 - (alpha / count) / 2, (count - 2))
grubbs_Gcrit = ((count - 1) / np.sqrt(count)) * np.sqrt(np.power(grubbs_t, 2) /
((count - 2) + np.power(grubbs_t, 2)))
if skewness > 0.0:
data_max = np.nanmax(oData)
grubbs_res = (data_max - mean) / std
max_pos = np.nanargmax(oData)
if grubbs_res > grubbs_Gcrit:
# It's a true outlier
oData[max_pos] = np.nan
oLogic[max_pos] = True
else:
if vecNoPlateau[max_pos]:
# It has no plateau
oData[max_pos] = np.nan
oLogic[max_pos] = True
else:
loopOn = False
else:
data_min = np.nanmin(oData)
grubbs_res = (mean - data_min) / std
min_pos = np.nanargmin(oData)
if grubbs_res > grubbs_Gcrit:
# It's a true outlier
oData[min_pos] = np.nan
oLogic[min_pos] = True
else:
if vecNoPlateau[min_pos]:
# It has no plateau
oData[min_pos] = np.nan
oLogic[min_pos] = True
else:
loopOn = False
else:
loopOn = False
return oLogic
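# Illustrative sketch for _lrp_removeOutlier (not part of the original source; the replicate values
# are hypothetical). The skewness / Grubbs screening is expected to flag the extreme replicate and
# leave the consistent ones untouched:
#     >>> data = np.array([1.0, 1.1, 0.9, 1.05, 12.0])
#     >>> _lrp_removeOutlier(data, np.zeros(5, dtype=np.bool_))
#     array([False, False, False, False,  True])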
def _mca_smooth(tempList, rawFluor):
"""A function to smooth the melt curve date based on Friedmans supersmoother.
# https://www.slac.stanford.edu/pubs/slacpubs/3250/slac-pub-3477.pdf
Args:
        tempList: The array with the temperature values
rawFluor: The numpy array with the raw data
Returns:
The numpy array with the smoothed data.
"""
span_s = 0.05
span_m = 0.2
span_l = 0.5
smoothFluor = np.zeros(rawFluor.shape, dtype=np.float64)
padTemp = np.append(0.0, tempList)
zeroPad = np.zeros((rawFluor.shape[0], 1), dtype=np.float64)
padFluor = np.append(zeroPad, rawFluor, axis=1)
n = len(padTemp) - 1
# Find the increase in x from 0.25 to 0.75 over the total range
firstQuarter = int(0.5 + n / 4)
thirdQuarter = 3 * firstQuarter
scale = -1.0
while scale <= 0.0:
if thirdQuarter < n:
thirdQuarter += 1
if firstQuarter > 1:
firstQuarter -= 1
scale = padTemp[thirdQuarter] - padTemp[firstQuarter]
vsmlsq = 0.0001 * scale * 0.0001 * scale
countUp = 0
for fluor in padFluor:
[res_s_a, res_s_t] = _mca_sub_smooth(padTemp, fluor, span_s, vsmlsq, True)
[res_s_b, _unused] = _mca_sub_smooth(padTemp, res_s_t, span_m, vsmlsq, False)
[res_s_c, res_s_t] = _mca_sub_smooth(padTemp, fluor, span_m, vsmlsq, True)
[res_s_d, _unused] = _mca_sub_smooth(padTemp, res_s_t, span_m, vsmlsq, False)
[res_s_e, res_s_t] = _mca_sub_smooth(padTemp, fluor, span_l, vsmlsq, True)
[res_s_f, _unused] = _mca_sub_smooth(padTemp, res_s_t, span_m, vsmlsq, False)
res_s_fin = np.zeros(res_s_a.shape, dtype=np.float64)
for thirdQuarter in range(1, n + 1):
resmin = 1.0e20
if res_s_b[thirdQuarter] < resmin:
resmin = res_s_b[thirdQuarter]
res_s_fin[thirdQuarter] = span_s
if res_s_d[thirdQuarter] < resmin:
resmin = res_s_d[thirdQuarter]
res_s_fin[thirdQuarter] = span_m
if res_s_f[thirdQuarter] < resmin:
res_s_fin[thirdQuarter] = span_l
[res_s_bb, _unused] = _mca_sub_smooth(padTemp, res_s_fin, span_m, vsmlsq, False)
res_s_cc = np.zeros(res_s_a.shape, dtype=np.float64)
for thirdQuarter in range(1, n + 1):
            # compare res_s_bb with spans[] and make sure that no res_s_bb[] is below span_s or above span_l
if res_s_bb[thirdQuarter] <= span_s:
res_s_bb[thirdQuarter] = span_s
if res_s_bb[thirdQuarter] >= span_l:
res_s_bb[thirdQuarter] = span_l
f = res_s_bb[thirdQuarter] - span_m
if f >= 0.0:
# in case res_s_bb[] is higher than span_m: calculate res_s_cc[] from res_s_c and res_s_e
# using linear interpolation between span_l and span_m
f = f / (span_l - span_m)
res_s_cc[thirdQuarter] = (1.0 - f) * res_s_c[thirdQuarter] + f * res_s_e[thirdQuarter]
else:
# in case res_s_bb[] is less than span_m: calculate res_s_cc[] from res_s_c and res_s_a
# using linear interpolation between span_s and span_m
f = -f / (span_m - span_s)
res_s_cc[thirdQuarter] = (1.0 - f) * res_s_c[thirdQuarter] + f * res_s_a[thirdQuarter]
# final smoothing of combined optimally smoothed values in res_s_cc[] into smo[]
[res_s_t, _unused] = _mca_sub_smooth(padTemp, res_s_cc, span_s, vsmlsq, False)
smoothFluor[countUp] = res_s_t[1:]
countUp += 1
return smoothFluor
def _mca_sub_smooth(temperature, fluor, span, vsmlsq, saveVarianceData):
"""A function to smooth the melt curve date based on Friedmans supersmoother.
# https://www.slac.stanford.edu/pubs/slacpubs/3250/slac-pub-3477.pdf
Args:
        temperature: The array with the temperature values
fluor: The numpy array with the raw data
span: The selected span
vsmlsq: The width
        saveVarianceData: Save variance data
Returns:
[smoothData[], varianceData[]] where smoothData[] contains smoothed data,
varianceData[] contains residuals scaled to variance.
"""
n = len(temperature) - 1
smoothData = np.zeros(len(temperature), dtype=np.float64)
varianceData = np.zeros(len(temperature), dtype=np.float64)
windowSize = int(0.5 * span * n + 0.6)
if windowSize < 2:
windowSize = 2
windowStop = 2 * windowSize + 1 # range of smoothing window
xm = temperature[1]
ym = fluor[1]
tempVar = 0.0
fluorVar = 0.0
for i in range(2, windowStop + 1):
xm = ((i - 1) * xm + temperature[i]) / i
ym = ((i - 1) * ym + fluor[i]) / i
tmp = i * (temperature[i] - xm) / (i - 1)
tempVar += tmp * (temperature[i] - xm)
fluorVar += tmp * (fluor[i] - ym)
fbw = windowStop
for j in range(1, n + 1): # Loop through all
windowStart = j - windowSize - 1
windowEnd = j + windowSize
if not (windowStart < 1 or windowEnd > n):
tempStart = temperature[windowStart]
tempEnd = temperature[windowEnd]
fbo = fbw
fbw = fbw - 1.0
tmp = 0.0
if fbw > 0.0:
xm = (fbo * xm - tempStart) / fbw
if fbw > 0.0:
ym = (fbo * ym - fluor[windowStart]) / fbw
if fbw > 0.0:
tmp = fbo * (tempStart - xm) / fbw
tempVar = tempVar - tmp * (tempStart - xm)
fluorVar = fluorVar - tmp * (fluor[windowStart] - ym)
fbo = fbw
fbw = fbw + 1.0
tmp = 0.0
if fbw > 0.0:
xm = (fbo * xm + tempEnd) / fbw
if fbw > 0.0:
ym = (fbo * ym + fluor[windowEnd]) / fbw
if fbo > 0.0:
tmp = fbw * (tempEnd - xm) / fbo
tempVar = tempVar + tmp * (tempEnd - xm)
fluorVar = fluorVar + tmp * (fluor[windowEnd] - ym)
if tempVar > vsmlsq:
smoothData[j] = (temperature[j] - xm) * fluorVar / tempVar + ym # contains smoothed data
else:
smoothData[j] = ym # contains smoothed data
if saveVarianceData:
h = 0.0
if fbw > 0.0:
h = 1.0 / fbw
if tempVar > vsmlsq:
h = h + (temperature[j] - xm) * (temperature[j] - xm) / tempVar
if 1.0 - h > 0.0:
varianceData[j] = abs(fluor[j] - smoothData[j]) / (1.0 - h) # contains residuals scaled to variance
else:
if j > 1:
varianceData[j] = varianceData[j - 1] # contains residuals scaled to variance
else:
varianceData[j] = 0.0
return [smoothData, varianceData]
def _mca_linReg(xIn, yUse, start, stop):
"""A function which calculates the slope or the intercept by linear regression.
Args:
xIn: The numpy array of the temperatures
        yUse: The numpy array that contains the fluorescence
        start: The first column index included in the regression
        stop: The last column index (inclusive) included in the regression
Returns:
An array with the slope and intercept.
"""
counts = np.ones(yUse.shape)
xUse = xIn.copy()
xUse[np.isnan(yUse)] = 0
counts[np.isnan(yUse)] = 0
myStop = stop + 1
tempSqared = xUse * xUse
tempFluor = xUse * yUse
sumCyc = np.nansum(xUse[:, start:myStop], axis=1)
sumFluor = np.nansum(yUse[:, start:myStop], axis=1)
sumCycSquared = np.nansum(tempSqared[:, start:myStop], axis=1)
sumCycFluor = np.nansum(tempFluor[:, start:myStop], axis=1)
n = np.nansum(counts[:, start:myStop], axis=1)
ssx = sumCycSquared - (sumCyc * sumCyc) / n
sxy = sumCycFluor - (sumCyc * sumFluor) / n
slope = sxy / ssx
intercept = (sumFluor / n) - slope * (sumCyc / n)
return [slope, intercept]
def _cleanErrorString(inStr, cleanStyle):
outStr = ";"
inStr += ";"
if cleanStyle == "melt":
outStr = inStr.replace('several products with different melting temperatures detected', '')
outStr = outStr.replace('product with different melting temperatures detected', '')
outStr = outStr.replace('no product with expected melting temperature', '')
else:
strList = inStr.split(";")
knownWarn = ["amplification in negative control", "plateau in negative control",
"no amplification in positive control", "baseline error in positive control",
"no plateau in positive control", "noisy sample in positive control",
"Cq < 10, N0 unreliable", "Cq > 34", "no indiv PCR eff can be calculated",
"PCR efficiency outlier", "no amplification", "baseline error", "no plateau",
"noisy sample", "Cq too high"]
for ele in strList:
if ele in knownWarn:
continue
if re.search(r"^only \d+ values in log phase", ele):
continue
if re.search(r"^indiv PCR eff is .+", ele):
continue
outStr += ele + ";"
# if inStr.find('several products with different melting temperatures detected') >= 0:
# outStr += ';several products with different melting temperatures detected;'
# if inStr.find('product with different melting temperatures detected') >= 0:
# outStr += ';product with different melting temperatures detected;'
# if inStr.find('no product with expected melting temperature') >= 0:
# outStr += ';no product with expected melting temperature;'
outStr = re.sub(r';+', ';', outStr)
return outStr
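# Usage sketch for _cleanErrorString: any style other than "melt" keeps only the
# warnings that are not in the known list, while "melt" strips the melting
# temperature messages. The input string is a hypothetical example.
#
#     _cleanErrorString("no amplification;unexpected problem;", "amp")
#     # -> ";unexpected problem;"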
def _numpyTwoAxisSave(var, fileName):
with np.printoptions(precision=3, suppress=True):
np.savetxt(fileName, var, fmt='%.6f', delimiter='\t', newline='\n')
def _getXMLDataType():
return ["tar", "cq", "N0", "ampEffMet", "ampEff", "ampEffSE", "corrF", "meltTemp",
"excl", "note", "adp", "mdp", "endPt", "bgFluor", "quantFluor"]
class Rdml:
"""RDML-Python library
The root element used to open, write, read and edit RDML files.
Attributes:
_rdmlData: The RDML XML object from lxml.
_node: The root node of the RDML XML object.
"""
def __init__(self, filename=None):
"""Inits an empty RDML instance with new() or load RDML file with load().
Args:
self: The class self parameter.
filename: The name of the RDML file to load.
Returns:
No return value. Function may raise RdmlError if required.
"""
self._rdmlData = None
self._rdmlFilename = None
self._node = None
if filename:
self.load(filename)
else:
self.new()
def __getitem__(self, key):
"""Returns data of the key.
Args:
self: The class self parameter.
key: The key of the rdml subelement
Returns:
A string of the data or None.
"""
if key == "version":
return self.version()
if key in ["dateMade", "dateUpdated"]:
return _get_first_child_text(self._node, key)
raise KeyError
def keys(self):
"""Returns a list of the keys.
Args:
self: The class self parameter.
Returns:
A list of the key strings.
"""
return ["version", "dateMade", "dateUpdated"]
def xmlkeys(self):
"""Returns a list of the keys in the xml file.
Args:
self: The class self parameter.
Returns:
A list of the key strings.
"""
return ["dateMade", "dateUpdated", "id", "experimenter", "documentation", "dye",
"sample", "target", "thermalCyclingConditions", "experiment"]
def new(self):
"""Creates an new empty RDML object with the current date.
Args:
self: The class self parameter.
Returns:
No return value. Function may raise RdmlError if required.
"""
data = "<rdml version='1.2' xmlns:rdml='http://www.rdml.org' xmlns='http://www.rdml.org'>\n<dateMade>"
data += datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S")
data += "</dateMade>\n<dateUpdated>"
data += datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S")
data += "</dateUpdated>\n</rdml>"
self.loadXMLString(data)
return
def load(self, filename):
"""Load an RDML file with decompression of rdml_data.xml or an XML file. Uses loadXMLString().
Args:
self: The class self parameter.
filename: The name of the RDML file to load.
Returns:
No return value. Function may raise RdmlError if required.
"""
if zipfile.is_zipfile(filename):
self._rdmlFilename = filename
zf = zipfile.ZipFile(filename, 'r')
try:
data = zf.read('rdml_data.xml').decode('utf-8')
except KeyError:
raise RdmlError('No rdml_data.xml in compressed RDML file found.')
else:
self.loadXMLString(data)
finally:
zf.close()
else:
with open(filename, 'r') as txtfile:
data = txtfile.read()
if data:
self.loadXMLString(data)
else:
raise RdmlError('File format error, not a valid RDML or XML file.')
def save(self, filename):
"""Save an RDML file with compression of rdml_data.xml.
Args:
self: The class self parameter.
filename: The name of the RDML file to save to.
Returns:
No return value. Function may raise RdmlError if required.
"""
elem = _get_or_create_subelement(self._node, "dateUpdated", self.xmlkeys())
elem.text = datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S")
data = et.tostring(self._rdmlData, pretty_print=True)
_writeFileInRDML(filename, 'rdml_data.xml', data)
def loadXMLString(self, data):
"""Create RDML object from xml string. !ENTITY and DOCSTRINGS will be removed.
Args:
self: The class self parameter.
data: The xml string of the RDML file to load.
Returns:
No return value. Function may raise RdmlError if required.
"""
# To avoid some xml attacks based on
# <!ENTITY entityname "replacement text">
data = re.sub(r"<\W*!ENTITY[^>]+>", "", data)
data = re.sub(r"!ENTITY", "", data)
try:
self._rdmlData = et.ElementTree(et.fromstring(data.encode('utf-8')))
# Change to bytecode and defused?
except et.XMLSyntaxError:
raise RdmlError('XML load error, not a valid RDML or XML file.')
self._node = self._rdmlData.getroot()
if self._node.tag.replace("{http://www.rdml.org}", "") != 'rdml':
raise RdmlError('Root element is not \'rdml\', not a valid RDML or XML file.')
rdml_version = self._node.get('version')
# Reminder: Update version in new() and validate()
if rdml_version not in ['1.0', '1.1', '1.2', '1.3']:
raise RdmlError('Unknown or unsupported RDML file version.')
def validate(self, filename=None):
"""Validate the RDML object against its schema or load file and validate it.
Args:
self: The class self parameter.
filename: The name of the RDML file to load.
Returns:
A string with the validation result as a two column table.
"""
notes = ""
if filename:
try:
vd = Rdml(filename)
except RdmlError as err:
notes += 'RDML file structure:\tFalse\t' + str(err) + '\n'
return notes
notes += "RDML file structure:\tTrue\tValid file structure.\n"
else:
vd = self
version = vd.version()
rdmlws = os.path.dirname(os.path.abspath(__file__))
if version == '1.0':
xmlschema_doc = et.parse(os.path.join(rdmlws, 'schema', 'RDML_v1_0_REC.xsd'))
elif version == '1.1':
xmlschema_doc = et.parse(os.path.join(rdmlws, 'schema', 'RDML_v1_1_REC.xsd'))
elif version == '1.2':
xmlschema_doc = et.parse(os.path.join(rdmlws, 'schema', 'RDML_v1_2_REC.xsd'))
elif version == '1.3':
xmlschema_doc = et.parse(os.path.join(rdmlws, 'schema', 'RDML_v1_3_CR.xsd'))
else:
notes += 'RDML version:\tFalse\tUnknown schema version ' + version + '\n'
return notes
notes += "RDML version:\tTrue\t" + version + "\n"
xmlschema = et.XMLSchema(xmlschema_doc)
result = xmlschema.validate(vd._rdmlData)
if result:
notes += 'Schema validation result:\tTrue\tRDML file is valid.\n'
else:
notes += 'Schema validation result:\tFalse\tRDML file is not valid.\n'
log = xmlschema.error_log
for err in log:
notes += 'Schema validation error:\tFalse\t'
notes += "Line %s, Column %s: %s \n" % (err.line, err.column, err.message)
return notes
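# Usage sketch (hypothetical file name): validation can be run on a loaded object
# or directly on a file; the result is a tab separated two column report.
#
#     rdml = Rdml("experiment.rdml")
#     print(rdml.validate())
#     if not rdml.isvalid():
#         print("The file does not conform to the RDML schema.")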
def isvalid(self, filename=None):
"""Validate the RDML object against its schema or load file and validate it.
Args:
self: The class self parameter.
filename: The name of the RDML file to load.
Returns:
True or false as the validation result.
"""
if filename:
try:
vd = Rdml(filename)
except RdmlError:
return False
else:
vd = self
version = vd.version()
rdmlws = os.path.dirname(os.path.abspath(__file__))
if version == '1.0':
xmlschema_doc = et.parse(os.path.join(rdmlws, 'schema', 'RDML_v1_0_REC.xsd'))
elif version == '1.1':
xmlschema_doc = et.parse(os.path.join(rdmlws, 'schema', 'RDML_v1_1_REC.xsd'))
elif version == '1.2':
xmlschema_doc = et.parse(os.path.join(rdmlws, 'schema', 'RDML_v1_2_REC.xsd'))
elif version == '1.3':
xmlschema_doc = et.parse(os.path.join(rdmlws, 'schema', 'RDML_v1_3_CR.xsd'))
else:
return False
xmlschema = et.XMLSchema(xmlschema_doc)
result = xmlschema.validate(vd._rdmlData)
if result:
return True
else:
return False
def version(self):
"""Returns the version string of the RDML object.
Args:
self: The class self parameter.
Returns:
A string of the version like '1.1'.
"""
return self._node.get('version')
def migrate_version_1_0_to_1_1(self):
"""Migrates the rdml version from v1.0 to v1.1.
Args:
self: The class self parameter.
Returns:
A list of strings with the modifications made.
"""
ret = []
rdml_version = self._node.get('version')
if rdml_version != '1.0':
raise RdmlError('RDML version for migration has to be v1.0.')
exp = _get_all_children(self._node, "thirdPartyExtensions")
if len(exp) > 0:
ret.append("Migration to v1.1 deleted \"thirdPartyExtensions\" elements.")
for node in exp:
self._node.remove(node)
hint = ""
exp1 = _get_all_children(self._node, "experiment")
for node1 in exp1:
exp2 = _get_all_children(node1, "run")
for node2 in exp2:
exp3 = _get_all_children(node2, "react")
for node3 in exp3:
exp4 = _get_all_children(node3, "data")
for node4 in exp4:
exp5 = _get_all_children(node4, "quantity")
for node5 in exp5:
hint = "Migration to v1.1 deleted react data \"quantity\" elements."
node4.remove(node5)
if hint != "":
ret.append(hint)
xml_keys = ["description", "documentation", "xRef", "type", "interRunCalibrator",
"quantity", "calibratorSample", "cdnaSynthesisMethod",
"templateRNAQuantity", "templateRNAQuality", "templateDNAQuantity", "templateDNAQuality"]
exp1 = _get_all_children(self._node, "sample")
for node1 in exp1:
hint = ""
exp2 = _get_all_children(node1, "templateRNAQuantity")
if len(exp2) > 0:
templateRNAQuantity = _get_first_child_text(node1, "templateRNAQuantity")
node1.remove(exp2[0])
if templateRNAQuantity != "":
hint = "Migration to v1.1 modified sample \"templateRNAQuantity\" element without loss."
ele = _get_or_create_subelement(node1, "templateRNAQuantity", xml_keys)
_change_subelement(ele, "value", ["value", "unit"], templateRNAQuantity, True, "float")
_change_subelement(ele, "unit", ["value", "unit"], "ng", True, "float")
if hint != "":
ret.append(hint)
hint = ""
exp2 = _get_all_children(node1, "templateRNAQuantity")
if len(exp2) > 0:
templateDNAQuantity = _get_first_child_text(node1, "templateDNAQuantity")
node1.remove(exp2[0])
if templateDNAQuantity != "":
hint = "Migration to v1.1 modified sample \"templateDNAQuantity\" element without loss."
ele = _get_or_create_subelement(node1, "templateDNAQuantity", xml_keys)
_change_subelement(ele, "value", ["value", "unit"], templateDNAQuantity, True, "float")
_change_subelement(ele, "unit", ["value", "unit"], "ng", True, "float")
if hint != "":
ret.append(hint)
xml_keys = ["description", "documentation", "xRef", "type", "amplificationEfficiencyMethod",
"amplificationEfficiency", "detectionLimit", "dyeId", "sequences", "commercialAssay"]
exp1 = _get_all_children(self._node, "target")
all_dyes = {}
hint = ""
for node1 in exp1:
hint = ""
dye_ele = _get_first_child_text(node1, "dyeId")
node1.remove(_get_first_child(node1, "dyeId"))
if dye_ele == "":
dye_ele = "conversion_dye_missing"
hint = "Migration to v1.1 created target nonsense \"dyeId\"."
forId = _get_or_create_subelement(node1, "dyeId", xml_keys)
forId.attrib['id'] = dye_ele
all_dyes[dye_ele] = True
if hint != "":
ret.append(hint)
for dkey in all_dyes.keys():
if _check_unique_id(self._node, "dye", dkey):
new_node = et.Element("dye", id=dkey)
place = _get_tag_pos(self._node, "dye", self.xmlkeys(), 999999)
self._node.insert(place, new_node)
xml_keys = ["description", "documentation", "experimenter", "instrument", "dataCollectionSoftware",
"backgroundDeterminationMethod", "cqDetectionMethod", "thermalCyclingConditions", "pcrFormat",
"runDate", "react"]
exp1 = _get_all_children(self._node, "experiment")
for node1 in exp1:
exp2 = _get_all_children(node1, "run")
for node2 in exp2:
old_format = _get_first_child_text(node2, "pcrFormat")
exp3 = _get_all_children(node2, "pcrFormat")
for node3 in exp3:
node2.remove(node3)
rows = "1"
columns = "1"
rowLabel = "ABC"
columnLabel = "123"
if old_format == "single-well":
rowLabel = "123"
if old_format == "48-well plate; A1-F8":
rows = "6"
columns = "8"
if old_format == "96-well plate; A1-H12":
rows = "8"
columns = "12"
if old_format == "384-well plate; A1-P24":
rows = "16"
columns = "24"
if old_format == "3072-well plate; A1a1-D12h8":
rows = "32"
columns = "96"
rowLabel = "A1a1"
columnLabel = "A1a1"
if old_format == "32-well rotor; 1-32":
rows = "32"
rowLabel = "123"
if old_format == "72-well rotor; 1-72":
rows = "72"
rowLabel = "123"
if old_format == "100-well rotor; 1-100":
rows = "100"
rowLabel = "123"
if old_format == "free format":
rows = "-1"
columns = "1"
rowLabel = "123"
ele3 = _get_or_create_subelement(node2, "pcrFormat", xml_keys)
_change_subelement(ele3, "rows", ["rows", "columns", "rowLabel", "columnLabel"], rows, True, "string")
_change_subelement(ele3, "columns", ["rows", "columns", "rowLabel", "columnLabel"], columns, True, "string")
_change_subelement(ele3, "rowLabel", ["rows", "columns", "rowLabel", "columnLabel"], rowLabel, True, "string")
_change_subelement(ele3, "columnLabel", ["rows", "columns", "rowLabel", "columnLabel"], columnLabel, True, "string")
if old_format == "48-well plate A1-F8" or \
old_format == "96-well plate; A1-H12" or \
old_format == "384-well plate; A1-P24":
exp3 = _get_all_children(node2, "react")
for node3 in exp3:
old_id = node3.get('id')
old_letter = ord(re.sub(r"\d", "", old_id).upper()) - ord("A")
old_nr = int(re.sub(r"\D", "", old_id))
newId = old_nr + old_letter * int(columns)
node3.attrib['id'] = str(newId)
if old_format == "3072-well plate; A1a1-D12h8":
exp3 = _get_all_children(node2, "react")
for node3 in exp3:
old_id = node3.get('id')
old_left = re.sub(r"\D\d+$", "", old_id)
old_left_letter = ord(re.sub(r"\d", "", old_left).upper()) - ord("A")
old_left_nr = int(re.sub(r"\D", "", old_left)) - 1
old_right = re.sub(r"^\D\d+", "", old_id)
old_right_letter = ord(re.sub(r"\d", "", old_right).upper()) - ord("A")
old_right_nr = int(re.sub(r"\D", "", old_right))
newId = old_left_nr * 8 + old_right_nr + old_left_letter * 768 + old_right_letter * 96
node3.attrib['id'] = str(newId)
self._node.attrib['version'] = "1.1"
return ret
def migrate_version_1_1_to_1_2(self):
"""Migrates the rdml version from v1.1 to v1.2.
Args:
self: The class self parameter.
Returns:
A list of strings with the modifications made.
"""
ret = []
rdml_version = self._node.get('version')
if rdml_version != '1.1':
raise RdmlError('RDML version for migration has to be v1.1.')
exp1 = _get_all_children(self._node, "sample")
for node1 in exp1:
hint = ""
exp2 = _get_all_children(node1, "templateRNAQuality")
for node2 in exp2:
node1.remove(node2)
hint = "Migration to v1.2 deleted sample \"templateRNAQuality\" element."
if hint != "":
ret.append(hint)
hint = ""
exp2 = _get_all_children(node1, "templateRNAQuantity")
for node2 in exp2:
node1.remove(node2)
hint = "Migration to v1.2 deleted sample \"templateRNAQuantity\" element."
if hint != "":
ret.append(hint)
hint = ""
exp2 = _get_all_children(node1, "templateDNAQuality")
for node2 in exp2:
node1.remove(node2)
hint = "Migration to v1.2 deleted sample \"templateDNAQuality\" element."
if hint != "":
ret.append(hint)
hint = ""
exp2 = _get_all_children(node1, "templateDNAQuantity")
for node2 in exp2:
node1.remove(node2)
hint = "Migration to v1.2 deleted sample \"templateDNAQuantity\" element."
if hint != "":
ret.append(hint)
self._node.attrib['version'] = "1.2"
return ret
def migrate_version_1_2_to_1_1(self):
"""Migrates the rdml version from v1.2 to v1.1.
Args:
self: The class self parameter.
Returns:
A list of strings with the modifications made.
"""
ret = []
rdml_version = self._node.get('version')
if rdml_version != '1.2':
raise RdmlError('RDML version for migration has to be v1.2.')
exp1 = _get_all_children(self._node, "sample")
for node1 in exp1:
hint = ""
exp2 = _get_all_children(node1, "annotation")
for node2 in exp2:
node1.remove(node2)
hint = "Migration to v1.1 deleted sample \"annotation\" element."
if hint != "":
ret.append(hint)
hint = ""
exp2 = _get_all_children(node1, "templateQuantity")
for node2 in exp2:
node1.remove(node2)
hint = "Migration to v1.1 deleted sample \"templateQuantity\" element."
if hint != "":
ret.append(hint)
exp1 = _get_all_children(self._node, "target")
for node1 in exp1:
hint = ""
exp2 = _get_all_children(node1, "amplificationEfficiencySE")
for node2 in exp2:
node1.remove(node2)
hint = "Migration to v1.1 deleted target \"amplificationEfficiencySE\" element."
if hint != "":
ret.append(hint)
hint = ""
exp1 = _get_all_children(self._node, "experiment")
for node1 in exp1:
exp2 = _get_all_children(node1, "run")
for node2 in exp2:
exp3 = _get_all_children(node2, "react")
for node3 in exp3:
exp4 = _get_all_children(node3, "data")
for node4 in exp4:
exp5 = _get_all_children(node4, "bgFluorSlp")
for node5 in exp5:
hint = "Migration to v1.1 deleted react data \"bgFluorSlp\" elements."
node4.remove(node5)
if hint != "":
ret.append(hint)
self._node.attrib['version'] = "1.1"
return ret
def migrate_version_1_2_to_1_3(self):
"""Migrates the rdml version from v1.2 to v1.3.
Args:
self: The class self parameter.
Returns:
A list of strings with the modifications made.
"""
ret = []
rdml_version = self._node.get('version')
if rdml_version != '1.2':
raise RdmlError('RDML version for migration has to be v1.2.')
self._node.attrib['version'] = "1.3"
return ret
def migrate_version_1_3_to_1_2(self):
"""Migrates the rdml version from v1.3 to v1.2.
Args:
self: The class self parameter.
Returns:
A list of strings with the modifications made.
"""
ret = []
rdml_version = self._node.get('version')
if rdml_version != '1.3':
raise RdmlError('RDML version for migration has to be v1.3.')
hint = ""
hint2 = ""
hint3 = ""
hint4 = ""
hint5 = ""
hint6 = ""
hint7 = ""
hint8 = ""
exp1 = _get_all_children(self._node, "experiment")
for node1 in exp1:
exp2 = _get_all_children(node1, "run")
for node2 in exp2:
exp3 = _get_all_children(node2, "react")
for node3 in exp3:
exp4 = _get_all_children(node3, "partitions")
for node4 in exp4:
hint = "Migration to v1.2 deleted react \"partitions\" elements."
node3.remove(node4)
# No data element, no react element in v 1.2
exp5 = _get_all_children(node3, "data")
if len(exp5) == 0:
hint = "Migration to v1.2 deleted run \"react\" elements."
node2.remove(node3)
exp4b = _get_all_children(node3, "data")
for node4 in exp4b:
exp5 = _get_all_children(node4, "ampEffMet")
for node5 in exp5:
hint2 = "Migration to v1.2 deleted react data \"ampEffMet\" elements."
node4.remove(node5)
exp5 = _get_all_children(node4, "N0")
for node5 in exp5:
hint3 = "Migration to v1.2 deleted react data \"N0\" elements."
node4.remove(node5)
exp5 = _get_all_children(node4, "ampEff")
for node5 in exp5:
hint4 = "Migration to v1.2 deleted react data \"ampEff\" elements."
node4.remove(node5)
exp5 = _get_all_children(node4, "ampEffSE")
for node5 in exp5:
hint5 = "Migration to v1.2 deleted react data \"ampEffSE\" elements."
node4.remove(node5)
exp5 = _get_all_children(node4, "corrF")
for node5 in exp5:
hint6 = "Migration to v1.2 deleted react data \"corrF\" elements."
node4.remove(node5)
exp5 = _get_all_children(node4, "meltTemp")
for node5 in exp5:
hint7 = "Migration to v1.2 deleted react data \"meltTemp\" elements."
node4.remove(node5)
exp5 = _get_all_children(node4, "note")
for node5 in exp5:
hint8 = "Migration to v1.2 deleted react data \"note\" elements."
node4.remove(node5)
if hint != "":
ret.append(hint)
if hint2 != "":
ret.append(hint2)
if hint3 != "":
ret.append(hint3)
if hint4 != "":
ret.append(hint4)
if hint5 != "":
ret.append(hint5)
if hint6 != "":
ret.append(hint6)
if hint7 != "":
ret.append(hint7)
if hint8 != "":
ret.append(hint8)
exp1 = _get_all_children(self._node, "sample")
hint = ""
hint2 = ""
for node1 in exp1:
exp2 = _get_all_children(node1, "type")
if "targetId" in exp2[0].attrib:
del exp2[0].attrib["targetId"]
hint = "Migration to v1.2 deleted sample type \"targetId\" attribute."
for elCount in range(1, len(exp2)):
node1.remove(exp2[elCount])
hint2 = "Migration to v1.2 deleted sample \"type\" elements."
if hint != "":
ret.append(hint)
if hint2 != "":
ret.append(hint2)
exp1 = _get_all_children(self._node, "target")
hint = ""
for node1 in exp1:
exp2 = _get_all_children(node1, "meltingTemperature")
for node2 in exp2:
node1.remove(node2)
hint = "Migration to v1.2 deleted target \"meltingTemperature\" elements."
if hint != "":
ret.append(hint)
exp1 = _get_all_children(self._node, "dye")
hint = ""
for node1 in exp1:
exp2 = _get_all_children(node1, "dyeChemistry")
for node2 in exp2:
node1.remove(node2)
hint = "Migration to v1.2 deleted dye \"dyeChemistry\" elements."
if hint != "":
ret.append(hint)
self._node.attrib['version'] = "1.2"
return ret
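# Usage sketch: the migrate_version_* methods only step between neighbouring
# versions, so upgrading an old file to v1.3 chains the calls (file names are
# assumptions for illustration).
#
#     rdml = Rdml("old_v1_0_file.rdml")
#     if rdml.version() == "1.0":
#         rdml.migrate_version_1_0_to_1_1()
#     if rdml.version() == "1.1":
#         rdml.migrate_version_1_1_to_1_2()
#     if rdml.version() == "1.2":
#         rdml.migrate_version_1_2_to_1_3()
#     rdml.save("migrated_v1_3_file.rdml")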
def recreate_lost_ids(self):
"""Searches for lost ids and repairs them.
Args:
self: The class self parameter.
Returns:
A string with the modifications.
"""
mess = ""
# Find lost dyes
foundIds = {}
allTar = _get_all_children(self._node, "target")
for node in allTar:
forId = _get_first_child(node, "dyeId")
if forId is not None:
foundIds[forId.attrib['id']] = 0
presentIds = []
exp = _get_all_children(self._node, "dye")
for node in exp:
presentIds.append(node.attrib['id'])
for used_id in foundIds:
if used_id not in presentIds:
self.new_dye(id=used_id, newposition=0)
mess += "Recreated new dye: " + used_id + "\n"
# Find lost thermalCycCon
foundIds = {}
allSam = _get_all_children(self._node, "sample")
for node in allSam:
subNode = _get_first_child(node, "cdnaSynthesisMethod")
if subNode is not None:
forId = _get_first_child(node, "thermalCyclingConditions")
if forId is not None:
foundIds[forId.attrib['id']] = 0
allExp = _get_all_children(self._node, "experiment")
for node in allExp:
subNodes = _get_all_children(node, "run")
for subNode in subNodes:
forId = _get_first_child(subNode, "thermalCyclingConditions")
if forId is not None:
foundIds[forId.attrib['id']] = 0
presentIds = []
exp = _get_all_children(self._node, "thermalCyclingConditions")
for node in exp:
presentIds.append(node.attrib['id'])
for used_id in foundIds:
if used_id not in presentIds:
self.new_therm_cyc_cons(id=used_id, newposition=0)
mess += "Recreated thermal cycling conditions: " + used_id + "\n"
# Find lost experimenter
foundIds = {}
allTh = _get_all_children(self._node, "thermalCyclingConditions")
for node in allTh:
subNodes = _get_all_children(node, "experimenter")
for subNode in subNodes:
foundIds[subNode.attrib['id']] = 0
allExp = _get_all_children(self._node, "experiment")
for node in allExp:
subNodes = _get_all_children(node, "run")
for subNode in subNodes:
lastNodes = _get_all_children(subNode, "experimenter")
for lastNode in lastNodes:
foundIds[lastNode.attrib['id']] = 0
presentIds = []
exp = _get_all_children(self._node, "experimenter")
for node in exp:
presentIds.append(node.attrib['id'])
for used_id in foundIds:
if used_id not in presentIds:
self.new_experimenter(id=used_id, firstName="unknown first name", lastName="unknown last name", newposition=0)
mess += "Recreated experimenter: " + used_id + "\n"
# Find lost documentation
foundIds = {}
allSam = _get_all_children(self._node, "sample")
for node in allSam:
subNodes = _get_all_children(node, "documentation")
for subNode in subNodes:
foundIds[subNode.attrib['id']] = 0
allTh = _get_all_children(self._node, "target")
for node in allTh:
subNodes = _get_all_children(node, "documentation")
for subNode in subNodes:
foundIds[subNode.attrib['id']] = 0
allTh = _get_all_children(self._node, "thermalCyclingConditions")
for node in allTh:
subNodes = _get_all_children(node, "documentation")
for subNode in subNodes:
foundIds[subNode.attrib['id']] = 0
allExp = _get_all_children(self._node, "experiment")
for node in allExp:
subNodes = _get_all_children(node, "documentation")
for subNode in subNodes:
foundIds[subNode.attrib['id']] = 0
subNodes = _get_all_children(node, "run")
for subNode in subNodes:
lastNodes = _get_all_children(subNode, "documentation")
for lastNode in lastNodes:
foundIds[lastNode.attrib['id']] = 0
presentIds = []
exp = _get_all_children(self._node, "documentation")
for node in exp:
presentIds.append(node.attrib['id'])
for used_id in foundIds:
if used_id not in presentIds:
self.new_documentation(id=used_id, newposition=0)
mess += "Recreated documentation: " + used_id + "\n"
# Find lost sample
foundIds = {}
allExp = _get_all_children(self._node, "experiment")
for node in allExp:
subNodes = _get_all_children(node, "run")
for subNode in subNodes:
reactNodes = _get_all_children(subNode, "react")
for reactNode in reactNodes:
lastNodes = _get_all_children(reactNode, "sample")
for lastNode in lastNodes:
foundIds[lastNode.attrib['id']] = 0
presentIds = []
exp = _get_all_children(self._node, "sample")
for node in exp:
presentIds.append(node.attrib['id'])
for used_id in foundIds:
if used_id not in presentIds:
self.new_sample(id=used_id, type="unkn", newposition=0)
mess += "Recreated sample: " + used_id + "\n"
# Find lost target
foundIds = {}
allExp = _get_all_children(self._node, "sample")
for node in allExp:
subNodes = _get_all_children(node, "type")
for subNode in subNodes:
if "targetId" in subNode.attrib:
foundIds[subNode.attrib['targetId']] = 0
allExp = _get_all_children(self._node, "experiment")
for node in allExp:
subNodes = _get_all_children(node, "run")
for subNode in subNodes:
reactNodes = _get_all_children(subNode, "react")
for reactNode in reactNodes:
dataNodes = _get_all_children(reactNode, "data")
for dataNode in dataNodes:
lastNodes = _get_all_children(dataNode, "tar")
for lastNode in lastNodes:
foundIds[lastNode.attrib['id']] = 0
partNodes = _get_all_children(reactNode, "partitions")
for partNode in partNodes:
dataNodes = _get_all_children(partNode, "data")
for dataNode in dataNodes:
lastNodes = _get_all_children(dataNode, "tar")
for lastNode in lastNodes:
foundIds[lastNode.attrib['id']] = 0
# Search in Table files
if self._rdmlFilename is not None and self._rdmlFilename != "":
if zipfile.is_zipfile(self._rdmlFilename):
zf = zipfile.ZipFile(self._rdmlFilename, 'r')
for item in zf.infolist():
if re.search("^partitions/", item.filename):
fileContent = zf.read(item.filename).decode('utf-8')
newlineFix = fileContent.replace("\r\n", "\n")
tabLines = newlineFix.split("\n")
header = tabLines[0].split("\t")
for cell in header:
if cell != "":
foundIds[cell] = 0
zf.close()
presentIds = []
exp = _get_all_children(self._node, "target")
for node in exp:
presentIds.append(node.attrib['id'])
for used_id in foundIds:
if used_id not in presentIds:
self.new_target(id=used_id, type="toi", newposition=0)
mess += "Recreated target: " + used_id + "\n"
return mess
def repair_rdml_file(self):
"""Searches for known errors and repairs them.
Args:
self: The class self parameter.
Returns:
A string with the modifications.
"""
mess = ""
mess += self.fixExclFalse()
mess += self.fixDuplicateReact()
return mess
def fixExclFalse(self):
"""Searches in experiment-run-react-data for excl=false and deletes the elements.
Args:
self: The class self parameter.
Returns:
A string with the modifications.
"""
mess = ""
count = 0
allExp = _get_all_children(self._node, "experiment")
for node in allExp:
subNodes = _get_all_children(node, "run")
for subNode in subNodes:
reactNodes = _get_all_children(subNode, "react")
for reactNode in reactNodes:
dataNodes = _get_all_children(reactNode, "data")
for dataNode in dataNodes:
lastNodes = _get_all_children(dataNode, "excl")
for lastNode in lastNodes:
if lastNode.text.lower() == "false":
count += 1
dataNode.remove(lastNode)
if count > 0:
mess = "The element excl=false was removed " + str(count) + " times!\n"
return mess
def fixDuplicateReact(self):
"""Searches in experiment-run-react for duplicates and keeps only the first.
Args:
self: The class self parameter.
Returns:
A string with the modifications.
"""
mess = ""
foundIds = {}
count = 0
allExp = _get_all_children(self._node, "experiment")
for node in allExp:
subNodes = _get_all_children(node, "run")
for subNode in subNodes:
reactNodes = _get_all_children(subNode, "react")
for reactNode in reactNodes:
tId = reactNode.attrib['id']
if tId not in foundIds:
foundIds[tId] = 0
else:
count += 1
subNode.remove(reactNode)
if count > 0:
mess = str(count) + " duplicate react elements were removed!\n"
return mess
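# Usage sketch (hypothetical file name): repair_rdml_file() bundles the two fixes
# above and recreate_lost_ids() restores referenced but missing elements; both
# return a human readable report of the changes.
#
#     rdml = Rdml("suspicious.rdml")
#     print(rdml.recreate_lost_ids())
#     print(rdml.repair_rdml_file())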
def rdmlids(self):
"""Returns a list of all rdml id elements.
Args:
self: The class self parameter.
Returns:
A list of all rdml id elements.
"""
exp = _get_all_children(self._node, "id")
ret = []
for node in exp:
ret.append(Rdmlid(node))
return ret
def new_rdmlid(self, publisher, serialNumber, MD5Hash=None, newposition=None):
"""Creates a new rdml id element.
Args:
self: The class self parameter.
publisher: Publisher who created the serialNumber (required)
serialNumber: Serial Number for this file provided by publisher (required)
MD5Hash: A MD5 hash for this file (optional)
newposition: The new position of the element in the list (optional)
Returns:
Nothing, changes self.
"""
new_node = et.Element("id")
_add_new_subelement(new_node, "id", "publisher", publisher, False)
_add_new_subelement(new_node, "id", "serialNumber", serialNumber, False)
_add_new_subelement(new_node, "id", "MD5Hash", MD5Hash, True)
place = _get_tag_pos(self._node, "id", self.xmlkeys(), newposition)
self._node.insert(place, new_node)
def move_rdmlid(self, oldposition, newposition):
"""Moves the element to the new position in the list.
Args:
self: The class self parameter.
oldposition: The old position of the element
newposition: The new position of the element
Returns:
No return value, changes self. Function may raise RdmlError if required.
"""
pos = _get_tag_pos(self._node, "id", self.xmlkeys(), newposition)
ele = _get_first_child_by_pos_or_id(self._node, "id", None, oldposition)
self._node.insert(pos, ele)
def get_rdmlid(self, byposition=None):
"""Returns an experimenter element by position or id.
Args:
self: The class self parameter.
byposition: Select the element by position in the list.
Returns:
The found element or None.
"""
return Rdmlid(_get_first_child_by_pos_or_id(self._node, "id", None, byposition))
def delete_rdmlid(self, byposition=None):
"""Deletes an experimenter element.
Args:
self: The class self parameter.
byposition: Select the element by position in the list.
Returns:
Nothing, changes self.
"""
elem = _get_first_child_by_pos_or_id(self._node, "id", None, byposition)
self._node.remove(elem)
def experimenters(self):
"""Returns a list of all experimenter elements.
Args:
self: The class self parameter.
Returns:
A list of all experimenter elements.
"""
exp = _get_all_children(self._node, "experimenter")
ret = []
for node in exp:
ret.append(Experimenter(node))
return ret
def new_experimenter(self, id, firstName, lastName, email=None, labName=None, labAddress=None, newposition=None):
"""Creates a new experimenter element.
Args:
self: The class self parameter.
id: Experimenter unique id
firstName: Experimenters first name (required)
lastName: Experimenters last name (required)
email: Experimenters email (optional)
labName: Experimenters lab name (optional)
labAddress: Experimenters lab address (optional)
newposition: Experimenters position in the list of experimenters (optional)
Returns:
Nothing, changes self.
"""
new_node = _create_new_element(self._node, "experimenter", id)
_add_new_subelement(new_node, "experimenter", "firstName", firstName, False)
_add_new_subelement(new_node, "experimenter", "lastName", lastName, False)
_add_new_subelement(new_node, "experimenter", "email", email, True)
_add_new_subelement(new_node, "experimenter", "labName", labName, True)
_add_new_subelement(new_node, "experimenter", "labAddress", labAddress, True)
place = _get_tag_pos(self._node, "experimenter", self.xmlkeys(), newposition)
self._node.insert(place, new_node)
def move_experimenter(self, id, newposition):
"""Moves the element to the new position in the list.
Args:
self: The class self parameter.
id: Experimenter unique id
newposition: The new position of the element
Returns:
No return value, changes self. Function may raise RdmlError if required.
"""
_move_subelement(self._node, "experimenter", id, self.xmlkeys(), newposition)
def get_experimenter(self, byid=None, byposition=None):
"""Returns an experimenter element by position or id.
Args:
self: The class self parameter.
byid: Select the element by the element id.
byposition: Select the element by position in the list.
Returns:
The found element or None.
"""
return Experimenter(_get_first_child_by_pos_or_id(self._node, "experimenter", byid, byposition))
def delete_experimenter(self, byid=None, byposition=None):
"""Deletes an experimenter element.
Args:
self: The class self parameter.
byid: Select the element by the element id.
byposition: Select the element by position in the list.
Returns:
Nothing, changes self.
"""
elem = _get_first_child_by_pos_or_id(self._node, "experimenter", byid, byposition)
self._node.remove(elem)
def documentations(self):
"""Returns a list of all documentation elements.
Args:
self: The class self parameter.
Returns:
A list of all documentation elements.
"""
exp = _get_all_children(self._node, "documentation")
ret = []
for node in exp:
ret.append(Documentation(node))
return ret
def new_documentation(self, id, text=None, newposition=None):
"""Creates a new documentation element.
Args:
self: The class self parameter.
id: Documentation unique id
text: Documentation descriptive text (optional)
newposition: Documentation position in the list of documentations (optional)
Returns:
Nothing, changes self.
"""
new_node = _create_new_element(self._node, "documentation", id)
_add_new_subelement(new_node, "documentation", "text", text, True)
place = _get_tag_pos(self._node, "documentation", self.xmlkeys(), newposition)
self._node.insert(place, new_node)
def move_documentation(self, id, newposition):
"""Moves the element to the new position in the list.
Args:
self: The class self parameter.
id: Documentation unique id
newposition: The new position of the element
Returns:
No return value, changes self. Function may raise RdmlError if required.
"""
_move_subelement(self._node, "documentation", id, self.xmlkeys(), newposition)
def get_documentation(self, byid=None, byposition=None):
"""Returns an documentation element by position or id.
Args:
self: The class self parameter.
byid: Select the element by the element id.
byposition: Select the element by position in the list.
Returns:
The found element or None.
"""
return Documentation(_get_first_child_by_pos_or_id(self._node, "documentation", byid, byposition))
def delete_documentation(self, byid=None, byposition=None):
"""Deletes an documentation element.
Args:
self: The class self parameter.
byid: Select the element by the element id.
byposition: Select the element by position in the list.
Returns:
Nothing, changes self.
"""
elem = _get_first_child_by_pos_or_id(self._node, "documentation", byid, byposition)
self._node.remove(elem)
def dyes(self):
"""Returns a list of all dye elements.
Args:
self: The class self parameter.
Returns:
A list of all dye elements.
"""
exp = _get_all_children(self._node, "dye")
ret = []
for node in exp:
ret.append(Dye(node))
return ret
def new_dye(self, id, description=None, newposition=None):
"""Creates a new dye element.
Args:
self: The class self parameter.
id: Dye unique id
description: Dye descriptive text (optional)
newposition: Dye position in the list of dyes (optional)
Returns:
Nothing, changes self.
"""
new_node = _create_new_element(self._node, "dye", id)
_add_new_subelement(new_node, "dye", "description", description, True)
place = _get_tag_pos(self._node, "dye", self.xmlkeys(), newposition)
self._node.insert(place, new_node)
def move_dye(self, id, newposition):
"""Moves the element to the new position in the list.
Args:
self: The class self parameter.
id: Dye unique id
newposition: The new position of the element
Returns:
No return value, changes self. Function may raise RdmlError if required.
"""
_move_subelement(self._node, "dye", id, self.xmlkeys(), newposition)
def get_dye(self, byid=None, byposition=None):
"""Returns an dye element by position or id.
Args:
self: The class self parameter.
byid: Select the element by the element id.
byposition: Select the element by position in the list.
Returns:
The found element or None.
"""
return Dye(_get_first_child_by_pos_or_id(self._node, "dye", byid, byposition))
def delete_dye(self, byid=None, byposition=None):
"""Deletes an dye element.
Args:
self: The class self parameter.
byid: Select the element by the element id.
byposition: Select the element by position in the list.
Returns:
Nothing, changes self.
"""
elem = _get_first_child_by_pos_or_id(self._node, "dye", byid, byposition)
self._node.remove(elem)
def samples(self):
"""Returns a list of all sample elements.
Args:
self: The class self parameter.
Returns:
A list of all sample elements.
"""
exp = _get_all_children(self._node, "sample")
ret = []
for node in exp:
ret.append(Sample(node))
return ret
def new_sample(self, id, type, targetId=None, newposition=None):
"""Creates a new sample element.
Args:
self: The class self parameter.
id: Sample unique id (required)
type: Sample type (required)
targetId: The target linked to the type (makes sense in "pos" or "ntp" context) (optional)
newposition: Sample position in the list of samples (optional)
Returns:
Nothing, changes self.
"""
if type not in ["unkn", "ntc", "nac", "std", "ntp", "nrt", "pos", "opt"]:
raise RdmlError('Unknown or unsupported sample type value "' + type + '".')
new_node = _create_new_element(self._node, "sample", id)
typeEL = et.SubElement(new_node, "type")
typeEL.text = type
ver = self._node.get('version')
if ver == "1.3":
if targetId is not None:
if not targetId == "":
typeEL.attrib["targetId"] = targetId
place = _get_tag_pos(self._node, "sample", self.xmlkeys(), newposition)
self._node.insert(place, new_node)
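# Usage sketch (ids and target name are assumptions): sample types follow the RDML
# vocabulary checked above; the targetId attribute is only written for v1.3 files.
#
#     rdml.new_sample(id="NTC 1", type="ntc")
#     rdml.new_sample(id="Pos ctrl", type="pos", targetId="gene_x")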
def move_sample(self, id, newposition):
"""Moves the element to the new position in the list.
Args:
self: The class self parameter.
id: Sample unique id
newposition: The new position of the element
Returns:
No return value, changes self. Function may raise RdmlError if required.
"""
_move_subelement(self._node, "sample", id, self.xmlkeys(), newposition)
def get_sample(self, byid=None, byposition=None):
"""Returns an sample element by position or id.
Args:
self: The class self parameter.
byid: Select the element by the element id.
byposition: Select the element by position in the list.
Returns:
The found element or None.
"""
return Sample(_get_first_child_by_pos_or_id(self._node, "sample", byid, byposition))
def delete_sample(self, byid=None, byposition=None):
"""Deletes an sample element.
Args:
self: The class self parameter.
byid: Select the element by the element id.
byposition: Select the element by position in the list.
Returns:
Nothing, changes self.
"""
elem = _get_first_child_by_pos_or_id(self._node, "sample", byid, byposition)
self._node.remove(elem)
def targets(self):
"""Returns a list of all target elements.
Args:
self: The class self parameter.
Returns:
A list of all target elements.
"""
exp = _get_all_children(self._node, "target")
ret = []
for node in exp:
ret.append(Target(node, self._rdmlFilename))
return ret
def new_target(self, id, type, newposition=None):
"""Creates a new target element.
Args:
self: The class self parameter.
id: Target unique id (required)
type: Target type (required)
newposition: Targets position in the list of targets (optional)
Returns:
Nothing, changes self.
"""
if type not in ["ref", "toi"]:
raise RdmlError('Unknown or unsupported target type value "' + type + '".')
new_node = _create_new_element(self._node, "target", id)
_add_new_subelement(new_node, "target", "type", type, False)
place = _get_tag_pos(self._node, "target", self.xmlkeys(), newposition)
self._node.insert(place, new_node)
def move_target(self, id, newposition):
"""Moves the element to the new position in the list.
Args:
self: The class self parameter.
id: Target unique id
newposition: The new position of the element
Returns:
No return value, changes self. Function may raise RdmlError if required.
"""
_move_subelement(self._node, "target", id, self.xmlkeys(), newposition)
def get_target(self, byid=None, byposition=None):
"""Returns an target element by position or id.
Args:
self: The class self parameter.
byid: Select the element by the element id.
byposition: Select the element by position in the list.
Returns:
The found element or None.
"""
return Target(_get_first_child_by_pos_or_id(self._node, "target", byid, byposition), self._rdmlFilename)
def delete_target(self, byid=None, byposition=None):
"""Deletes an target element.
Args:
self: The class self parameter.
byid: Select the element by the element id.
byposition: Select the element by position in the list.
Returns:
Nothing, changes self.
"""
elem = _get_first_child_by_pos_or_id(self._node, "target", byid, byposition)
self._node.remove(elem)
def therm_cyc_cons(self):
"""Returns a list of all thermalCyclingConditions elements.
Args:
self: The class self parameter.
Returns:
A list of all target elements.
"""
exp = _get_all_children(self._node, "thermalCyclingConditions")
ret = []
for node in exp:
ret.append(Therm_cyc_cons(node))
return ret
def new_therm_cyc_cons(self, id, newposition=None):
"""Creates a new thermalCyclingConditions element.
Args:
self: The class self parameter.
id: ThermalCyclingConditions unique id (required)
newposition: ThermalCyclingConditions position in the list of ThermalCyclingConditions (optional)
Returns:
Nothing, changes self.
"""
new_node = _create_new_element(self._node, "thermalCyclingConditions", id)
step = et.SubElement(new_node, "step")
et.SubElement(step, "nr").text = "1"
et.SubElement(step, "lidOpen")
place = _get_tag_pos(self._node, "thermalCyclingConditions", self.xmlkeys(), newposition)
self._node.insert(place, new_node)
def move_therm_cyc_cons(self, id, newposition):
"""Moves the element to the new position in the list.
Args:
self: The class self parameter.
id: ThermalCyclingConditions unique id
newposition: The new position of the element
Returns:
No return value, changes self. Function may raise RdmlError if required.
"""
_move_subelement(self._node, "thermalCyclingConditions", id, self.xmlkeys(), newposition)
def get_therm_cyc_cons(self, byid=None, byposition=None):
"""Returns an thermalCyclingConditions element by position or id.
Args:
self: The class self parameter.
byid: Select the element by the element id.
byposition: Select the element by position in the list.
Returns:
The found element or None.
"""
return Therm_cyc_cons(_get_first_child_by_pos_or_id(self._node, "thermalCyclingConditions", byid, byposition))
def delete_therm_cyc_cons(self, byid=None, byposition=None):
"""Deletes an thermalCyclingConditions element.
Args:
self: The class self parameter.
byid: Select the element by the element id.
byposition: Select the element by position in the list.
Returns:
Nothing, changes self.
"""
elem = _get_first_child_by_pos_or_id(self._node, "thermalCyclingConditions", byid, byposition)
self._node.remove(elem)
def experiments(self):
"""Returns a list of all experiment elements.
Args:
self: The class self parameter.
Returns:
A list of all experiment elements.
"""
exp = _get_all_children(self._node, "experiment")
ret = []
for node in exp:
ret.append(Experiment(node, self._rdmlFilename))
return ret
def new_experiment(self, id, newposition=None):
"""Creates a new experiment element.
Args:
self: The class self parameter.
id: Experiment unique id (required)
newposition: Experiment position in the list of experiments (optional)
Returns:
Nothing, changes self.
"""
new_node = _create_new_element(self._node, "experiment", id)
place = _get_tag_pos(self._node, "experiment", self.xmlkeys(), newposition)
self._node.insert(place, new_node)
def move_experiment(self, id, newposition):
"""Moves the element to the new position in the list.
Args:
self: The class self parameter.
id: Experiments unique id
newposition: The new position of the element
Returns:
No return value, changes self. Function may raise RdmlError if required.
"""
_move_subelement(self._node, "experiment", id, self.xmlkeys(), newposition)
def get_experiment(self, byid=None, byposition=None):
"""Returns an experiment element by position or id.
Args:
self: The class self parameter.
byid: Select the element by the element id.
byposition: Select the element by position in the list.
Returns:
The found element or None.
"""
return Experiment(_get_first_child_by_pos_or_id(self._node, "experiment", byid, byposition), self._rdmlFilename)
def delete_experiment(self, byid=None, byposition=None):
"""Deletes an experiment element.
Args:
self: The class self parameter.
byid: Select the element by the element id.
byposition: Select the element by position in the list.
Returns:
Nothing, changes self.
"""
elem = _get_first_child_by_pos_or_id(self._node, "experiment", byid, byposition)
experiment = Experiment(elem, self._rdmlFilename)
# Required to delete digital files
runs = _get_all_children(elem, "run")
for node in runs:
run = Run(node, self._rdmlFilename)
experiment.delete_run(byid=run["id"])
# Now delete the experiment element
self._node.remove(elem)
def tojson(self):
"""Returns a json of the RDML object without fluorescence data.
Args:
self: The class self parameter.
Returns:
A json of the data.
"""
allRdmlids = self.rdmlids()
rdmlids = []
for elem in allRdmlids:
rdmlids.append(elem.tojson())
allExperimenters = self.experimenters()
experimenters = []
for exp in allExperimenters:
experimenters.append(exp.tojson())
allDocumentations = self.documentations()
documentations = []
for exp in allDocumentations:
documentations.append(exp.tojson())
allDyes = self.dyes()
dyes = []
for exp in allDyes:
dyes.append(exp.tojson())
allSamples = self.samples()
samples = []
for exp in allSamples:
samples.append(exp.tojson())
allTargets = self.targets()
targets = []
for exp in allTargets:
targets.append(exp.tojson())
allTherm_cyc_cons = self.therm_cyc_cons()
therm_cyc_cons = []
for exp in allTherm_cyc_cons:
therm_cyc_cons.append(exp.tojson())
allExperiments = self.experiments()
experiments = []
for exp in allExperiments:
experiments.append(exp.tojson())
data = {
"rdml": {
"version": self["version"],
"dateMade": self["dateMade"],
"dateUpdated": self["dateUpdated"],
"ids": rdmlids,
"experimenters": experimenters,
"documentations": documentations,
"dyes": dyes,
"samples": samples,
"targets": targets,
"therm_cyc_cons": therm_cyc_cons,
"experiments": experiments
}
}
return data
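# Usage sketch (all ids and the file name are assumptions): building a minimal RDML
# file from scratch with the element factories above and saving it as a compressed
# RDML archive.
#
#     rdml = Rdml()
#     rdml.new_experimenter(id="exp_1", firstName="Jane", lastName="Doe")
#     rdml.new_dye(id="SYBR")
#     rdml.new_target(id="gene_x", type="toi")
#     rdml.new_sample(id="sample_1", type="unkn")
#     rdml.new_experiment(id="run_2024_01")
#     rdml.save("minimal.rdml")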
class Rdmlid:
"""RDML-Python library
The rdml id element used to read and edit one rdml id.
Attributes:
_node: The rdml id node of the RDML XML object.
"""
def __init__(self, node):
"""Inits an rdml id instance.
Args:
self: The class self parameter.
node: The rdml id node.
Returns:
No return value. Function may raise RdmlError if required.
"""
self._node = node
def __getitem__(self, key):
"""Returns the value for the key.
Args:
self: The class self parameter.
key: The key of the rdml id subelement
Returns:
A string of the data or None.
"""
if key in ["publisher", "serialNumber"]:
return _get_first_child_text(self._node, key)
if key in ["MD5Hash"]:
var = _get_first_child_text(self._node, key)
if var == "":
return None
else:
return var
raise KeyError
def __setitem__(self, key, value):
"""Changes the value for the key.
Args:
self: The class self parameter.
key: The key of the rdml id subelement
value: The new value for the key
Returns:
No return value, changes self. Function may raise RdmlError if required.
"""
if key in ["publisher", "serialNumber"]:
return _change_subelement(self._node, key, self.xmlkeys(), value, False, "string")
if key in ["MD5Hash"]:
return _change_subelement(self._node, key, self.xmlkeys(), value, True, "string")
raise KeyError
def keys(self):
"""Returns a list of the keys.
Args:
self: The class self parameter.
Returns:
A list of the key strings.
"""
return ["publisher", "serialNumber", "MD5Hash"]
def xmlkeys(self):
"""Returns a list of the keys in the xml file.
Args:
self: The class self parameter.
Returns:
A list of the key strings.
"""
return self.keys()
def tojson(self):
"""Returns a json of the RDML object without fluorescence data.
Args:
self: The class self parameter.
Returns:
A json of the data.
"""
data = {
"publisher": _get_first_child_text(self._node, "publisher"),
"serialNumber": _get_first_child_text(self._node, "serialNumber")
}
_add_first_child_to_dic(self._node, data, True, "MD5Hash")
return data
class Experimenter:
"""RDML-Python library
The experimenter element used to read and edit one experimenter.
Attributes:
_node: The experimenter node of the RDML XML object.
"""
def __init__(self, node):
"""Inits an experimenter instance.
Args:
self: The class self parameter.
node: The experimenter node.
Returns:
No return value. Function may raise RdmlError if required.
"""
self._node = node
def __getitem__(self, key):
"""Returns the value for the key.
Args:
self: The class self parameter.
key: The key of the experimenter subelement
Returns:
A string of the data or None.
"""
if key == "id":
return self._node.get('id')
if key in ["firstName", "lastName"]:
return _get_first_child_text(self._node, key)
if key in ["email", "labName", "labAddress"]:
var = _get_first_child_text(self._node, key)
if var == "":
return None
else:
return var
raise KeyError
def __setitem__(self, key, value):
"""Changes the value for the key.
Args:
self: The class self parameter.
key: The key of the experimenter subelement
value: The new value for the key
Returns:
No return value, changes self. Function may raise RdmlError if required.
"""
if key == "id":
self.change_id(value, merge_with_id=False)
return
if key in ["firstName", "lastName"]:
return _change_subelement(self._node, key, self.xmlkeys(), value, False, "string")
if key in ["email", "labName", "labAddress"]:
return _change_subelement(self._node, key, self.xmlkeys(), value, True, "string")
raise KeyError
def change_id(self, value, merge_with_id=False):
"""Changes the value for the id.
Args:
self: The class self parameter.
value: The new value for the id.
merge_with_id: If False, simply change the id of this element; if True, the new id must already exist and all references to the old id are updated to it (merge).
Returns:
No return value, changes self. Function may raise RdmlError if required.
"""
oldValue = self._node.get('id')
if oldValue != value:
par = self._node.getparent()
if not _string_to_bool(merge_with_id, triple=False):
_change_subelement(self._node, "id", self.xmlkeys(), value, False, "string")
else:
groupTag = self._node.tag.replace("{http://www.rdml.org}", "")
if _check_unique_id(par, groupTag, value):
raise RdmlError('The ' + groupTag + ' id "' + value + '" does not exist.')
allTh = _get_all_children(par, "thermalCyclingConditions")
for node in allTh:
subNodes = _get_all_children(node, "experimenter")
for subNode in subNodes:
if subNode.attrib['id'] == oldValue:
subNode.attrib['id'] = value
allExp = _get_all_children(par, "experiment")
for node in allExp:
subNodes = _get_all_children(node, "run")
for subNode in subNodes:
lastNodes = _get_all_children(subNode, "experimenter")
for lastNode in lastNodes:
if lastNode.attrib['id'] == oldValue:
lastNode.attrib['id'] = value
return
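# Usage sketch (ids are assumptions): assigning to "id" renames the experimenter
# without merging; calling change_id with merge_with_id=True instead requires the
# new id to exist already and updates all run and thermalCyclingConditions
# references to the old id.
#
#     rdml.get_experimenter(byid="J.D.")["id"] = "Jane Doe"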
def keys(self):
"""Returns a list of the keys.
Args:
self: The class self parameter.
Returns:
A list of the key strings.
"""
return ["id", "firstName", "lastName", "email", "labName", "labAddress"]
def xmlkeys(self):
"""Returns a list of the keys in the xml file.
Args:
self: The class self parameter.
Returns:
A list of the key strings.
"""
return self.keys()
def tojson(self):
"""Returns a json of the RDML object without fluorescence data.
Args:
self: The class self parameter.
Returns:
A json of the data.
"""
data = {
"id": self._node.get('id'),
"firstName": _get_first_child_text(self._node, "firstName"),
"lastName": _get_first_child_text(self._node, "lastName")
}
_add_first_child_to_dic(self._node, data, True, "email")
_add_first_child_to_dic(self._node, data, True, "labName")
_add_first_child_to_dic(self._node, data, True, "labAddress")
return data
class Documentation:
"""RDML-Python library
The documentation element used to read and edit one documentation tag.
Attributes:
_node: The documentation node of the RDML XML object.
"""
def __init__(self, node):
"""Inits an documentation instance.
Args:
self: The class self parameter.
node: The documentation node.
Returns:
No return value. Function may raise RdmlError if required.
"""
self._node = node
def __getitem__(self, key):
"""Returns the value for the key.
Args:
self: The class self parameter.
key: The key of the documentation subelement
Returns:
A string of the data or None.
"""
if key == "id":
return self._node.get('id')
if key == "text":
var = _get_first_child_text(self._node, key)
if var == "":
return None
else:
return var
raise KeyError
def __setitem__(self, key, value):
"""Changes the value for the key.
Args:
self: The class self parameter.
key: The key of the documentation subelement
value: The new value for the key
Returns:
No return value, changes self. Function may raise RdmlError if required.
"""
if key == "id":
self.change_id(value, merge_with_id=False)
return
if key == "text":
return _change_subelement(self._node, key, self.xmlkeys(), value, True, "string")
raise KeyError
def change_id(self, value, merge_with_id=False):
"""Changes the value for the id.
Args:
self: The class self parameter.
value: The new value for the id.
merge_with_id: If False, simply change the id of this element; if True, the new id must already exist and all references to the old id are updated to it (merge).
Returns:
No return value, changes self. Function may raise RdmlError if required.
"""
oldValue = self._node.get('id')
if oldValue != value:
par = self._node.getparent()
if not _string_to_bool(merge_with_id, triple=False):
_change_subelement(self._node, "id", self.xmlkeys(), value, False, "string")
else:
groupTag = self._node.tag.replace("{http://www.rdml.org}", "")
if _check_unique_id(par, groupTag, value):
raise RdmlError('The ' + groupTag + ' id "' + value + '" does not exist.')
allSam = _get_all_children(par, "sample")
for node in allSam:
subNodes = _get_all_children(node, "documentation")
for subNode in subNodes:
if subNode.attrib['id'] == oldValue:
subNode.attrib['id'] = value
allTh = _get_all_children(par, "target")
for node in allTh:
subNodes = _get_all_children(node, "documentation")
for subNode in subNodes:
if subNode.attrib['id'] == oldValue:
subNode.attrib['id'] = value
allTh = _get_all_children(par, "thermalCyclingConditions")
for node in allTh:
subNodes = _get_all_children(node, "documentation")
for subNode in subNodes:
if subNode.attrib['id'] == oldValue:
subNode.attrib['id'] = value
allExp = _get_all_children(par, "experiment")
for node in allExp:
subNodes = _get_all_children(node, "documentation")
for subNode in subNodes:
if subNode.attrib['id'] == oldValue:
subNode.attrib['id'] = value
subNodes = _get_all_children(node, "run")
for subNode in subNodes:
lastNodes = _get_all_children(subNode, "documentation")
for lastNode in lastNodes:
if lastNode.attrib['id'] == oldValue:
lastNode.attrib['id'] = value
return
def keys(self):
"""Returns a list of the keys.
Args:
self: The class self parameter.
Returns:
A list of the key strings.
"""
return ["id", "text"]
def xmlkeys(self):
"""Returns a list of the keys in the xml file.
Args:
self: The class self parameter.
Returns:
A list of the key strings.
"""
return self.keys()
def tojson(self):
"""Returns a json of the RDML object without fluorescence data.
Args:
self: The class self parameter.
Returns:
A json of the data.
"""
data = {
"id": self._node.get('id'),
}
_add_first_child_to_dic(self._node, data, True, "text")
return data
class Dye:
"""RDML-Python library
The dye element used to read and edit one dye.
Attributes:
_node: The dye node of the RDML XML object.
"""
def __init__(self, node):
"""Inits an dye instance.
Args:
self: The class self parameter.
node: The dye node.
Returns:
No return value. Function may raise RdmlError if required.
"""
self._node = node
def __getitem__(self, key):
"""Returns the value for the key.
Args:
self: The class self parameter.
key: The key of the dye subelement
Returns:
A string of the data or None.
"""
if key == "id":
return self._node.get('id')
if key in ["description", "dyeChemistry"]:
var = _get_first_child_text(self._node, key)
if var == "":
return None
else:
return var
raise KeyError
def __setitem__(self, key, value):
"""Changes the value for the key.
Args:
self: The class self parameter.
key: The key of the dye subelement
value: The new value for the key
Returns:
No return value, changes self. Function may raise RdmlError if required.
"""
if key == "dyeChemistry":
if value not in ["non-saturating DNA binding dye", "saturating DNA binding dye", "hybridization probe",
"hydrolysis probe", "labelled forward primer", "labelled reverse primer",
"DNA-zyme probe"]:
raise RdmlError('Unknown or unsupported dye chemistry value "' + value + '".')
if key == "id":
self.change_id(value, merge_with_id=False)
return
if key == "description":
return _change_subelement(self._node, key, self.xmlkeys(), value, True, "string")
par = self._node.getparent()
ver = par.get('version')
if ver == "1.3":
if key == "dyeChemistry":
return _change_subelement(self._node, key, self.xmlkeys(), value, True, "string")
raise KeyError
def change_id(self, value, merge_with_id=False):
"""Changes the value for the id.
Args:
self: The class self parameter.
value: The new value for the id.
merge_with_id: If False, simply rename the id; if True, merge with an existing element of that id by renaming all references from the old id to the existing one.
Returns:
No return value, changes self. Function may raise RdmlError if required.
"""
oldValue = self._node.get('id')
if oldValue != value:
par = self._node.getparent()
if not _string_to_bool(merge_with_id, triple=False):
_change_subelement(self._node, "id", self.xmlkeys(), value, False, "string")
else:
groupTag = self._node.tag.replace("{http://www.rdml.org}", "")
if _check_unique_id(par, groupTag, value):
raise RdmlError('The ' + groupTag + ' id "' + value + '" does not exist.')
allTar = _get_all_children(par, "target")
for node in allTar:
forId = _get_first_child(node, "dyeId")
if forId is not None:
if forId.attrib['id'] == oldValue:
forId.attrib['id'] = value
return
def keys(self):
"""Returns a list of the keys.
Args:
self: The class self parameter.
Returns:
A list of the key strings.
"""
return ["id", "description", "dyeChemistry"]
def xmlkeys(self):
"""Returns a list of the keys in the xml file.
Args:
self: The class self parameter.
Returns:
A list of the key strings.
"""
return self.keys()
def tojson(self):
"""Returns a json of the RDML object.
Args:
self: The class self parameter.
Returns:
A json of the data.
"""
data = {
"id": self._node.get('id'),
}
_add_first_child_to_dic(self._node, data, True, "description")
_add_first_child_to_dic(self._node, data, True, "dyeChemistry")
return data
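# Hedged usage sketch for the Dye class above. How a dye node is obtained (e.g. a
# dyes() accessor on the parent document object) is assumed and not part of this
# file section; only the Dye methods shown above are real.
#
#   for dye in rdml_doc.dyes():                       # assumed accessor
#       print(dye["id"], dye["dyeChemistry"])         # read via __getitem__
#       dye["description"] = "FAM-labelled probe"     # write via __setitem__
#       print(dye.tojson())                           # plain dict, ready for JSON export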
class Sample:
"""RDML-Python library
The samples element used to read and edit one sample.
Attributes:
_node: The sample node of the RDML XML object.
"""
def __init__(self, node):
"""Inits an sample instance.
Args:
self: The class self parameter.
node: The sample node.
Returns:
No return value. Function may raise RdmlError if required.
"""
self._node = node
def __getitem__(self, key):
"""Returns the value for the key.
Args:
self: The class self parameter.
key: The key of the sample subelement
Returns:
A string of the data or None.
"""
if key == "id":
return self._node.get('id')
if key == "description":
var = _get_first_child_text(self._node, key)
if var == "":
return None
else:
return var
if key in ["interRunCalibrator", "calibratorSample"]:
return _get_first_child_bool(self._node, key, triple=True)
if key in ["cdnaSynthesisMethod_enzyme", "cdnaSynthesisMethod_primingMethod",
"cdnaSynthesisMethod_dnaseTreatment", "cdnaSynthesisMethod_thermalCyclingConditions"]:
ele = _get_first_child(self._node, "cdnaSynthesisMethod")
if ele is None:
return None
if key == "cdnaSynthesisMethod_enzyme":
return _get_first_child_text(ele, "enzyme")
if key == "cdnaSynthesisMethod_primingMethod":
return _get_first_child_text(ele, "primingMethod")
if key == "cdnaSynthesisMethod_dnaseTreatment":
return _get_first_child_text(ele, "dnaseTreatment")
if key == "cdnaSynthesisMethod_thermalCyclingConditions":
forId = _get_first_child(ele, "thermalCyclingConditions")
if forId is not None:
return forId.attrib['id']
else:
return None
raise RdmlError('Sample cdnaSynthesisMethod programming read error.')
if key == "quantity":
ele = _get_first_child(self._node, key)
vdic = {}
vdic["value"] = _get_first_child_text(ele, "value")
vdic["unit"] = _get_first_child_text(ele, "unit")
if len(vdic.keys()) != 0:
return vdic
else:
return None
par = self._node.getparent()
ver = par.get('version')
if ver == "1.1":
if key in ["templateRNAQuality", "templateDNAQuality"]:
ele = _get_first_child(self._node, key)
vdic = {}
vdic["method"] = _get_first_child_text(ele, "method")
vdic["result"] = _get_first_child_text(ele, "result")
if len(vdic.keys()) != 0:
return vdic
else:
return None
if key in ["templateRNAQuantity", "templateDNAQuantity"]:
ele = _get_first_child(self._node, key)
vdic = {}
vdic["value"] = _get_first_child_text(ele, "value")
vdic["unit"] = _get_first_child_text(ele, "unit")
if len(vdic.keys()) != 0:
return vdic
else:
return None
if ver == "1.2" or ver == "1.3":
if key == "templateQuantity":
ele = _get_first_child(self._node, key)
vdic = {}
vdic["nucleotide"] = _get_first_child_text(ele, "nucleotide")
vdic["conc"] = _get_first_child_text(ele, "conc")
if len(vdic.keys()) != 0:
return vdic
else:
return None
raise KeyError
def __setitem__(self, key, value):
"""Changes the value for the key.
Args:
self: The class self parameter.
key: The key of the sample subelement
value: The new value for the key
Returns:
No return value, changes self. Function may raise RdmlError if required.
"""
if key == "id":
self.change_id(value, merge_with_id=False)
return
if key == "description":
return _change_subelement(self._node, key, self.xmlkeys(), value, True, "string")
if key in ["interRunCalibrator", "calibratorSample"]:
return _change_subelement(self._node, key, self.xmlkeys(), value, True, "bool")
if key in ["cdnaSynthesisMethod_enzyme", "cdnaSynthesisMethod_primingMethod",
"cdnaSynthesisMethod_dnaseTreatment", "cdnaSynthesisMethod_thermalCyclingConditions"]:
ele = _get_or_create_subelement(self._node, "cdnaSynthesisMethod", self.xmlkeys())
if key == "cdnaSynthesisMethod_enzyme":
_change_subelement(ele, "enzyme",
["enzyme", "primingMethod", "dnaseTreatment", "thermalCyclingConditions"],
value, True, "string")
if key == "cdnaSynthesisMethod_primingMethod":
if value not in ["", "oligo-dt", "random", "target-specific", "oligo-dt and random", "other"]:
raise RdmlError('Unknown or unsupported sample ' + key + ' value "' + value + '".')
_change_subelement(ele, "primingMethod",
["enzyme", "primingMethod", "dnaseTreatment", "thermalCyclingConditions"],
value, True, "string")
if key == "cdnaSynthesisMethod_dnaseTreatment":
_change_subelement(ele, "dnaseTreatment",
["enzyme", "primingMethod", "dnaseTreatment", "thermalCyclingConditions"],
value, True, "bool")
if key == "cdnaSynthesisMethod_thermalCyclingConditions":
forId = _get_or_create_subelement(ele, "thermalCyclingConditions",
["enzyme", "primingMethod", "dnaseTreatment",
"thermalCyclingConditions"])
if value is not None and value != "":
# We do not check that ID is valid to allow recreate_lost_ids()
forId.attrib['id'] = value
else:
ele.remove(forId)
_remove_irrelevant_subelement(self._node, "cdnaSynthesisMethod")
return
if key == "quantity":
if value is None:
return
if "value" not in value or "unit" not in value:
raise RdmlError('Sample ' + key + ' must have a dictionary with "value" and "unit" as value.')
if value["unit"] not in ["", "cop", "fold", "dil", "ng", "nMol", "other"]:
raise RdmlError('Unknown or unsupported sample ' + key + ' unit "' + value["unit"] + '".')
ele = _get_or_create_subelement(self._node, key, self.xmlkeys())
_change_subelement(ele, "value", ["value", "unit"], value["value"], True, "float")
if value["value"] != "":
_change_subelement(ele, "unit", ["value", "unit"], value["unit"], True, "string")
else:
_change_subelement(ele, "unit", ["value", "unit"], "", True, "string")
_remove_irrelevant_subelement(self._node, key)
return
par = self._node.getparent()
ver = par.get('version')
if ver == "1.1":
if key in ["templateRNAQuality", "templateDNAQuality"]:
if value is None:
return
if "method" not in value or "result" not in value:
raise RdmlError('"' + key + '" must have a dictionary with "method" and "result" as value.')
ele = _get_or_create_subelement(self._node, key, self.xmlkeys())
_change_subelement(ele, "method", ["method", "result"], value["method"], True, "string")
_change_subelement(ele, "result", ["method", "result"], value["result"], True, "float")
_remove_irrelevant_subelement(self._node, key)
return
if key in ["templateRNAQuantity", "templateDNAQuantity"]:
if value is None:
return
if "value" not in value or "unit" not in value:
raise RdmlError('Sample ' + key + ' must have a dictionary with "value" and "unit" as value.')
if value["unit"] not in ["", "cop", "fold", "dil", "ng", "nMol", "other"]:
raise RdmlError('Unknown or unsupported sample ' + key + ' unit "' + value["unit"] + '".')
ele = _get_or_create_subelement(self._node, key, self.xmlkeys())
_change_subelement(ele, "value", ["value", "unit"], value["value"], True, "float")
if value["value"] != "":
_change_subelement(ele, "unit", ["value", "unit"], value["unit"], True, "string")
else:
_change_subelement(ele, "unit", ["value", "unit"], "", True, "string")
_remove_irrelevant_subelement(self._node, key)
return
if ver == "1.2" or ver == "1.3":
if key == "templateQuantity":
if value is None:
return
if "nucleotide" not in value or "conc" not in value:
raise RdmlError('Sample ' + key + ' must have a dictionary with "nucleotide" and "conc" as value.')
if value["nucleotide"] not in ["", "DNA", "genomic DNA", "cDNA", "RNA"]:
raise RdmlError('Unknown or unsupported sample ' + key + ' nucleotide "' + value["nucleotide"] + '".')
ele = _get_or_create_subelement(self._node, key, self.xmlkeys())
_change_subelement(ele, "conc", ["conc", "nucleotide"], value["conc"], True, "float")
if value["conc"] != "":
_change_subelement(ele, "nucleotide", ["conc", "nucleotide"], value["nucleotide"], True, "string")
else:
_change_subelement(ele, "nucleotide", ["conc", "nucleotide"], "", True, "string")
_remove_irrelevant_subelement(self._node, key)
return
raise KeyError
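# Hedged examples of the dictionary values expected by __setitem__ above. The
# sample object itself is assumed to come from a parent accessor not shown here.
#
#   sample["quantity"] = {"value": "5.0", "unit": "ng"}
#   sample["templateQuantity"] = {"nucleotide": "cDNA", "conc": "2.5"}   # RDML 1.2/1.3 only
#   sample["cdnaSynthesisMethod_primingMethod"] = "oligo-dt"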
def change_id(self, value, merge_with_id=False):
"""Changes the value for the id.
Args:
self: The class self parameter.
value: The new value for the id.
merge_with_id: If False, simply rename the id; if True, merge with an existing element of that id by renaming all references from the old id to the existing one.
Returns:
No return value, changes self. Function may raise RdmlError if required.
"""
oldValue = self._node.get('id')
if oldValue != value:
par = self._node.getparent()
if not _string_to_bool(merge_with_id, triple=False):
_change_subelement(self._node, "id", self.xmlkeys(), value, False, "string")
else:
groupTag = self._node.tag.replace("{http://www.rdml.org}", "")
if _check_unique_id(par, groupTag, value):
raise RdmlError('The ' + groupTag + ' id "' + value + '" does not exist.')
allExp = _get_all_children(par, "experiment")
for node in allExp:
subNodes = _get_all_children(node, "run")
for subNode in subNodes:
reactNodes = _get_all_children(subNode, "react")
for reactNode in reactNodes:
lastNodes = _get_all_children(reactNode, "sample")
for lastNode in lastNodes:
if lastNode.attrib['id'] == oldValue:
lastNode.attrib['id'] = value
return
def keys(self):
"""Returns a list of the keys.
Args:
self: The class self parameter.
Returns:
A list of the key strings.
"""
par = self._node.getparent()
ver = par.get('version')
if ver == "1.1":
return ["id", "description", "interRunCalibrator", "quantity", "calibratorSample",
"cdnaSynthesisMethod_enzyme", "cdnaSynthesisMethod_primingMethod",
"cdnaSynthesisMethod_dnaseTreatment", "cdnaSynthesisMethod_thermalCyclingConditions",
"templateRNAQuantity", "templateRNAQuality", "templateDNAQuantity", "templateDNAQuality"]
return ["id", "description", "annotation", "interRunCalibrator", "quantity", "calibratorSample",
"cdnaSynthesisMethod_enzyme", "cdnaSynthesisMethod_primingMethod",
"cdnaSynthesisMethod_dnaseTreatment", "cdnaSynthesisMethod_thermalCyclingConditions",
"templateQuantity"]
def xmlkeys(self):
"""Returns a list of the keys in the xml file.
Args:
self: The class self parameter.
Returns:
A list of the key strings.
"""
par = self._node.getparent()
ver = par.get('version')
if ver == "1.1":
return ["description", "documentation", "xRef", "type", "interRunCalibrator",
"quantity", "calibratorSample", "cdnaSynthesisMethod",
"templateRNAQuantity", "templateRNAQuality", "templateDNAQuantity", "templateDNAQuality"]
return ["description", "documentation", "xRef", "annotation", "type", "interRunCalibrator",
"quantity", "calibratorSample", "cdnaSynthesisMethod", "templateQuantity"]
def types(self):
"""Returns a list of the types in the xml file.
Args:
self: The class self parameter.
Returns:
A list of dics with type and id strings.
"""
typesList = _get_all_children(self._node, "type")
ret = []
for node in typesList:
data = {}
data["type"] = node.text
if "targetId" in node.attrib:
data["targetId"] = node.attrib["targetId"]
else:
data["targetId"] = ""
ret.append(data)
return ret
def new_type(self, type, targetId=None, newposition=None):
"""Creates a new type element.
Args:
self: The class self parameter.
type: The "unkn", "ntc", "nac", "std", "ntp", "nrt", "pos" or "opt" type of sample
targetId: The target linked to the type (makes sense in "pos" or "ntp" context)
newposition: The new position of the element
Returns:
Nothing, changes self.
"""
if type not in ["unkn", "ntc", "nac", "std", "ntp", "nrt", "pos", "opt"]:
raise RdmlError('Unknown or unsupported sample type value "' + type + '".')
new_node = et.Element("type")
new_node.text = type
par = self._node.getparent()
ver = par.get('version')
if ver == "1.3":
if targetId is not None:
if not targetId == "":
new_node.attrib["targetId"] = targetId
place = _get_tag_pos(self._node, "type", self.xmlkeys(), newposition)
self._node.insert(place, new_node)
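# Hedged call sketch for new_type(): add a "pos" control linked to a target and a
# plain unknown type; the targetId attribute is only written for RDML version 1.3.
#
#   sample.new_type("pos", targetId="Target_1")   # "Target_1" is an illustrative id
#   sample.new_type("unkn")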
def edit_type(self, type, oldposition, newposition=None, targetId=None):
"""Edits a type element.
Args:
self: The class self parameter.
oldposition: The old position of the element
newposition: The new position of the element
type: The "unkn", "ntc", "nac", "std", "ntp", "nrt", "pos" or "opt" type of sample
targetId: The target linked to the type (makes sense in "pos" or "ntp" context)
Returns:
Nothing, changes self.
"""
if type not in ["unkn", "ntc", "nac", "std", "ntp", "nrt", "pos", "opt"]:
raise RdmlError('Unknown or unsupported sample type value "' + type + '".')
if oldposition is None:
raise RdmlError('An oldposition is required to edit a type.')
pos = _get_tag_pos(self._node, "type", self.xmlkeys(), newposition)
ele = _get_first_child_by_pos_or_id(self._node, "type", None, oldposition)
ele.text = type
par = self._node.getparent()
ver = par.get('version')
if "targetId" in ele.attrib:
del ele.attrib["targetId"]
if ver == "1.3":
if targetId is not None:
if not targetId == "":
ele.attrib["targetId"] = targetId
self._node.insert(pos, ele)
def move_type(self, oldposition, newposition):
"""Moves the element to the new position in the list.
Args:
self: The class self parameter.
oldposition: The old position of the element
newposition: The new position of the element
Returns:
No return value, changes self. Function may raise RdmlError if required.
"""
pos = _get_tag_pos(self._node, "type", self.xmlkeys(), newposition)
ele = _get_first_child_by_pos_or_id(self._node, "type", None, oldposition)
self._node.insert(pos, ele)
def delete_type(self, byposition):
"""Deletes an type element.
Args:
self: The class self parameter.
byposition: Select the element by position in the list.
Returns:
Nothing, changes self.
"""
ls = self.types()
if len(ls) < 2:
return
elem = _get_first_child_by_pos_or_id(self._node, "type", None, byposition)
self._node.remove(elem)
def xrefs(self):
"""Returns a list of the xrefs in the xml file.
Args:
self: The class self parameter.
Returns:
A list of dics with name and id strings.
"""
xref = _get_all_children(self._node, "xRef")
ret = []
for node in xref:
data = {}
_add_first_child_to_dic(node, data, True, "name")
_add_first_child_to_dic(node, data, True, "id")
ret.append(data)
return ret
def new_xref(self, name=None, id=None, newposition=None):
"""Creates a new xrefs element.
Args:
self: The class self parameter.
name: Publisher who created the xRef
id: Serial Number for this sample provided by publisher
newposition: The new position of the element
Returns:
Nothing, changes self.
"""
if name is None and id is None:
raise RdmlError('Either name or id is required to create a xRef.')
new_node = et.Element("xRef")
_add_new_subelement(new_node, "xRef", "name", name, True)
_add_new_subelement(new_node, "xRef", "id", id, True)
place = _get_tag_pos(self._node, "xRef", self.xmlkeys(), newposition)
self._node.insert(place, new_node)
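# Hedged call sketch for new_xref(): at least one of name or id must be given,
# otherwise an RdmlError is raised; the identifiers below are illustrative.
#
#   sample.new_xref(name="BioSample", id="SAMN00000000")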
def edit_xref(self, oldposition, newposition=None, name=None, id=None):
"""Creates a new xrefs element.
Args:
self: The class self parameter.
oldposition: The old position of the element
newposition: The new position of the element
name: Publisher who created the xRef
id: Serial Number for this sample provided by publisher
Returns:
Nothing, changes self.
"""
if oldposition is None:
raise RdmlError('An oldposition is required to edit an xRef.')
if (name is None or name == "") and (id is None or id == ""):
self.delete_xref(oldposition)
return
pos = _get_tag_pos(self._node, "xRef", self.xmlkeys(), newposition)
ele = _get_first_child_by_pos_or_id(self._node, "xRef", None, oldposition)
_change_subelement(ele, "name", ["name", "id"], name, True, "string")
_change_subelement(ele, "id", ["name", "id"], id, True, "string", id_as_element=True)
self._node.insert(pos, ele)
def move_xref(self, oldposition, newposition):
"""Moves the element to the new position in the list.
Args:
self: The class self parameter.
oldposition: The old position of the element
newposition: The new position of the element
Returns:
No return value, changes self. Function may raise RdmlError if required.
"""
pos = _get_tag_pos(self._node, "xRef", self.xmlkeys(), newposition)
ele = _get_first_child_by_pos_or_id(self._node, "xRef", None, oldposition)
self._node.insert(pos, ele)
def delete_xref(self, byposition):
"""Deletes an experimenter element.
Args:
self: The class self parameter.
byposition: Select the element by position in the list.
Returns:
Nothing, changes self.
"""
elem = _get_first_child_by_pos_or_id(self._node, "xRef", None, byposition)
self._node.remove(elem)
def annotations(self):
"""Returns a list of the annotations in the xml file.
Args:
self: The class self parameter.
Returns:
A list of dics with property and value strings.
"""
par = self._node.getparent()
ver = par.get('version')
if ver == "1.1":
return []
xref = _get_all_children(self._node, "annotation")
ret = []
for node in xref:
data = {}
_add_first_child_to_dic(node, data, True, "property")
_add_first_child_to_dic(node, data, True, "value")
ret.append(data)
return ret
def new_annotation(self, property=None, value=None, newposition=None):
"""Creates a new annotation element.
Args:
self: The class self parameter.
property: The property
value: Its value
newposition: The new position of the element
Returns:
Nothing, changes self.
"""
par = self._node.getparent()
ver = par.get('version')
if ver == "1.1":
return
if property is None or value is None:
raise RdmlError('Property and value are required to create an annotation.')
new_node = et.Element("annotation")
_add_new_subelement(new_node, "annotation", "property", property, True)
_add_new_subelement(new_node, "annotation", "value", value, True)
place = _get_tag_pos(self._node, "annotation", self.xmlkeys(), newposition)
self._node.insert(place, new_node)
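# Hedged call sketch for new_annotation(): both property and value are required;
# the call is silently ignored for RDML version 1.1 files.
#
#   sample.new_annotation(property="tissue", value="liver")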
def edit_annotation(self, oldposition, newposition=None, property=None, value=None):
"""Edits an annotation element.
Args:
self: The class self parameter.
oldposition: The old position of the element
newposition: The new position of the element
property: The property
value: Its value
Returns:
Nothing, changes self.
"""
par = self._node.getparent()
ver = par.get('version')
if ver == "1.1":
return
if oldposition is None:
raise RdmlError('An oldposition is required to edit an annotation.')
if (property is None or property == "") or (value is None or value == ""):
self.delete_annotation(oldposition)
return
pos = _get_tag_pos(self._node, "annotation", self.xmlkeys(), newposition)
ele = _get_first_child_by_pos_or_id(self._node, "annotation", None, oldposition)
_change_subelement(ele, "property", ["property", "value"], property, True, "string")
_change_subelement(ele, "value", ["property", "value"], value, True, "string")
self._node.insert(pos, ele)
def move_annotation(self, oldposition, newposition):
"""Moves the element to the new position in the list.
Args:
self: The class self parameter.
oldposition: The old position of the element
newposition: The new position of the element
Returns:
No return value, changes self. Function may raise RdmlError if required.
"""
par = self._node.getparent()
ver = par.get('version')
if ver == "1.1":
return
pos = _get_tag_pos(self._node, "annotation", self.xmlkeys(), newposition)
ele = _get_first_child_by_pos_or_id(self._node, "annotation", None, oldposition)
self._node.insert(pos, ele)
def delete_annotation(self, byposition):
"""Deletes an annotation element.
Args:
self: The class self parameter.
byposition: Select the element by position in the list.
Returns:
Nothing, changes self.
"""
par = self._node.getparent()
ver = par.get('version')
if ver == "1.1":
return
elem = _get_first_child_by_pos_or_id(self._node, "annotation", None, byposition)
self._node.remove(elem)
def documentation_ids(self):
"""Returns a list of the keys in the xml file.
Args:
self: The class self parameter.
Returns:
A list of the key strings.
"""
return _get_all_children_id(self._node, "documentation")
def update_documentation_ids(self, ids):
"""Returns a json of the RDML object without fluorescence data.
Args:
self: The class self parameter.
ids: A dictionary with id and true/false pairs
Returns:
True if a change was made, else false. Function may raise RdmlError if required.
"""
old = self.documentation_ids()
good_ids = _value_to_booldic(ids)
mod = False
for id, inc in good_ids.items():
if inc is True:
if id not in old:
new_node = _create_new_element(self._node, "documentation", id)
place = _get_tag_pos(self._node, "documentation", self.xmlkeys(), 999999999)
self._node.insert(place, new_node)
mod = True
else:
if id in old:
elem = _get_first_child_by_pos_or_id(self._node, "documentation", id, None)
self._node.remove(elem)
mod = True
return mod
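# Hedged call sketch for update_documentation_ids(): keys are documentation ids,
# the boolean values decide whether the link is added or removed.
#
#   changed = sample.update_documentation_ids({"Doc_1": True, "Doc_2": False})
#   if changed:
#       pass  # e.g. let the caller mark the document as modified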
def move_documentation(self, oldposition, newposition):
"""Moves the element to the new position in the list.
Args:
self: The class self parameter.
oldposition: The old position of the element
newposition: The new position of the element
Returns:
No return value, changes self. Function may raise RdmlError if required.
"""
pos = _get_tag_pos(self._node, "documentation", self.xmlkeys(), newposition)
ele = _get_first_child_by_pos_or_id(self._node, "documentation", None, oldposition)
self._node.insert(pos, ele)
def tojson(self):
"""Returns a json of the RDML object without fluorescence data.
Args:
self: The class self parameter.
Returns:
A json of the data.
"""
par = self._node.getparent()
ver = par.get('version')
data = {
"id": self._node.get('id'),
}
_add_first_child_to_dic(self._node, data, True, "description")
data["documentations"] = self.documentation_ids()
data["xRefs"] = self.xrefs()
if ver == "1.2" or ver == "1.3":
data["annotations"] = self.annotations()
data["types"] = self.types()
_add_first_child_to_dic(self._node, data, True, "interRunCalibrator")
elem = _get_first_child(self._node, "quantity")
if elem is not None:
qdic = {}
_add_first_child_to_dic(elem, qdic, False, "value")
_add_first_child_to_dic(elem, qdic, False, "unit")
data["quantity"] = qdic
_add_first_child_to_dic(self._node, data, True, "calibratorSample")
elem = _get_first_child(self._node, "cdnaSynthesisMethod")
if elem is not None:
qdic = {}
_add_first_child_to_dic(elem, qdic, True, "enzyme")
_add_first_child_to_dic(elem, qdic, True, "primingMethod")
_add_first_child_to_dic(elem, qdic, True, "dnaseTreatment")
forId = _get_first_child(elem, "thermalCyclingConditions")
if forId is not None:
if forId.attrib['id'] != "":
qdic["thermalCyclingConditions"] = forId.attrib['id']
if len(qdic.keys()) != 0:
data["cdnaSynthesisMethod"] = qdic
if ver == "1.1":
elem = _get_first_child(self._node, "templateRNAQuantity")
if elem is not None:
qdic = {}
_add_first_child_to_dic(elem, qdic, False, "value")
_add_first_child_to_dic(elem, qdic, False, "unit")
data["templateRNAQuantity"] = qdic
elem = _get_first_child(self._node, "templateRNAQuality")
if elem is not None:
qdic = {}
_add_first_child_to_dic(elem, qdic, False, "method")
_add_first_child_to_dic(elem, qdic, False, "result")
data["templateRNAQuality"] = qdic
elem = _get_first_child(self._node, "templateDNAQuantity")
if elem is not None:
qdic = {}
_add_first_child_to_dic(elem, qdic, False, "value")
_add_first_child_to_dic(elem, qdic, False, "unit")
data["templateDNAQuantity"] = qdic
elem = _get_first_child(self._node, "templateDNAQuality")
if elem is not None:
qdic = {}
_add_first_child_to_dic(elem, qdic, False, "method")
_add_first_child_to_dic(elem, qdic, False, "result")
data["templateDNAQuality"] = qdic
if ver == "1.2" or ver == "1.3":
elem = _get_first_child(self._node, "templateQuantity")
if elem is not None:
qdic = {}
_add_first_child_to_dic(elem, qdic, False, "nucleotide")
_add_first_child_to_dic(elem, qdic, False, "conc")
data["templateQuantity"] = qdic
return data
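# Hedged end-to-end sketch for the Sample class above: rename a sample and export
# it without fluorescence data. The get_sample() accessor is assumed; everything
# else uses only methods defined in this class.
#
#   sample = rdml_doc.get_sample(byid="Sample_1")   # assumed accessor
#   sample.change_id("Sample_1_repeat", merge_with_id=False)
#   print(sample.tojson())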
class Target:
"""RDML-Python library
The target element used to read and edit one target.
Attributes:
_node: The target node of the RDML XML object.
_rdmlFilename: The RDML filename
"""
def __init__(self, node, rdmlFilename):
"""Inits an target instance.
Args:
self: The class self parameter.
node: The target node.
rdmlFilename: The RDML filename.
Returns:
No return value. Function may raise RdmlError if required.
"""
self._node = node
self._rdmlFilename = rdmlFilename
def __getitem__(self, key):
"""Returns the value for the key.
Args:
self: The class self parameter.
key: The key of the target subelement
Returns:
A string of the data or None.
"""
if key == "id":
return self._node.get('id')
if key == "type":
return _get_first_child_text(self._node, key)
if key in ["description", "amplificationEfficiencyMethod", "amplificationEfficiency",
"amplificationEfficiencySE", "meltingTemperature", "detectionLimit"]:
var = _get_first_child_text(self._node, key)
if var == "":
return None
else:
return var
if key == "dyeId":
forId = _get_first_child(self._node, key)
if forId is not None:
return forId.attrib['id']
else:
return None
if key in ["sequences_forwardPrimer_threePrimeTag", "sequences_forwardPrimer_fivePrimeTag",
"sequences_forwardPrimer_sequence", "sequences_reversePrimer_threePrimeTag",
"sequences_reversePrimer_fivePrimeTag", "sequences_reversePrimer_sequence",
"sequences_probe1_threePrimeTag", "sequences_probe1_fivePrimeTag",
"sequences_probe1_sequence", "sequences_probe2_threePrimeTag",
"sequences_probe2_fivePrimeTag", "sequences_probe2_sequence",
"sequences_amplicon_threePrimeTag", "sequences_amplicon_fivePrimeTag",
"sequences_amplicon_sequence"]:
prim = _get_first_child(self._node, "sequences")
if prim is None:
return None
sec = None
if key in ["sequences_forwardPrimer_threePrimeTag", "sequences_forwardPrimer_fivePrimeTag",
"sequences_forwardPrimer_sequence"]:
sec = _get_first_child(prim, "forwardPrimer")
if key in ["sequences_reversePrimer_threePrimeTag", "sequences_reversePrimer_fivePrimeTag",
"sequences_reversePrimer_sequence"]:
sec = _get_first_child(prim, "reversePrimer")
if key in ["sequences_probe1_threePrimeTag", "sequences_probe1_fivePrimeTag", "sequences_probe1_sequence"]:
sec = _get_first_child(prim, "probe1")
if key in ["sequences_probe2_threePrimeTag", "sequences_probe2_fivePrimeTag", "sequences_probe2_sequence"]:
sec = _get_first_child(prim, "probe2")
if key in ["sequences_amplicon_threePrimeTag", "sequences_amplicon_fivePrimeTag",
"sequences_amplicon_sequence"]:
sec = _get_first_child(prim, "amplicon")
if sec is None:
return None
if key in ["sequences_forwardPrimer_threePrimeTag", "sequences_reversePrimer_threePrimeTag",
"sequences_probe1_threePrimeTag", "sequences_probe2_threePrimeTag",
"sequences_amplicon_threePrimeTag"]:
return _get_first_child_text(sec, "threePrimeTag")
if key in ["sequences_forwardPrimer_fivePrimeTag", "sequences_reversePrimer_fivePrimeTag",
"sequences_probe1_fivePrimeTag", "sequences_probe2_fivePrimeTag",
"sequences_amplicon_fivePrimeTag"]:
return _get_first_child_text(sec, "fivePrimeTag")
if key in ["sequences_forwardPrimer_sequence", "sequences_reversePrimer_sequence",
"sequences_probe1_sequence", "sequences_probe2_sequence",
"sequences_amplicon_sequence"]:
return _get_first_child_text(sec, "sequence")
raise RdmlError('Target sequences programming read error.')
if key in ["commercialAssay_company", "commercialAssay_orderNumber"]:
prim = _get_first_child(self._node, "commercialAssay")
if prim is None:
return None
if key == "commercialAssay_company":
return _get_first_child_text(prim, "company")
if key == "commercialAssay_orderNumber":
return _get_first_child_text(prim, "orderNumber")
par = self._node.getparent()
ver = par.get('version')
if ver == "1.2" or ver == "1.3":
if key == "amplificationEfficiencySE":
var = _get_first_child_text(self._node, key)
if var == "":
return None
else:
return var
raise KeyError
def __setitem__(self, key, value):
"""Changes the value for the key.
Args:
self: The class self parameter.
key: The key of the target subelement
value: The new value for the key
Returns:
No return value, changes self. Function may raise RdmlError if required.
"""
par = self._node.getparent()
ver = par.get('version')
if key == "type":
if value not in ["ref", "toi"]:
raise RdmlError('Unknown or unsupported target type value "' + value + '".')
if key == "id":
self.change_id(value, merge_with_id=False)
return
if key == "type":
return _change_subelement(self._node, key, self.xmlkeys(), value, False, "string")
if key in ["description", "amplificationEfficiencyMethod"]:
return _change_subelement(self._node, key, self.xmlkeys(), value, True, "string")
if key in ["amplificationEfficiency", "detectionLimit"]:
return _change_subelement(self._node, key, self.xmlkeys(), value, True, "float")
if ver == "1.2" or ver == "1.3":
if key == "amplificationEfficiencySE":
return _change_subelement(self._node, key, self.xmlkeys(), value, True, "float")
if ver == "1.3":
if key == "meltingTemperature":
return _change_subelement(self._node, key, self.xmlkeys(), value, True, "float")
if key == "dyeId":
forId = _get_or_create_subelement(self._node, "dyeId", self.xmlkeys())
if value is not None and value != "":
# We do not check that ID is valid to allow recreate_lost_ids()
forId.attrib['id'] = value
else:
self._node.remove(forId)
return
if key in ["sequences_forwardPrimer_threePrimeTag", "sequences_forwardPrimer_fivePrimeTag",
"sequences_forwardPrimer_sequence", "sequences_reversePrimer_threePrimeTag",
"sequences_reversePrimer_fivePrimeTag", "sequences_reversePrimer_sequence",
"sequences_probe1_threePrimeTag", "sequences_probe1_fivePrimeTag",
"sequences_probe1_sequence", "sequences_probe2_threePrimeTag",
"sequences_probe2_fivePrimeTag", "sequences_probe2_sequence",
"sequences_amplicon_threePrimeTag", "sequences_amplicon_fivePrimeTag",
"sequences_amplicon_sequence"]:
prim = _get_or_create_subelement(self._node, "sequences", self.xmlkeys())
sec = None
if key in ["sequences_forwardPrimer_threePrimeTag", "sequences_forwardPrimer_fivePrimeTag",
"sequences_forwardPrimer_sequence"]:
sec = _get_or_create_subelement(prim, "forwardPrimer",
["forwardPrimer", "reversePrimer", "probe1", "probe2", "amplicon"])
if key in ["sequences_reversePrimer_threePrimeTag", "sequences_reversePrimer_fivePrimeTag",
"sequences_reversePrimer_sequence"]:
sec = _get_or_create_subelement(prim, "reversePrimer",
["forwardPrimer", "reversePrimer", "probe1", "probe2", "amplicon"])
if key in ["sequences_probe1_threePrimeTag", "sequences_probe1_fivePrimeTag", "sequences_probe1_sequence"]:
sec = _get_or_create_subelement(prim, "probe1",
["forwardPrimer", "reversePrimer", "probe1", "probe2", "amplicon"])
if key in ["sequences_probe2_threePrimeTag", "sequences_probe2_fivePrimeTag", "sequences_probe2_sequence"]:
sec = _get_or_create_subelement(prim, "probe2",
["forwardPrimer", "reversePrimer", "probe1", "probe2", "amplicon"])
if key in ["sequences_amplicon_threePrimeTag", "sequences_amplicon_fivePrimeTag",
"sequences_amplicon_sequence"]:
sec = _get_or_create_subelement(prim, "amplicon",
["forwardPrimer", "reversePrimer", "probe1", "probe2", "amplicon"])
if sec is None:
return None
if key in ["sequences_forwardPrimer_threePrimeTag", "sequences_reversePrimer_threePrimeTag",
"sequences_probe1_threePrimeTag", "sequences_probe2_threePrimeTag",
"sequences_amplicon_threePrimeTag"]:
_change_subelement(sec, "threePrimeTag",
["threePrimeTag", "fivePrimeTag", "sequence"], value, True, "string")
if key in ["sequences_forwardPrimer_fivePrimeTag", "sequences_reversePrimer_fivePrimeTag",
"sequences_probe1_fivePrimeTag", "sequences_probe2_fivePrimeTag",
"sequences_amplicon_fivePrimeTag"]:
_change_subelement(sec, "fivePrimeTag",
["threePrimeTag", "fivePrimeTag", "sequence"], value, True, "string")
if key in ["sequences_forwardPrimer_sequence", "sequences_reversePrimer_sequence",
"sequences_probe1_sequence", "sequences_probe2_sequence",
"sequences_amplicon_sequence"]:
_change_subelement(sec, "sequence",
["threePrimeTag", "fivePrimeTag", "sequence"], value, True, "string")
if key in ["sequences_forwardPrimer_threePrimeTag", "sequences_forwardPrimer_fivePrimeTag",
"sequences_forwardPrimer_sequence"]:
_remove_irrelevant_subelement(prim, "forwardPrimer")
if key in ["sequences_reversePrimer_threePrimeTag", "sequences_reversePrimer_fivePrimeTag",
"sequences_reversePrimer_sequence"]:
_remove_irrelevant_subelement(prim, "reversePrimer")
if key in ["sequences_probe1_threePrimeTag", "sequences_probe1_fivePrimeTag", "sequences_probe1_sequence"]:
_remove_irrelevant_subelement(prim, "probe1")
if key in ["sequences_probe2_threePrimeTag", "sequences_probe2_fivePrimeTag", "sequences_probe2_sequence"]:
_remove_irrelevant_subelement(prim, "probe2")
if key in ["sequences_amplicon_threePrimeTag", "sequences_amplicon_fivePrimeTag",
"sequences_amplicon_sequence"]:
_remove_irrelevant_subelement(prim, "amplicon")
_remove_irrelevant_subelement(self._node, "sequences")
return
if key in ["commercialAssay_company", "commercialAssay_orderNumber"]:
ele = _get_or_create_subelement(self._node, "commercialAssay", self.xmlkeys())
if key == "commercialAssay_company":
_change_subelement(ele, "company", ["company", "orderNumber"], value, True, "string")
if key == "commercialAssay_orderNumber":
_change_subelement(ele, "orderNumber", ["company", "orderNumber"], value, True, "string")
_remove_irrelevant_subelement(self._node, "commercialAssay")
return
par = self._node.getparent()
ver = par.get('version')
if ver == "1.2" or ver == "1.3":
if key == "amplificationEfficiencySE":
return _change_subelement(self._node, key, self.xmlkeys(), value, True, "float")
raise KeyError
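# Hedged examples for the composite keys handled by __setitem__ above; the
# underscore-separated keys are flattened paths into the <sequences> and
# <commercialAssay> subelements, and the dyeId reference is not validated here.
#
#   target["sequences_forwardPrimer_sequence"] = "ACGTACGTACGT"   # illustrative sequence
#   target["commercialAssay_company"] = "ExampleVendor"           # illustrative name
#   target["dyeId"] = "FAM"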
def change_id(self, value, merge_with_id=False):
"""Changes the value for the id.
Args:
self: The class self parameter.
value: The new value for the id.
merge_with_id: If False, simply rename the id; if True, merge with an existing element of that id by renaming all references from the old id to the existing one.
Returns:
No return value, changes self. Function may raise RdmlError if required.
"""
oldValue = self._node.get('id')
if oldValue != value:
par = self._node.getparent()
if not _string_to_bool(merge_with_id, triple=False):
_change_subelement(self._node, "id", self.xmlkeys(), value, False, "string")
else:
groupTag = self._node.tag.replace("{http://www.rdml.org}", "")
if _check_unique_id(par, groupTag, value):
raise RdmlError('The ' + groupTag + ' id "' + value + '" does not exist.')
allExp = _get_all_children(par, "sample")
for node in allExp:
subNodes = _get_all_children(node, "type")
for subNode in subNodes:
if "targetId" in subNode.attrib:
if subNode.attrib['targetId'] == oldValue:
subNode.attrib['targetId'] = value
allExp = _get_all_children(par, "experiment")
for node in allExp:
subNodes = _get_all_children(node, "run")
for subNode in subNodes:
reactNodes = _get_all_children(subNode, "react")
for reactNode in reactNodes:
dataNodes = _get_all_children(reactNode, "data")
for dataNode in dataNodes:
lastNodes = _get_all_children(dataNode, "tar")
for lastNode in lastNodes:
if lastNode.attrib['id'] == oldValue:
lastNode.attrib['id'] = value
partit = _get_first_child(reactNode, "partitions")
if partit is not None:
digDataNodes = _get_all_children(partit, "data")
for digDataNode in digDataNodes:
lastNodes = _get_all_children(digDataNode, "tar")
for lastNode in lastNodes:
if lastNode.attrib['id'] == oldValue:
lastNode.attrib['id'] = value
# Search in Table files
if self._rdmlFilename is not None and self._rdmlFilename != "":
if zipfile.is_zipfile(self._rdmlFilename):
fileList = []
tempName = ""
flipFiles = False
with zipfile.ZipFile(self._rdmlFilename, 'r') as RDMLin:
for item in RDMLin.infolist():
if re.search("^partitions/", item.filename):
fileContent = RDMLin.read(item.filename).decode('utf-8')
newlineFix = fileContent.replace("\r\n", "\n")
tabLines = newlineFix.split("\n")
header = tabLines[0].split("\t")
needRewrite = False
for cell in header:
if cell == oldValue:
needRewrite = True
if needRewrite:
fileList.append(item.filename)
if len(fileList) > 0:
tempFolder, tempName = tempfile.mkstemp(dir=os.path.dirname(self._rdmlFilename))
os.close(tempFolder)
flipFiles = True
with zipfile.ZipFile(tempName, mode='w', compression=zipfile.ZIP_DEFLATED) as RDMLout:
RDMLout.comment = RDMLin.comment
for item in RDMLin.infolist():
if item.filename not in fileList:
RDMLout.writestr(item, RDMLin.read(item.filename))
else:
fileContent = RDMLin.read(item.filename).decode('utf-8')
newlineFix = fileContent.replace("\r\n", "\n")
tabLines = newlineFix.split("\n")
header = tabLines[0].split("\t")
headerText = ""
for cell in header:
if cell == oldValue:
headerText += value + "\t"
else:
headerText += cell + "\t"
outFileStr = re.sub(r'\t$', '\n', headerText)
for tabLine in tabLines[1:]:
if tabLine != "":
outFileStr += tabLine + "\n"
RDMLout.writestr(item.filename, outFileStr)
if flipFiles:
os.remove(self._rdmlFilename)
os.rename(tempName, self._rdmlFilename)
return
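# Note on the block above: partition data tables stored inside the RDML zip archive
# carry target ids in their header row, so renaming also rewrites every
# "partitions/..." member whose header contains the old id, writing a temporary
# archive and swapping it in place. Hedged call sketch (the accessor is assumed):
#
#   target = rdml_doc.get_target(byid="GAPDH")   # assumed accessor
#   target.change_id("GAPDH_v2")                 # defaults to merge_with_id=False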
def keys(self):
"""Returns a list of the keys.
Args:
self: The class self parameter.
Returns:
A list of the key strings.
"""
return ["id", "description", "type", "amplificationEfficiencyMethod", "amplificationEfficiency",
"amplificationEfficiencySE", "meltingTemperature", "detectionLimit", "dyeId",
"sequences_forwardPrimer_threePrimeTag",
"sequences_forwardPrimer_fivePrimeTag", "sequences_forwardPrimer_sequence",
"sequences_reversePrimer_threePrimeTag", "sequences_reversePrimer_fivePrimeTag",
"sequences_reversePrimer_sequence", "sequences_probe1_threePrimeTag",
"sequences_probe1_fivePrimeTag", "sequences_probe1_sequence", "sequences_probe2_threePrimeTag",
"sequences_probe2_fivePrimeTag", "sequences_probe2_sequence", "sequences_amplicon_threePrimeTag",
"sequences_amplicon_fivePrimeTag", "sequences_amplicon_sequence", "commercialAssay_company",
"commercialAssay_orderNumber"] # Also change in LinRegPCR save RDML
def xmlkeys(self):
"""Returns a list of the keys in the xml file.
Args:
self: The class self parameter.
Returns:
A list of the key strings.
"""
return ["description", "documentation", "xRef", "type", "amplificationEfficiencyMethod",
"amplificationEfficiency", "amplificationEfficiencySE", "meltingTemperature",
"detectionLimit", "dyeId", "sequences", "commercialAssay"]
def xrefs(self):
"""Returns a list of the xrefs in the xml file.
Args:
self: The class self parameter.
Returns:
A list of dics with name and id strings.
"""
xref = _get_all_children(self._node, "xRef")
ret = []
for node in xref:
data = {}
_add_first_child_to_dic(node, data, True, "name")
_add_first_child_to_dic(node, data, True, "id")
ret.append(data)
return ret
def new_xref(self, name=None, id=None, newposition=None):
"""Creates a new xrefs element.
Args:
self: The class self parameter.
name: Publisher who created the xRef
id: Serial Number for this target provided by publisher
newposition: The new position of the element
Returns:
Nothing, changes self.
"""
if name is None and id is None:
raise RdmlError('Either name or id is required to create a xRef.')
new_node = et.Element("xRef")
_add_new_subelement(new_node, "xRef", "name", name, True)
_add_new_subelement(new_node, "xRef", "id", id, True)
place = _get_tag_pos(self._node, "xRef", self.xmlkeys(), newposition)
self._node.insert(place, new_node)
def edit_xref(self, oldposition, newposition=None, name=None, id=None):
"""Creates a new xrefs element.
Args:
self: The class self parameter.
oldposition: The old position of the element
newposition: The new position of the element
name: Publisher who created the xRef
id: Serial Number for this target provided by publisher
Returns:
Nothing, changes self.
"""
if oldposition is None:
raise RdmlError('An oldposition is required to edit an xRef.')
if (name is None or name == "") and (id is None or id == ""):
self.delete_xref(oldposition)
return
pos = _get_tag_pos(self._node, "xRef", self.xmlkeys(), newposition)
ele = _get_first_child_by_pos_or_id(self._node, "xRef", None, oldposition)
_change_subelement(ele, "name", ["name", "id"], name, True, "string")
_change_subelement(ele, "id", ["name", "id"], id, True, "string", id_as_element=True)
self._node.insert(pos, ele)
def move_xref(self, oldposition, newposition):
"""Moves the element to the new position in the list.
Args:
self: The class self parameter.
oldposition: The old position of the element
newposition: The new position of the element
Returns:
No return value, changes self. Function may raise RdmlError if required.
"""
pos = _get_tag_pos(self._node, "xRef", self.xmlkeys(), newposition)
ele = _get_first_child_by_pos_or_id(self._node, "xRef", None, oldposition)
self._node.insert(pos, ele)
def delete_xref(self, byposition):
"""Deletes an experimenter element.
Args:
self: The class self parameter.
byposition: Select the element by position in the list.
Returns:
Nothing, changes self.
"""
elem = _get_first_child_by_pos_or_id(self._node, "xRef", None, byposition)
self._node.remove(elem)
def documentation_ids(self):
"""Returns a list of the keys in the xml file.
Args:
self: The class self parameter.
Returns:
A list of the key strings.
"""
return _get_all_children_id(self._node, "documentation")
def update_documentation_ids(self, ids):
"""Returns a json of the RDML object without fluorescence data.
Args:
self: The class self parameter.
ids: A dictionary with id and true/false pairs
Returns:
True if a change was made, else false. Function may raise RdmlError if required.
"""
old = self.documentation_ids()
good_ids = _value_to_booldic(ids)
mod = False
for id, inc in good_ids.items():
if inc is True:
if id not in old:
new_node = _create_new_element(self._node, "documentation", id)
place = _get_tag_pos(self._node, "documentation", self.xmlkeys(), 999999999)
self._node.insert(place, new_node)
mod = True
else:
if id in old:
elem = _get_first_child_by_pos_or_id(self._node, "documentation", id, None)
self._node.remove(elem)
mod = True
return mod
def move_documentation(self, oldposition, newposition):
"""Moves the element to the new position in the list.
Args:
self: The class self parameter.
oldposition: The old position of the element
newposition: The new position of the element
Returns:
No return value, changes self. Function may raise RdmlError if required.
"""
pos = _get_tag_pos(self._node, "documentation", self.xmlkeys(), newposition)
ele = _get_first_child_by_pos_or_id(self._node, "documentation", None, oldposition)
self._node.insert(pos, ele)
def tojson(self):
"""Returns a json of the RDML object without fluorescence data.
Args:
self: The class self parameter.
Returns:
A json of the data.
"""
data = {
"id": self._node.get('id'),
}
_add_first_child_to_dic(self._node, data, True, "description")
data["documentations"] = self.documentation_ids()
data["xRefs"] = self.xrefs()
_add_first_child_to_dic(self._node, data, False, "type")
_add_first_child_to_dic(self._node, data, True, "amplificationEfficiencyMethod")
_add_first_child_to_dic(self._node, data, True, "amplificationEfficiency")
_add_first_child_to_dic(self._node, data, True, "amplificationEfficiencySE")
_add_first_child_to_dic(self._node, data, True, "meltingTemperature")
_add_first_child_to_dic(self._node, data, True, "detectionLimit")
forId = _get_first_child(self._node, "dyeId")
if forId is not None:
if forId.attrib['id'] != "":
data["dyeId"] = forId.attrib['id']
elem = _get_first_child(self._node, "sequences")
if elem is not None:
qdic = {}
sec = _get_first_child(elem, "forwardPrimer")
if sec is not None:
sdic = {}
_add_first_child_to_dic(sec, sdic, True, "threePrimeTag")
_add_first_child_to_dic(sec, sdic, True, "fivePrimeTag")
_add_first_child_to_dic(sec, sdic, True, "sequence")
if len(sdic.keys()) != 0:
qdic["forwardPrimer"] = sdic
sec = _get_first_child(elem, "reversePrimer")
if sec is not None:
sdic = {}
_add_first_child_to_dic(sec, sdic, True, "threePrimeTag")
_add_first_child_to_dic(sec, sdic, True, "fivePrimeTag")
_add_first_child_to_dic(sec, sdic, True, "sequence")
if len(sdic.keys()) != 0:
qdic["reversePrimer"] = sdic
sec = _get_first_child(elem, "probe1")
if sec is not None:
sdic = {}
_add_first_child_to_dic(sec, sdic, True, "threePrimeTag")
_add_first_child_to_dic(sec, sdic, True, "fivePrimeTag")
_add_first_child_to_dic(sec, sdic, True, "sequence")
if len(sdic.keys()) != 0:
qdic["probe1"] = sdic
sec = _get_first_child(elem, "probe2")
if sec is not None:
sdic = {}
_add_first_child_to_dic(sec, sdic, True, "threePrimeTag")
_add_first_child_to_dic(sec, sdic, True, "fivePrimeTag")
_add_first_child_to_dic(sec, sdic, True, "sequence")
if len(sdic.keys()) != 0:
qdic["probe2"] = sdic
sec = _get_first_child(elem, "amplicon")
if sec is not None:
sdic = {}
_add_first_child_to_dic(sec, sdic, True, "threePrimeTag")
_add_first_child_to_dic(sec, sdic, True, "fivePrimeTag")
_add_first_child_to_dic(sec, sdic, True, "sequence")
if len(sdic.keys()) != 0:
qdic["amplicon"] = sdic
if len(qdic.keys()) != 0:
data["sequences"] = qdic
elem = _get_first_child(self._node, "commercialAssay")
if elem is not None:
qdic = {}
_add_first_child_to_dic(elem, qdic, True, "company")
_add_first_child_to_dic(elem, qdic, True, "orderNumber")
if len(qdic.keys()) != 0:
data["commercialAssay"] = qdic
return data
class Therm_cyc_cons:
"""RDML-Python library
The thermalCyclingConditions element used to read and edit one thermal Cycling Conditions.
Attributes:
_node: The thermalCyclingConditions node of the RDML XML object.
"""
def __init__(self, node):
"""Inits an thermalCyclingConditions instance.
Args:
self: The class self parameter.
node: The thermalCyclingConditions node.
Returns:
No return value. Function may raise RdmlError if required.
"""
self._node = node
def __getitem__(self, key):
"""Returns the value for the key.
Args:
self: The class self parameter.
key: The key of the thermalCyclingConditions subelement
Returns:
A string of the data or None.
"""
if key == "id":
return self._node.get('id')
if key in ["description", "lidTemperature"]:
var = _get_first_child_text(self._node, key)
if var == "":
return None
else:
return var
raise KeyError
def __setitem__(self, key, value):
"""Changes the value for the key.
Args:
self: The class self parameter.
key: The key of the thermalCyclingConditions subelement
value: The new value for the key
Returns:
No return value, changes self. Function may raise RdmlError if required.
"""
if key == "id":
self.change_id(value, merge_with_id=False)
return
if key == "description":
return _change_subelement(self._node, key, self.xmlkeys(), value, True, "string")
if key == "lidTemperature":
return _change_subelement(self._node, key, self.xmlkeys(), value, True, "float")
raise KeyError
def change_id(self, value, merge_with_id=False):
"""Changes the value for the id.
Args:
self: The class self parameter.
value: The new value for the id.
merge_with_id: If False, simply rename the id; if True, merge with an existing element of that id by renaming all references from the old id to the existing one.
Returns:
No return value, changes self. Function may raise RdmlError if required.
"""
oldValue = self._node.get('id')
if oldValue != value:
par = self._node.getparent()
if not _string_to_bool(merge_with_id, triple=False):
_change_subelement(self._node, "id", self.xmlkeys(), value, False, "string")
else:
groupTag = self._node.tag.replace("{http://www.rdml.org}", "")
if _check_unique_id(par, groupTag, value):
raise RdmlError('The ' + groupTag + ' id "' + value + '" does not exist.')
allSam = _get_all_children(par, "sample")
for node in allSam:
subNode = _get_first_child(node, "cdnaSynthesisMethod")
if subNode is not None:
forId = _get_first_child(subNode, "thermalCyclingConditions")
if forId is not None:
if forId.attrib['id'] == oldValue:
forId.attrib['id'] = value
allExp = _get_all_children(par, "experiment")
for node in allExp:
subNodes = _get_all_children(node, "run")
for subNode in subNodes:
forId = _get_first_child(subNode, "thermalCyclingConditions")
if forId is not None:
if forId.attrib['id'] == oldValue:
forId.attrib['id'] = value
return
def keys(self):
"""Returns a list of the keys.
Args:
self: The class self parameter.
Returns:
A list of the key strings.
"""
return ["id", "description", "lidTemperature"]
def xmlkeys(self):
"""Returns a list of the keys in the xml file.
Args:
self: The class self parameter.
Returns:
A list of the key strings.
"""
return ["description", "documentation", "lidTemperature", "experimenter", "step"]
def documentation_ids(self):
"""Returns a list of the keys in the xml file.
Args:
self: The class self parameter.
Returns:
A list of the key strings.
"""
return _get_all_children_id(self._node, "documentation")
def update_documentation_ids(self, ids):
"""Returns a json of the RDML object without fluorescence data.
Args:
self: The class self parameter.
ids: A dictionary with id and true/false pairs
Returns:
True if a change was made, else false. Function may raise RdmlError if required.
"""
old = self.documentation_ids()
good_ids = _value_to_booldic(ids)
mod = False
for id, inc in good_ids.items():
if inc is True:
if id not in old:
new_node = _create_new_element(self._node, "documentation", id)
place = _get_tag_pos(self._node, "documentation", self.xmlkeys(), 999999999)
self._node.insert(place, new_node)
mod = True
else:
if id in old:
elem = _get_first_child_by_pos_or_id(self._node, "documentation", id, None)
self._node.remove(elem)
mod = True
return mod
def move_documentation(self, oldposition, newposition):
"""Moves the element to the new position in the list.
Args:
self: The class self parameter.
oldposition: The old position of the element
newposition: The new position of the element
Returns:
No return value, changes self. Function may raise RdmlError if required.
"""
pos = _get_tag_pos(self._node, "documentation", self.xmlkeys(), newposition)
ele = _get_first_child_by_pos_or_id(self._node, "documentation", None, oldposition)
self._node.insert(pos, ele)
def experimenter_ids(self):
"""Returns a list of the keys in the xml file.
Args:
self: The class self parameter.
Returns:
A list of the key strings.
"""
return _get_all_children_id(self._node, "experimenter")
def update_experimenter_ids(self, ids):
"""Returns a json of the RDML object without fluorescence data.
Args:
self: The class self parameter.
ids: A dictionary with id and true/false pairs
Returns:
True if a change was made, else false. Function may raise RdmlError if required.
"""
old = self.experimenter_ids()
good_ids = _value_to_booldic(ids)
mod = False
for id, inc in good_ids.items():
if inc is True:
if id not in old:
new_node = _create_new_element(self._node, "experimenter", id)
place = _get_tag_pos(self._node, "experimenter", self.xmlkeys(), 999999999)
self._node.insert(place, new_node)
mod = True
else:
if id in old:
elem = _get_first_child_by_pos_or_id(self._node, "experimenter", id, None)
self._node.remove(elem)
mod = True
return mod
def move_experimenter(self, oldposition, newposition):
"""Moves the element to the new position in the list.
Args:
self: The class self parameter.
oldposition: The old position of the element
newposition: The new position of the element
Returns:
No return value, changes self. Function may raise RdmlError if required.
"""
pos = _get_tag_pos(self._node, "experimenter", self.xmlkeys(), newposition)
ele = _get_first_child_by_pos_or_id(self._node, "experimenter", None, oldposition)
self._node.insert(pos, ele)
def steps(self):
"""Returns a list of all step elements.
Args:
self: The class self parameter.
Returns:
A list of all step elements.
"""
# The steps are sorted transiently to not modify the file in a read situation
exp = _get_all_children(self._node, "step")
srt_exp = sorted(exp, key=_get_step_sort_nr)
ret = []
for node in srt_exp:
ret.append(Step(node))
return ret
def new_step_temperature(self, temperature, duration,
temperatureChange=None, durationChange=None,
measure=None, ramp=None, nr=None):
"""Creates a new step element.
Args:
self: The class self parameter.
temperature: The temperature of the step in degrees Celsius (required)
duration: The duration of this step in seconds (required)
temperatureChange: The change of the temperature from one cycle to the next (optional)
durationChange: The change of the duration from one cycle to the next (optional)
measure: Indicates to make a measurement and store it as meltcurve or real-time data (optional)
ramp: Limit temperature change from one step to the next in degrees Celsius per second (optional)
nr: Step unique nr (optional)
Returns:
Nothing, changes self.
"""
if measure is not None and measure not in ["", "real time", "meltcurve"]:
raise RdmlError('Unknown or unsupported step measure value: "' + measure + '".')
nr = int(nr)
count = _get_number_of_children(self._node, "step")
new_node = et.Element("step")
xml_temp_step = ["temperature", "duration", "temperatureChange", "durationChange", "measure", "ramp"]
_add_new_subelement(new_node, "step", "nr", str(count + 1), False)
subel = et.SubElement(new_node, "temperature")
_change_subelement(subel, "temperature", xml_temp_step, temperature, False, "float")
_change_subelement(subel, "duration", xml_temp_step, duration, False, "posint")
_change_subelement(subel, "temperatureChange", xml_temp_step, temperatureChange, True, "float")
_change_subelement(subel, "durationChange", xml_temp_step, durationChange, True, "int")
_change_subelement(subel, "measure", xml_temp_step, measure, True, "string")
_change_subelement(subel, "ramp", xml_temp_step, ramp, True, "float")
place = _get_first_tag_pos(self._node, "step", self.xmlkeys()) + count
self._node.insert(place, new_node)
# Now move step at final position
self.move_step(count + 1, nr)
def new_step_gradient(self, highTemperature, lowTemperature, duration,
temperatureChange=None, durationChange=None,
measure=None, ramp=None, nr=None):
"""Creates a new step element.
Args:
self: The class self parameter.
highTemperature: The high gradient temperature of the step in degrees Celsius (required)
lowTemperature: The low gradient temperature of the step in degrees Celsius (required)
duration: The duration of this step in seconds (required)
temperatureChange: The change of the temperature from one cycle to the next (optional)
durationChange: The change of the duration from one cycle to the next (optional)
measure: Indicates to make a measurement and store it as meltcurve or real-time data (optional)
ramp: Limit temperature change from one step to the next in degrees Celsius per second (optional)
nr: Step unique nr (optional)
Returns:
Nothing, changes self.
"""
if measure is not None and measure not in ["", "real time", "meltcurve"]:
raise RdmlError('Unknown or unsupported step measure value: "' + measure + '".')
nr = int(nr)
count = _get_number_of_children(self._node, "step")
new_node = et.Element("step")
xml_temp_step = ["highTemperature", "lowTemperature", "duration", "temperatureChange",
"durationChange", "measure", "ramp"]
_add_new_subelement(new_node, "step", "nr", str(count + 1), False)
subel = et.SubElement(new_node, "gradient")
_change_subelement(subel, "highTemperature", xml_temp_step, highTemperature, False, "float")
_change_subelement(subel, "lowTemperature", xml_temp_step, lowTemperature, False, "float")
_change_subelement(subel, "duration", xml_temp_step, duration, False, "posint")
_change_subelement(subel, "temperatureChange", xml_temp_step, temperatureChange, True, "float")
_change_subelement(subel, "durationChange", xml_temp_step, durationChange, True, "int")
_change_subelement(subel, "measure", xml_temp_step, measure, True, "string")
_change_subelement(subel, "ramp", xml_temp_step, ramp, True, "float")
place = _get_first_tag_pos(self._node, "step", self.xmlkeys()) + count
self._node.insert(place, new_node)
# Now move step at final position
self.move_step(count + 1, nr)
def new_step_loop(self, goto, repeat, nr=None):
"""Creates a new step element.
Args:
self: The class self parameter.
goto: The step nr to go back to (required)
repeat: The number of times to go back to the goto step, one less than cycles (required)
nr: Step unique nr (optional)
Returns:
Nothing, changes self.
"""
nr = int(nr)
count = _get_number_of_children(self._node, "step")
new_node = et.Element("step")
xml_temp_step = ["goto", "repeat"]
_add_new_subelement(new_node, "step", "nr", str(count + 1), False)
subel = et.SubElement(new_node, "loop")
_change_subelement(subel, "goto", xml_temp_step, goto, False, "posint")
_change_subelement(subel, "repeat", xml_temp_step, repeat, False, "posint")
place = _get_first_tag_pos(self._node, "step", self.xmlkeys()) + count
self._node.insert(place, new_node)
# Now move step at final position
self.move_step(count + 1, nr)
def new_step_pause(self, temperature, nr=None):
"""Creates a new step element.
Args:
self: The class self parameter.
temperature: The temperature of the step in degrees Celsius (required)
nr: Step unique nr (optional)
Returns:
Nothing, changes self.
"""
nr = int(nr)
count = _get_number_of_children(self._node, "step")
new_node = et.Element("step")
xml_temp_step = ["temperature"]
_add_new_subelement(new_node, "step", "nr", str(count + 1), False)
subel = et.SubElement(new_node, "pause")
_change_subelement(subel, "temperature", xml_temp_step, temperature, False, "float")
place = _get_first_tag_pos(self._node, "step", self.xmlkeys()) + count
self._node.insert(place, new_node)
# Now move step at final position
self.move_step(count + 1, nr)
def new_step_lidOpen(self, nr=None):
"""Creates a new step element.
Args:
self: The class self parameter.
nr: Step unique nr (optional)
Returns:
Nothing, changes self.
"""
nr = int(nr)
count = _get_number_of_children(self._node, "step")
new_node = et.Element("step")
_add_new_subelement(new_node, "step", "nr", str(count + 1), False)
et.SubElement(new_node, "lidOpen")
place = _get_first_tag_pos(self._node, "step", self.xmlkeys()) + count
self._node.insert(place, new_node)
# Now move step at final position
self.move_step(count + 1, nr)
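# A minimal usage sketch for building a protocol with the new_step_* methods
# above (a sketch only; `tcc` is an assumed variable name for a thermal
# cycling conditions element obtained from this library, and the
# new_step_temperature signature is assumed analogous to new_step_gradient):
#
#   tcc.new_step_temperature("95.0", "120")                      # initial denaturation
#   tcc.new_step_temperature("95.0", "15")                       # cycling: denaturation
#   tcc.new_step_temperature("60.0", "60", measure="real time")  # cycling: anneal/extend + read
#   tcc.new_step_loop(goto=2, repeat=39)                         # repeat steps 2-3, 40 cycles in total
#   tcc.new_step_lidOpen()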
def cleanup_steps(self):
"""The steps may not be in a order that makes sense. This function fixes it.
Args:
self: The class self parameter.
Returns:
No return value, changes self. Function may raise RdmlError if required.
"""
# The steps in the xml may be not sorted by "nr", so sort first
exp = _get_all_children(self._node, "step")
srt_exp = sorted(exp, key=_get_step_sort_nr)
i = 0
for node in srt_exp:
if _get_step_sort_nr(node) != _get_step_sort_nr(exp[i]):
pos = _get_first_tag_pos(self._node, "step", self.xmlkeys()) + i
self._node.insert(pos, node)
i += 1
# The steps in the xml may not have the correct numbering, so fix it
exp = _get_all_children(self._node, "step")
i = 1
for node in exp:
if _get_step_sort_nr(node) != i:
elem = _get_first_child(node, "nr")
elem.text = str(i)
i += 1
def move_step(self, oldnr, newnr):
"""Moves the element to the new position in the list.
Args:
self: The class self parameter.
oldnr: The old position of the element
newnr: The new position of the element
Returns:
No return value, changes self. Function may raise RdmlError if required.
"""
# The steps in the xml may be not sorted well, so fix it
self.cleanup_steps()
# Change the nr
_move_subelement_pos(self._node, "step", oldnr - 1, self.xmlkeys(), newnr - 1)
# Fix the nr
exp = _get_all_children(self._node, "step")
i = 1
goto_mod = 0
goto_start = newnr
goto_end = oldnr
if oldnr > newnr:
goto_mod = 1
if oldnr < newnr:
goto_mod = -1
goto_start = oldnr
goto_end = newnr
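# Steps between goto_start and goto_end are shifted by one position by the
# move, so the loop below renumbers all steps and adjusts any loop step whose
# goto target falls inside that range by goto_mod.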
for node in exp:
if _get_step_sort_nr(node) != i:
elem = _get_first_child(node, "nr")
elem.text = str(i)
# Fix the goto steps
ele_type = _get_first_child(node, "loop")
if ele_type is not None:
ele_goto = _get_first_child(ele_type, "goto")
if ele_goto is not None:
jump_to = int(ele_goto.text)
if goto_start <= jump_to < goto_end:
ele_goto.text = str(jump_to + goto_mod)
i += 1
def get_step(self, bystep):
"""Returns an sample element by position or id.
Args:
self: The class self parameter.
bystep: Select the element by step nr in the list.
Returns:
The found element or None.
"""
return Step(_get_first_child_by_pos_or_id(self._node, "step", None, bystep - 1))
def delete_step(self, bystep=None):
"""Deletes an step element.
Args:
self: The class self parameter.
bystep: Select the element by step nr in the list.
Returns:
Nothing, changes self.
"""
elem = _get_first_child_by_pos_or_id(self._node, "step", None, bystep - 1)
self._node.remove(elem)
self.cleanup_steps()
# Fix the goto steps
exp = _get_all_children(self._node, "step")
for node in exp:
ele_type = _get_first_child(node, "loop")
if ele_type is not None:
ele_goto = _get_first_child(ele_type, "goto")
if ele_goto is not None:
jump_to = int(ele_goto.text)
if bystep < jump_to:
ele_goto.text = str(jump_to - 1)
def tojson(self):
"""Returns a json of the RDML object without fluorescence data.
Args:
self: The class self parameter.
Returns:
A json of the data.
"""
allSteps = self.steps()
steps = []
for exp in allSteps:
steps.append(exp.tojson())
data = {
"id": self._node.get('id'),
}
_add_first_child_to_dic(self._node, data, True, "description")
data["documentations"] = self.documentation_ids()
_add_first_child_to_dic(self._node, data, True, "lidTemperature")
data["experimenters"] = self.experimenter_ids()
data["steps"] = steps
return data
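# For orientation, the tojson() above returns roughly this shape
# (illustrative values only, not taken from a real file):
#   {"id": "PCR protocol", "description": "...", "documentations": [...],
#    "lidTemperature": "105", "experimenters": [...],
#    "steps": [{"nr": "1", "temperature": {"temperature": "95.0", "duration": "120"}}, ...]}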
class Step:
"""RDML-Python library
The step element used to read and edit one step of a thermal cycling protocol.
Attributes:
_node: The step node of the RDML XML object.
"""
def __init__(self, node):
"""Inits an sample instance.
Args:
self: The class self parameter.
node: The sample node.
Returns:
No return value. Function may raise RdmlError if required.
"""
self._node = node
def __getitem__(self, key):
"""Returns the value for the key.
Args:
self: The class self parameter.
key: The key of the step subelement. Be aware that a change of the step type deletes all entries
except nr and description
Returns:
A string of the data or None.
"""
if key == "nr":
return _get_first_child_text(self._node, key)
if key == "description":
var = _get_first_child_text(self._node, key)
if var == "":
return None
else:
return var
ele_type = _get_first_child(self._node, "temperature")
if ele_type is not None:
if key == "type":
return "temperature"
if key in ["temperature", "duration"]:
return _get_first_child_text(ele_type, key)
if key in ["temperatureChange", "durationChange", "measure", "ramp"]:
var = _get_first_child_text(ele_type, key)
if var == "":
return None
else:
return var
ele_type = _get_first_child(self._node, "gradient")
if ele_type is not None:
if key == "type":
return "gradient"
if key in ["highTemperature", "lowTemperature", "duration"]:
return _get_first_child_text(ele_type, key)
if key in ["temperatureChange", "durationChange", "measure", "ramp"]:
var = _get_first_child_text(ele_type, key)
if var == "":
return None
else:
return var
ele_type = _get_first_child(self._node, "loop")
if ele_type is not None:
if key == "type":
return "loop"
if key in ["goto", "repeat"]:
return _get_first_child_text(ele_type, key)
ele_type = _get_first_child(self._node, "pause")
if ele_type is not None:
if key == "type":
return "pause"
if key == "temperature":
return _get_first_child_text(ele_type, key)
ele_type = _get_first_child(self._node, "lidOpen")
if ele_type is not None:
if key == "type":
return "lidOpen"
raise KeyError
def __setitem__(self, key, value):
"""Changes the value for the key.
Args:
self: The class self parameter.
key: The key of the step subelement
value: The new value for the key
Returns:
No return value, changes self. Function may raise RdmlError if required.
"""
if key in ["nr", "type"]:
raise RdmlError('"' + key + '" can not be set. Use thermal cycling conditions methods instead')
if key == "description":
return _change_subelement(self._node, key, self.xmlkeys(), value, True, "string")
ele_type = _get_first_child(self._node, "temperature")
if ele_type is not None:
xml_temp_step = ["temperature", "duration", "temperatureChange", "durationChange", "measure", "ramp"]
if key == "temperature":
return _change_subelement(ele_type, key, xml_temp_step, value, False, "float")
if key == "duration":
return _change_subelement(ele_type, key, xml_temp_step, value, False, "posint")
if key in ["temperatureChange", "ramp"]:
return _change_subelement(ele_type, key, xml_temp_step, value, True, "float")
if key == "durationChange":
return _change_subelement(ele_type, key, xml_temp_step, value, True, "int")
if key == "measure":
if value not in ["", "real time", "meltcurve"]:
raise RdmlError('Unknown or unsupported step measure value: "' + value + '".')
return _change_subelement(ele_type, key, xml_temp_step, value, True, "string")
ele_type = _get_first_child(self._node, "gradient")
if ele_type is not None:
xml_temp_step = ["highTemperature", "lowTemperature", "duration", "temperatureChange",
"durationChange", "measure", "ramp"]
if key in ["highTemperature", "lowTemperature"]:
return _change_subelement(ele_type, key, xml_temp_step, value, False, "float")
if key == "duration":
return _change_subelement(ele_type, key, xml_temp_step, value, False, "posint")
if key in ["temperatureChange", "ramp"]:
return _change_subelement(ele_type, key, xml_temp_step, value, True, "float")
if key == "durationChange":
return _change_subelement(ele_type, key, xml_temp_step, value, True, "int")
if key == "measure":
if value not in ["", "real time", "meltcurve"]:
raise RdmlError('Unknown or unsupported step measure value: "' + value + '".')
return _change_subelement(ele_type, key, xml_temp_step, value, True, "string")
ele_type = _get_first_child(self._node, "loop")
if ele_type is not None:
xml_temp_step = ["goto", "repeat"]
if key in xml_temp_step:
return _change_subelement(ele_type, key, xml_temp_step, value, False, "posint")
ele_type = _get_first_child(self._node, "pause")
if ele_type is not None:
xml_temp_step = ["temperature"]
if key == "temperature":
return _change_subelement(ele_type, key, xml_temp_step, value, False, "float")
raise KeyError
def keys(self):
"""Returns a list of the keys.
Args:
self: The class self parameter.
Returns:
A list of the key strings.
"""
ele_type = _get_first_child(self._node, "temperature")
if ele_type is not None:
return ["nr", "type", "description", "temperature", "duration", "temperatureChange",
"durationChange", "measure", "ramp"]
ele_type = _get_first_child(self._node, "gradient")
if ele_type is not None:
return ["nr", "type", "description", "highTemperature", "lowTemperature", "duration",
"temperatureChange", "durationChange", "measure", "ramp"]
ele_type = _get_first_child(self._node, "loop")
if ele_type is not None:
return ["nr", "type", "description", "goto", "repeat"]
ele_type = _get_first_child(self._node, "pause")
if ele_type is not None:
return ["nr", "type", "description", "temperature"]
ele_type = _get_first_child(self._node, "lidOpen")
if ele_type is not None:
return ["nr", "type", "description"]
return []
def xmlkeys(self):
"""Returns a list of the keys in the xml file.
Args:
self: The class self parameter.
Returns:
A list of the key strings.
"""
ele_type = _get_first_child(self._node, "temperature")
if ele_type is not None:
return ["temperature", "duration", "temperatureChange", "durationChange", "measure", "ramp"]
ele_type = _get_first_child(self._node, "gradient")
if ele_type is not None:
return ["highTemperature", "lowTemperature", "duration", "temperatureChange",
"durationChange", "measure", "ramp"]
ele_type = _get_first_child(self._node, "loop")
if ele_type is not None:
return ["goto", "repeat"]
ele_type = _get_first_child(self._node, "pause")
if ele_type is not None:
return ["temperature"]
ele_type = _get_first_child(self._node, "lidOpen")
if ele_type is not None:
return []
return []
def tojson(self):
"""Returns a json of the RDML object without fluorescence data.
Args:
self: The class self parameter.
Returns:
A json of the data.
"""
data = {}
_add_first_child_to_dic(self._node, data, False, "nr")
_add_first_child_to_dic(self._node, data, True, "description")
elem = _get_first_child(self._node, "temperature")
if elem is not None:
qdic = {}
_add_first_child_to_dic(elem, qdic, False, "temperature")
_add_first_child_to_dic(elem, qdic, False, "duration")
_add_first_child_to_dic(elem, qdic, True, "temperatureChange")
_add_first_child_to_dic(elem, qdic, True, "durationChange")
_add_first_child_to_dic(elem, qdic, True, "measure")
_add_first_child_to_dic(elem, qdic, True, "ramp")
data["temperature"] = qdic
elem = _get_first_child(self._node, "gradient")
if elem is not None:
qdic = {}
_add_first_child_to_dic(elem, qdic, False, "highTemperature")
_add_first_child_to_dic(elem, qdic, False, "lowTemperature")
_add_first_child_to_dic(elem, qdic, False, "duration")
_add_first_child_to_dic(elem, qdic, True, "temperatureChange")
_add_first_child_to_dic(elem, qdic, True, "durationChange")
_add_first_child_to_dic(elem, qdic, True, "measure")
_add_first_child_to_dic(elem, qdic, True, "ramp")
data["gradient"] = qdic
elem = _get_first_child(self._node, "loop")
if elem is not None:
qdic = {}
_add_first_child_to_dic(elem, qdic, False, "goto")
_add_first_child_to_dic(elem, qdic, False, "repeat")
data["loop"] = qdic
elem = _get_first_child(self._node, "pause")
if elem is not None:
qdic = {}
_add_first_child_to_dic(elem, qdic, False, "temperature")
data["pause"] = qdic
elem = _get_first_child(self._node, "lidOpen")
if elem is not None:
data["lidOpen"] = "lidOpen"
return data
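# Example of the json produced above for a loop step (illustrative values):
#   {"nr": "4", "loop": {"goto": "2", "repeat": "39"}}
# The per-type dictionary mirrors the XML child element, so a temperature step
# yields a "temperature" dict, a gradient step a "gradient" dict, and so on.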
class Experiment:
"""RDML-Python library
The experiment element used to read and edit one experiment.
Attributes:
_node: The experiment node of the RDML XML object.
_rdmlFilename: The RDML filename
"""
def __init__(self, node, rdmlFilename):
"""Inits an experiment instance.
Args:
self: The class self parameter.
node: The experiment node.
rdmlFilename: The RDML filename.
Returns:
No return value. Function may raise RdmlError if required.
"""
self._node = node
self._rdmlFilename = rdmlFilename
def __getitem__(self, key):
"""Returns the value for the key.
Args:
self: The class self parameter.
key: The key of the experiment subelement
Returns:
A string of the data or None.
"""
if key == "id":
return self._node.get('id')
if key == "description":
var = _get_first_child_text(self._node, key)
if var == "":
return None
else:
return var
raise KeyError
def __setitem__(self, key, value):
"""Changes the value for the key.
Args:
self: The class self parameter.
key: The key of the experiment subelement
value: The new value for the key
Returns:
No return value, changes self. Function may raise RdmlError if required.
"""
if key == "id":
return _change_subelement(self._node, key, self.xmlkeys(), value, False, "string")
if key == "description":
return _change_subelement(self._node, key, self.xmlkeys(), value, True, "string")
raise KeyError
def keys(self):
"""Returns a list of the keys.
Args:
self: The class self parameter.
Returns:
A list of the key strings.
"""
return ["id", "description"]
def xmlkeys(self):
"""Returns a list of the keys in the xml file.
Args:
self: The class self parameter.
Returns:
A list of the key strings.
"""
return ["description", "documentation", "run"]
def documentation_ids(self):
"""Returns a list of the keys in the xml file.
Args:
self: The class self parameter.
Returns:
A list of the key strings.
"""
return _get_all_children_id(self._node, "documentation")
def update_documentation_ids(self, ids):
"""Returns a json of the RDML object without fluorescence data.
Args:
self: The class self parameter.
ids: A dictionary with id and true/false pairs
Returns:
True if a change was made, else false. Function may raise RdmlError if required.
"""
old = self.documentation_ids()
good_ids = _value_to_booldic(ids)
mod = False
for id, inc in good_ids.items():
if inc is True:
if id not in old:
new_node = _create_new_element(self._node, "documentation", id)
place = _get_tag_pos(self._node, "documentation", self.xmlkeys(), 999999999)
self._node.insert(place, new_node)
mod = True
else:
if id in old:
elem = _get_first_child_by_pos_or_id(self._node, "documentation", id, None)
self._node.remove(elem)
mod = True
return mod
def move_documentation(self, oldposition, newposition):
"""Moves the element to the new position in the list.
Args:
self: The class self parameter.
oldposition: The old position of the element
newposition: The new position of the element
Returns:
No return value, changes self. Function may raise RdmlError if required.
"""
pos = _get_tag_pos(self._node, "documentation", self.xmlkeys(), newposition)
ele = _get_first_child_by_pos_or_id(self._node, "documentation", None, oldposition)
self._node.insert(pos, ele)
def runs(self):
"""Returns a list of all run elements.
Args:
self: The class self parameter.
Returns:
A list of all run elements.
"""
exp = _get_all_children(self._node, "run")
ret = []
for node in exp:
ret.append(Run(node, self._rdmlFilename))
return ret
def new_run(self, id, newposition=None):
"""Creates a new run element.
Args:
self: The class self parameter.
id: Run unique id (required)
newposition: Run position in the list of runs (optional)
Returns:
Nothing, changes self.
"""
new_node = _create_new_element(self._node, "run", id)
place = _get_tag_pos(self._node, "run", self.xmlkeys(), newposition)
self._node.insert(place, new_node)
def move_run(self, id, newposition):
"""Moves the element to the new position in the list.
Args:
self: The class self parameter.
id: Run unique id
newposition: The new position of the element
Returns:
No return value, changes self. Function may raise RdmlError if required.
"""
_move_subelement(self._node, "run", id, self.xmlkeys(), newposition)
def get_run(self, byid=None, byposition=None):
"""Returns an run element by position or id.
Args:
self: The class self parameter.
byid: Select the element by the element id.
byposition: Select the element by position in the list.
Returns:
The found element or None.
"""
return Run(_get_first_child_by_pos_or_id(self._node, "run", byid, byposition), self._rdmlFilename)
def delete_run(self, byid=None, byposition=None):
"""Deletes an run element.
Args:
self: The class self parameter.
byid: Select the element by the element id.
byposition: Select the element by position in the list.
Returns:
Nothing, changes self.
"""
elem = _get_first_child_by_pos_or_id(self._node, "run", byid, byposition)
# Remove partition table files referenced by this run from the RDML zip archive
fileList = []
exp = _get_all_children(elem, "react")
for node in exp:
partit = _get_first_child(node, "partitions")
if partit is not None:
finalFileName = "partitions/" + _get_first_child_text(partit, "endPtTable")
if finalFileName != "partitions/":
fileList.append(finalFileName)
if len(fileList) > 0:
if self._rdmlFilename is not None and self._rdmlFilename != "":
if zipfile.is_zipfile(self._rdmlFilename):
with zipfile.ZipFile(self._rdmlFilename, 'r') as RDMLin:
tempFolder, tempName = tempfile.mkstemp(dir=os.path.dirname(self._rdmlFilename))
os.close(tempFolder)
with zipfile.ZipFile(tempName, mode='w', compression=zipfile.ZIP_DEFLATED) as RDMLout:
RDMLout.comment = RDMLin.comment
for item in RDMLin.infolist():
if item.filename not in fileList:
RDMLout.writestr(item, RDMLin.read(item.filename))
os.remove(self._rdmlFilename)
os.rename(tempName, self._rdmlFilename)
# Delete the node
self._node.remove(elem)
def tojson(self):
"""Returns a json of the RDML object without fluorescence data.
Args:
self: The class self parameter.
Returns:
A json of the data.
"""
allRuns = self.runs()
runs = []
for exp in allRuns:
runs.append(exp.tojson())
data = {
"id": self._node.get('id'),
}
_add_first_child_to_dic(self._node, data, True, "description")
data["documentations"] = self.documentation_ids()
data["runs"] = runs
return data
class Run:
"""RDML-Python library
The run element used to read and edit one run.
Attributes:
_node: The run node of the RDML XML object.
_rdmlFilename: The RDML filename.
"""
def __init__(self, node, rdmlFilename):
"""Inits an run instance.
Args:
self: The class self parameter.
node: The run node.
rdmlFilename: The RDML filename.
Returns:
No return value. Function may raise RdmlError if required.
"""
self._node = node
self._rdmlFilename = rdmlFilename
def __getitem__(self, key):
"""Returns the value for the key.
Args:
self: The class self parameter.
key: The key of the run subelement
Returns:
A string of the data or None.
"""
if key == "id":
return self._node.get('id')
if key in ["description", "instrument", "backgroundDeterminationMethod", "cqDetectionMethod", "runDate"]:
var = _get_first_child_text(self._node, key)
if var == "":
return None
else:
return var
if key == "thermalCyclingConditions":
forId = _get_first_child(self._node, "thermalCyclingConditions")
if forId is not None:
return forId.attrib['id']
else:
return None
if key in ["dataCollectionSoftware_name", "dataCollectionSoftware_version"]:
ele = _get_first_child(self._node, "dataCollectionSoftware")
if ele is None:
return None
if key == "dataCollectionSoftware_name":
return _get_first_child_text(ele, "name")
if key == "dataCollectionSoftware_version":
return _get_first_child_text(ele, "version")
raise RdmlError('Run dataCollectionSoftware programming read error.')
if key in ["pcrFormat_rows", "pcrFormat_columns", "pcrFormat_rowLabel", "pcrFormat_columnLabel"]:
ele = _get_first_child(self._node, "pcrFormat")
if ele is None:
return None
if key == "pcrFormat_rows":
return _get_first_child_text(ele, "rows")
if key == "pcrFormat_columns":
return _get_first_child_text(ele, "columns")
if key == "pcrFormat_rowLabel":
return _get_first_child_text(ele, "rowLabel")
if key == "pcrFormat_columnLabel":
return _get_first_child_text(ele, "columnLabel")
raise RdmlError('Run pcrFormat programming read error.')
raise KeyError
def __setitem__(self, key, value):
"""Changes the value for the key.
Args:
self: The class self parameter.
key: The key of the run subelement
value: The new value for the key
Returns:
No return value, changes self. Function may raise RdmlError if required.
"""
if key == "cqDetectionMethod":
if value not in ["", "automated threshold and baseline settings", "manual threshold and baseline settings",
"second derivative maximum", "other"]:
raise RdmlError('Unknown or unsupported run cqDetectionMethod value "' + value + '".')
if key in ["pcrFormat_rowLabel", "pcrFormat_columnLabel"]:
if value not in ["ABC", "123", "A1a1"]:
raise RdmlError('Unknown or unsupported run ' + key + ' value "' + value + '".')
if key == "id":
return _change_subelement(self._node, key, self.xmlkeys(), value, False, "string")
if key in ["description", "instrument", "backgroundDeterminationMethod", "cqDetectionMethod", "runDate"]:
return _change_subelement(self._node, key, self.xmlkeys(), value, True, "string")
if key == "thermalCyclingConditions":
forId = _get_or_create_subelement(self._node, "thermalCyclingConditions", self.xmlkeys())
if value is not None and value != "":
# We do not check that ID is valid to allow recreate_lost_ids()
forId.attrib['id'] = value
else:
self._node.remove(forId)
return
if key in ["dataCollectionSoftware_name", "dataCollectionSoftware_version"]:
ele = _get_or_create_subelement(self._node, "dataCollectionSoftware", self.xmlkeys())
if key == "dataCollectionSoftware_name":
_change_subelement(ele, "name", ["name", "version"], value, True, "string")
if key == "dataCollectionSoftware_version":
_change_subelement(ele, "version", ["name", "version"], value, True, "string")
_remove_irrelevant_subelement(self._node, "dataCollectionSoftware")
return
if key in ["pcrFormat_rows", "pcrFormat_columns", "pcrFormat_rowLabel", "pcrFormat_columnLabel"]:
ele = _get_or_create_subelement(self._node, "pcrFormat", self.xmlkeys())
if key == "pcrFormat_rows":
_change_subelement(ele, "rows", ["rows", "columns", "rowLabel", "columnLabel"], value, True, "string")
if key == "pcrFormat_columns":
_change_subelement(ele, "columns", ["rows", "columns", "rowLabel", "columnLabel"], value, True, "string")
if key == "pcrFormat_rowLabel":
_change_subelement(ele, "rowLabel", ["rows", "columns", "rowLabel", "columnLabel"], value, True, "string")
if key == "pcrFormat_columnLabel":
_change_subelement(ele, "columnLabel", ["rows", "columns", "rowLabel", "columnLabel"], value, True, "string")
_remove_irrelevant_subelement(self._node, "pcrFormat")
return
raise KeyError
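# A minimal sketch of setting the plate layout via the pcrFormat keys handled
# above (`run` is an assumed variable name for a Run element; the values
# describe a standard 96-well plate):
#   run["pcrFormat_rows"] = "8"
#   run["pcrFormat_columns"] = "12"
#   run["pcrFormat_rowLabel"] = "ABC"
#   run["pcrFormat_columnLabel"] = "123"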
def keys(self):
"""Returns a list of the keys.
Args:
self: The class self parameter.
Returns:
A list of the key strings.
"""
return ["id", "description", "instrument", "dataCollectionSoftware_name", "dataCollectionSoftware_version",
"backgroundDeterminationMethod", "cqDetectionMethod", "thermalCyclingConditions", "pcrFormat_rows",
"pcrFormat_columns", "pcrFormat_rowLabel", "pcrFormat_columnLabel", "runDate", "react"]
def xmlkeys(self):
"""Returns a list of the keys in the xml file.
Args:
self: The class self parameter.
Returns:
A list of the key strings.
"""
return ["description", "documentation", "experimenter", "instrument", "dataCollectionSoftware",
"backgroundDeterminationMethod", "cqDetectionMethod", "thermalCyclingConditions", "pcrFormat",
"runDate", "react"]
def documentation_ids(self):
"""Returns a list of the keys in the xml file.
Args:
self: The class self parameter.
Returns:
A list of the key strings.
"""
return _get_all_children_id(self._node, "documentation")
def update_documentation_ids(self, ids):
"""Returns a json of the RDML object without fluorescence data.
Args:
self: The class self parameter.
ids: A dictionary with id and true/false pairs
Returns:
True if a change was made, else false. Function may raise RdmlError if required.
"""
old = self.documentation_ids()
good_ids = _value_to_booldic(ids)
mod = False
for id, inc in good_ids.items():
if inc is True:
if id not in old:
new_node = _create_new_element(self._node, "documentation", id)
place = _get_tag_pos(self._node, "documentation", self.xmlkeys(), 999999999)
self._node.insert(place, new_node)
mod = True
else:
if id in old:
elem = _get_first_child_by_pos_or_id(self._node, "documentation", id, None)
self._node.remove(elem)
mod = True
return mod
def move_documentation(self, oldposition, newposition):
"""Moves the element to the new position in the list.
Args:
self: The class self parameter.
oldposition: The old position of the element
newposition: The new position of the element
Returns:
No return value, changes self. Function may raise RdmlError if required.
"""
pos = _get_tag_pos(self._node, "documentation", self.xmlkeys(), newposition)
ele = _get_first_child_by_pos_or_id(self._node, "documentation", None, oldposition)
self._node.insert(pos, ele)
def experimenter_ids(self):
"""Returns a list of the keys in the xml file.
Args:
self: The class self parameter.
Returns:
A list of the key strings.
"""
return _get_all_children_id(self._node, "experimenter")
def update_experimenter_ids(self, ids):
"""Returns a json of the RDML object without fluorescence data.
Args:
self: The class self parameter.
ids: A dictionary with id and true/false pairs
Returns:
True if a change was made, else false. Function may raise RdmlError if required.
"""
old = self.experimenter_ids()
good_ids = _value_to_booldic(ids)
mod = False
for id, inc in good_ids.items():
if inc is True:
if id not in old:
new_node = _create_new_element(self._node, "experimenter", id)
place = _get_tag_pos(self._node, "experimenter", self.xmlkeys(), 999999999)
self._node.insert(place, new_node)
mod = True
else:
if id in old:
elem = _get_first_child_by_pos_or_id(self._node, "experimenter", id, None)
self._node.remove(elem)
mod = True
return mod
def move_experimenter(self, oldposition, newposition):
"""Moves the element to the new position in the list.
Args:
self: The class self parameter.
oldposition: The old position of the element
newposition: The new position of the element
Returns:
No return value, changes self. Function may raise RdmlError if required.
"""
pos = _get_tag_pos(self._node, "experimenter", self.xmlkeys(), newposition)
ele = _get_first_child_by_pos_or_id(self._node, "experimenter", None, oldposition)
self._node.insert(pos, ele)
def tojson(self):
"""Returns a json of the RDML object without fluorescence data.
Args:
self: The class self parameter.
Returns:
A json of the data.
"""
data = {
"id": self._node.get('id'),
}
_add_first_child_to_dic(self._node, data, True, "description")
data["documentations"] = self.documentation_ids()
data["experimenters"] = self.experimenter_ids()
_add_first_child_to_dic(self._node, data, True, "instrument")
elem = _get_first_child(self._node, "dataCollectionSoftware")
if elem is not None:
qdic = {}
_add_first_child_to_dic(elem, qdic, True, "name")
_add_first_child_to_dic(elem, qdic, True, "version")
if len(qdic.keys()) != 0:
data["dataCollectionSoftware"] = qdic
_add_first_child_to_dic(self._node, data, True, "backgroundDeterminationMethod")
_add_first_child_to_dic(self._node, data, True, "cqDetectionMethod")
forId = _get_first_child(self._node, "thermalCyclingConditions")
if forId is not None:
if forId.attrib['id'] != "":
data["thermalCyclingConditions"] = forId.attrib['id']
elem = _get_first_child(self._node, "pcrFormat")
if elem is not None:
qdic = {}
_add_first_child_to_dic(elem, qdic, False, "rows")
_add_first_child_to_dic(elem, qdic, False, "columns")
_add_first_child_to_dic(elem, qdic, False, "rowLabel")
_add_first_child_to_dic(elem, qdic, False, "columnLabel")
data["pcrFormat"] = qdic
_add_first_child_to_dic(self._node, data, True, "runDate")
data["react"] = _get_number_of_children(self._node, "react")
return data
def export_table(self, dMode):
"""Returns a tab seperated table file with the react fluorescence data.
Args:
self: The class self parameter.
dMode: amp for amplification data, melt for meltcurve data
Returns:
A string with the data.
"""
samTypeLookup = {}
tarTypeLookup = {}
tarDyeLookup = {}
data = ""
# Get the information for the lookup dictionaries
pExp = self._node.getparent()
pRoot = pExp.getparent()
samples = _get_all_children(pRoot, "sample")
for sample in samples:
if sample.attrib['id'] != "":
samId = sample.attrib['id']
forType = _get_first_child_text(sample, "type")
if forType != "":
samTypeLookup[samId] = forType
targets = _get_all_children(pRoot, "target")
for target in targets:
if target.attrib['id'] != "":
tarId = target.attrib['id']
forType = _get_first_child_text(target, "type")
if forType != "":
tarTypeLookup[tarId] = forType
forId = _get_first_child(target, "dyeId")
if forId is not None:
if forId.attrib['id'] != "":
tarDyeLookup[tarId] = forId.attrib['id']
# Now create the header line
data += "Well\tSample\tSample Type\tTarget\tTarget Type\tDye\t"
reacts = _get_all_children(self._node, "react")
if len(reacts) < 1:
return ""
react_datas = _get_all_children(reacts[0], "data")
if len(react_datas) < 1:
return ""
headArr = []
if dMode == "amp":
adps = _get_all_children(react_datas[0], "adp")
for adp in adps:
headArr.append(_get_first_child_text(adp, "cyc"))
headArr = sorted(headArr, key=int)
else:
mdps = _get_all_children(react_datas[0], "mdp")
for mdp in mdps:
headArr.append(_get_first_child_text(mdp, "tmp"))
headArr = sorted(headArr, key=float, reverse=True)
for hElem in headArr:
data += hElem + "\t"
data += '\n'
# Now create the data lines
reacts = _get_all_children(self._node, "react")
wellData = []
for react in reacts:
reactId = react.get('id')
dataSample = reactId + '\t'
react_sample = "No Sample"
react_sample_type = "No Sample Type"
forId = _get_first_child(react, "sample")
if forId is not None:
if forId.attrib['id'] != "":
react_sample = forId.attrib['id']
react_sample_type = samTypeLookup[react_sample]
dataSample += react_sample + '\t' + react_sample_type
react_datas = _get_all_children(react, "data")
for react_data in react_datas:
dataLine = dataSample
react_target = "No Target"
react_target_type = "No Target Type"
react_target_dye = "No Dye"
forId = _get_first_child(react_data, "tar")
if forId is not None:
if forId.attrib['id'] != "":
react_target = forId.attrib['id']
react_target_type = tarTypeLookup[react_target]
react_target_dye = tarDyeLookup[react_target]
dataLine += "\t" + react_target + '\t' + react_target_type + '\t' + react_target_dye
fluorList = []
if dMode == "amp":
adps = _get_all_children(react_data, "adp")
for adp in adps:
cyc = _get_first_child_text(adp, "cyc")
fluor = _get_first_child_text(adp, "fluor")
fluorList.append([cyc, fluor])
fluorList = sorted(fluorList, key=_sort_list_int)
else:
mdps = _get_all_children(react_data, "mdp")
for mdp in mdps:
tmp = _get_first_child_text(mdp, "tmp")
fluor = _get_first_child_text(mdp, "fluor")
fluorList.append([tmp, fluor])
fluorList = sorted(fluorList, key=_sort_list_float)
for hElem in fluorList:
dataLine += "\t" + hElem[1]
dataLine += '\n'
wellData.append([reactId, dataLine])
wellData = sorted(wellData, key=_sort_list_int)
for hElem in wellData:
data += hElem[1]
return data
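# The string assembled above is a tab separated table; for amplification data
# it looks roughly like this (illustrative values):
#   Well  Sample  Sample Type  Target  Target Type  Dye  1      2      ...
#   1     S1      unkn         TarA    toi          FAM  1.02   1.05   ...
# with one row per react/data pair and one fluorescence column per cycle
# (or per temperature, header sorted descending, for melt data).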
def import_table(self, rootEl, filename, dMode):
"""Imports data from a tab seperated table file with react fluorescence data.
Args:
self: The class self parameter.
rootEl: The rdml root element.
filename: The tab file to open.
dMode: amp for amplification data, melt for meltcurve data.
Returns:
A string with the modifications made.
"""
ret = ""
with open(filename, "r") as tfile:
fileContent = tfile.read()
newlineFix = fileContent.replace("\r\n", "\n")
tabLines = newlineFix.split("\n")
head = tabLines[0].split("\t")
if (head[0] != "Well" or head[1] != "Sample" or head[2] != "Sample Type" or
head[3] != "Target" or head[4] != "Target Type" or head[5] != "Dye"):
raise RdmlError('The tab-format is not valid, essential columns are missing.')
# Get the information for the lookup dictionaries
samTypeLookup = {}
tarTypeLookup = {}
dyeLookup = {}
samples = _get_all_children(rootEl._node, "sample")
for sample in samples:
if sample.attrib['id'] != "":
samId = sample.attrib['id']
forType = _get_first_child_text(sample, "type")
if forType != "":
samTypeLookup[samId] = forType
targets = _get_all_children(rootEl._node, "target")
for target in targets:
if target.attrib['id'] != "":
tarId = target.attrib['id']
forType = _get_first_child_text(target, "type")
if forType != "":
tarTypeLookup[tarId] = forType
forId = _get_first_child(target, "dyeId")
if forId is not None and forId.attrib['id'] != "":
dyeLookup[forId.attrib['id']] = 1
# Process the lines
for tabLine in tabLines[1:]:
sLin = tabLine.split("\t")
if len(sLin) < 7 or sLin[1] == "" or sLin[2] == "" or sLin[3] == "" or sLin[4] == "" or sLin[5] == "":
continue
if sLin[1] not in samTypeLookup:
rootEl.new_sample(sLin[1], sLin[2])
samTypeLookup[sLin[1]] = sLin[2]
ret += "Created sample \"" + sLin[1] + "\" with type \"" + sLin[2] + "\"\n"
if sLin[3] not in tarTypeLookup:
if sLin[5] not in dyeLookup:
rootEl.new_dye(sLin[5])
dyeLookup[sLin[5]] = 1
ret += "Created dye \"" + sLin[5] + "\"\n"
rootEl.new_target(sLin[3], sLin[4])
elem = rootEl.get_target(byid=sLin[3])
elem["dyeId"] = sLin[5]
tarTypeLookup[sLin[3]] = sLin[4]
ret += "Created " + sLin[3] + " with type \"" + sLin[4] + "\" and dye \"" + sLin[5] + "\"\n"
react = None
data = None
# Get the position number if required
wellPos = sLin[0]
if re.search(r"\D\d+", sLin[0]):
old_letter = ord(re.sub(r"\d", "", sLin[0]).upper()) - ord("A")
old_nr = int(re.sub(r"\D", "", sLin[0]))
newId = old_nr + old_letter * int(self["pcrFormat_columns"])
wellPos = str(newId)
if re.search(r"\D\d+\D\d+", sLin[0]):
old_left = re.sub(r"\D\d+$", "", sLin[0])
old_left_letter = ord(re.sub(r"\d", "", old_left).upper()) - ord("A")
old_left_nr = int(re.sub(r"\D", "", old_left)) - 1
old_right = re.sub(r"^\D\d+", "", sLin[0])
old_right_letter = ord(re.sub(r"\d", "", old_right).upper()) - ord("A")
old_right_nr = int(re.sub(r"\D", "", old_right))
newId = old_left_nr * 8 + old_right_nr + old_left_letter * 768 + old_right_letter * 96
wellPos = str(newId)
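# The two patterns above convert alphanumeric well labels to the linear ids
# used by the react elements: e.g. "B3" becomes 3 + 1 * pcrFormat_columns.
# The second pattern appears to handle chamber style labels such as "A1a1";
# the 8/96/768 factors assume a fixed chamber geometry.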
exp = _get_all_children(self._node, "react")
for node in exp:
if wellPos == node.attrib['id']:
react = node
forId = _get_first_child_text(react, "sample")
if forId and forId != "" and forId.attrib['id'] != sLin[1]:
ret += "Missmatch: Well " + wellPos + " (" + sLin[0] + ") has sample \"" + forId.attrib['id'] + \
"\" in RDML file and sample \"" + sLin[1] + "\" in tab file.\n"
break
if react is None:
new_node = et.Element("react", id=wellPos)
place = _get_tag_pos(self._node, "react", self.xmlkeys(), 9999999)
self._node.insert(place, new_node)
react = new_node
new_node = et.Element("sample", id=sLin[1])
react.insert(0, new_node)
exp = _get_all_children(react, "data")
for node in exp:
forId = _get_first_child(node, "tar")
if forId is not None and forId.attrib['id'] == sLin[3]:
data = node
break
if data is None:
new_node = et.Element("data")
place = _get_tag_pos(react, "data", ["sample", "data", "partitions"], 9999999)
react.insert(place, new_node)
data = new_node
new_node = et.Element("tar", id=sLin[3])
place = _get_tag_pos(data, "tar",
_getXMLDataType(),
9999999)
data.insert(place, new_node)
if dMode == "amp":
presentAmp = _get_first_child(data, "adp")
if presentAmp is not None:
ret += "Well " + wellPos + " (" + sLin[0] + ") with sample \"" + sLin[1] + " and target \"" + \
sLin[3] + "\" has already amplification data, no data were added.\n"
else:
colCount = 6
for col in sLin[6:]:
new_node = et.Element("adp")
place = _get_tag_pos(data, "adp",
_getXMLDataType(),
9999999)
data.insert(place, new_node)
new_sub = et.Element("cyc")
new_sub.text = head[colCount]
place = _get_tag_pos(new_node, "cyc", ["cyc", "tmp", "fluor"], 9999999)
new_node.insert(place, new_sub)
new_sub = et.Element("fluor")
new_sub.text = col
place = _get_tag_pos(new_node, "fluor", ["cyc", "tmp", "fluor"], 9999999)
new_node.insert(place, new_sub)
colCount += 1
if dMode == "melt":
presentAmp = _get_first_child(data, "mdp")
if presentAmp is not None:
ret += "Well " + wellPos + " (" + sLin[0] + ") with sample \"" + sLin[1] + " and target \"" + \
sLin[3] + "\" has already melting data, no data were added.\n"
else:
colCount = 6
for col in sLin[6:]:
new_node = et.Element("mdp")
place = _get_tag_pos(data, "mdp",
_getXMLDataType(),
9999999)
data.insert(place, new_node)
new_sub = et.Element("tmp")
new_sub.text = head[colCount]
place = _get_tag_pos(new_node, "tmp", ["tmp", "fluor"], 9999999)
new_node.insert(place, new_sub)
new_sub = et.Element("fluor")
new_sub.text = col
place = _get_tag_pos(new_node, "fluor", ["tmp", "fluor"], 9999999)
new_node.insert(place, new_sub)
colCount += 1
return ret
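# A minimal usage sketch (hypothetical file name; `rdml` is assumed to be the
# root object of this library and `run` a Run element of it):
#   log = run.import_table(rdml, "amplification_data.tsv", "amp")
#   print(log)   # lists any samples, dyes and targets created on the fly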
def import_digital_data(self, rootEl, fileformat, filename, filelist, ignoreCh=""):
"""Imports data from a tab seperated table file with digital PCR overview data.
Args:
self: The class self parameter.
rootEl: The rdml root element.
fileformat: The format of the files (RDML, BioRad).
filename: The tab overvie file to open (recommended but optional).
filelist: A list of tab files with fluorescence data (optional, works without filename).
Returns:
A string with the modifications made.
"""
tempList = re.split(r"\D+", ignoreCh)
ignoreList = []
for posNum in tempList:
if re.search(r"\d", posNum):
ignoreList.append(int(posNum))
ret = ""
wellNames = []
uniqueFileNames = []
if filelist is None:
filelist = []
# Get the information for the lookup dictionaries
samTypeLookup = {}
tarTypeLookup = {}
dyeLookup = {}
headerLookup = {}
fileLookup = {}
fileNameSuggLookup = {}
samples = _get_all_children(rootEl._node, "sample")
for sample in samples:
if sample.attrib['id'] != "":
samId = sample.attrib['id']
forType = _get_first_child_text(sample, "type")
if forType != "":
samTypeLookup[samId] = forType
targets = _get_all_children(rootEl._node, "target")
for target in targets:
if target.attrib['id'] != "":
tarId = target.attrib['id']
forType = _get_first_child_text(target, "type")
if forType != "":
tarTypeLookup[tarId] = forType
dyes = _get_all_children(rootEl._node, "dye")
for dye in dyes:
if dye.attrib['id'] != "":
dyeLookup[dye.attrib['id']] = 1
# Work the overview file
if filename is not None:
with open(filename, newline='') as tfile: # add encoding='utf-8' ?
posCount = 0
posWell = 0
posSample = -1
posSampleType = -1
posDye = -1
posDyeCh2 = -1
posDyeCh3 = -1
posTarget = -1
posTargetCh2 = -1
posTargetCh3 = -1
posTargetType = -1
posCopConc = -1
posPositives = -1
posNegatives = -1
posCopConcCh2 = -1
posPositivesCh2 = -1
posNegativesCh2 = -1
posCopConcCh3 = -1
posPositivesCh3 = -1
posNegativesCh3 = -1
posUndefined = -1
posExcluded = -1
posVolume = -1
posFilename = -1
countUpTarget = 1
if fileformat == "RDML":
tabLines = list(csv.reader(tfile, delimiter='\t'))
for hInfo in tabLines[0]:
if hInfo == "Sample":
posSample = posCount
if hInfo == "SampleType":
posSampleType = posCount
if hInfo == "Target":
posTarget = posCount
if hInfo == "TargetType":
posTargetType = posCount
if hInfo == "Dye":
posDye = posCount
if hInfo == "Copies":
posCopConc = posCount
if hInfo == "Positives":
posPositives = posCount
if hInfo == "Negatives":
posNegatives = posCount
if hInfo == "Undefined":
posUndefined = posCount
if hInfo == "Excluded":
posExcluded = posCount
if hInfo == "Volume":
posVolume = posCount
if hInfo == "FileName":
posFilename = posCount
posCount += 1
elif fileformat == "Bio-Rad":
tabLines = list(csv.reader(tfile, delimiter=','))
for hInfo in tabLines[0]:
if hInfo == "Sample":
posSample = posCount
if hInfo in ["TargetType", "TypeAssay"]:
posDye = posCount
if hInfo in ["Target", "Assay"]:
posTarget = posCount
if hInfo == "CopiesPer20uLWell":
posCopConc = posCount
if hInfo == "Positives":
posPositives = posCount
if hInfo == "Negatives":
posNegatives = posCount
posCount += 1
elif fileformat == "Stilla":
posWell = 1
tabLines = list(csv.reader(tfile, delimiter=','))
for hInfo in tabLines[0]:
hInfo = re.sub(r"^ +", '', hInfo)
if hInfo == "SampleName":
posSample = posCount
# This is a hack of the format to allow specification of targets
if hInfo == "Blue_Channel_Target":
posTarget = posCount
if hInfo == "Green_Channel_Target":
posTargetCh2 = posCount
if hInfo == "Red_Channel_Target":
posTargetCh3 = posCount
# End of hack
if hInfo == "Blue_Channel_Concentration":
posCopConc = posCount
if hInfo == "Blue_Channel_NumberOfPositiveDroplets":
posPositives = posCount
if hInfo == "Blue_Channel_NumberOfNegativeDroplets":
posNegatives = posCount
if hInfo == "Green_Channel_Concentration":
posCopConcCh2 = posCount
if hInfo == "Green_Channel_NumberOfPositiveDroplets":
posPositivesCh2 = posCount
if hInfo == "Green_Channel_NumberOfNegativeDroplets":
posNegativesCh2 = posCount
if hInfo == "Red_Channel_Concentration":
posCopConcCh3 = posCount
if hInfo == "Red_Channel_NumberOfPositiveDroplets":
posPositivesCh3 = posCount
if hInfo == "Red_Channel_NumberOfNegativeDroplets":
posNegativesCh3 = posCount
posCount += 1
else:
raise RdmlError('Unknown digital file format.')
if posSample == -1:
raise RdmlError('The overview tab-format is not valid, sample columns are missing.')
if posDye == -1 and fileformat != "Stilla":
raise RdmlError('The overview tab-format is not valid, dye / channel columns are missing.')
if posTarget == -1 and fileformat != "Stilla":
raise RdmlError('The overview tab-format is not valid, target columns are missing.')
if posPositives == -1:
raise RdmlError('The overview tab-format is not valid, positives columns are missing.')
if posNegatives == -1:
raise RdmlError('The overview tab-format is not valid, negatives columns are missing.')
# Process the lines
for rowNr in range(1, len(tabLines)):
emptyLine = True
if len(tabLines[rowNr]) < 7:
continue
for colNr in range(0, len(tabLines[rowNr])):
if tabLines[rowNr][colNr] != "":
emptyLine = False
tabLines[rowNr][colNr] = re.sub(r'^ +', '', tabLines[rowNr][colNr])
tabLines[rowNr][colNr] = re.sub(r' +$', '', tabLines[rowNr][colNr])
if emptyLine is True:
continue
sLin = tabLines[rowNr]
if sLin[posSample] not in samTypeLookup:
posSampleTypeName = "unkn"
if posSampleType != -1:
posSampleTypeName = sLin[posSampleType]
rootEl.new_sample(sLin[posSample], posSampleTypeName)
samTypeLookup[sLin[posSample]] = posSampleTypeName
ret += "Created sample \"" + sLin[posSample] + "\" with type \"" + posSampleTypeName + "\"\n"
# Fix well position
wellPos = re.sub(r"\"", "", sLin[posWell])
if fileformat == "Stilla":
wellPos = re.sub(r'^\d+-', '', wellPos)
# Create nonexisting targets and dyes
if fileformat == "Stilla":
if 1 not in ignoreList:
if posTarget > -1:
crTarName = sLin[posTarget]
else:
crTarName = " Target " + str(countUpTarget) + " Ch1"
countUpTarget += 1
chan = "Ch1"
if crTarName not in tarTypeLookup:
if chan not in dyeLookup:
rootEl.new_dye(chan)
dyeLookup[chan] = 1
ret += "Created dye \"" + chan + "\"\n"
rootEl.new_target(crTarName, "toi")
elem = rootEl.get_target(byid=crTarName)
elem["dyeId"] = chan
tarTypeLookup[crTarName] = "toi"
ret += "Created " + crTarName + " with type \"toi\" and dye \"" + chan + "\"\n"
if wellPos.upper() not in headerLookup:
headerLookup[wellPos.upper()] = {}
headerLookup[wellPos.upper()][chan] = crTarName
if 2 not in ignoreList:
if posTargetCh2 > -1:
crTarName = sLin[posTargetCh2]
else:
crTarName = " Target " + str(countUpTarget) + " Ch2"
countUpTarget += 1
chan = "Ch2"
if crTarName not in tarTypeLookup:
if chan not in dyeLookup:
rootEl.new_dye(chan)
dyeLookup[chan] = 1
ret += "Created dye \"" + chan + "\"\n"
rootEl.new_target(crTarName, "toi")
elem = rootEl.get_target(byid=crTarName)
elem["dyeId"] = chan
tarTypeLookup[crTarName] = "toi"
ret += "Created " + crTarName + " with type \"toi\" and dye \"" + chan + "\"\n"
if wellPos.upper() not in headerLookup:
headerLookup[wellPos.upper()] = {}
headerLookup[wellPos.upper()][chan] = crTarName
if 3 not in ignoreList:
if posTargetCh3 > -1:
crTarName = sLin[posTargetCh3]
else:
crTarName = " Target " + str(countUpTarget) + " Ch3"
countUpTarget += 1
chan = "Ch3"
if crTarName not in tarTypeLookup:
if chan not in dyeLookup:
rootEl.new_dye(chan)
dyeLookup[chan] = 1
ret += "Created dye \"" + chan + "\"\n"
rootEl.new_target(crTarName, "toi")
elem = rootEl.get_target(byid=crTarName)
elem["dyeId"] = chan
tarTypeLookup[crTarName] = "toi"
ret += "Created " + crTarName + " with type \"toi\" and dye \"" + chan + "\"\n"
if wellPos.upper() not in headerLookup:
headerLookup[wellPos.upper()] = {}
headerLookup[wellPos.upper()][chan] = crTarName
else:
if fileformat == "Bio-Rad":
posDyeName = sLin[posDye][:3]
else:
posDyeName = sLin[posDye]
if posTarget > -1 and int(re.sub(r"\D", "", posDyeName)) not in ignoreList:
if sLin[posTarget] not in tarTypeLookup:
if posDyeName not in dyeLookup:
rootEl.new_dye(posDyeName)
dyeLookup[posDyeName] = 1
ret += "Created dye \"" + posDyeName + "\"\n"
posTargetTypeName = "toi"
if posTargetType != -1:
posTargetTypeName = sLin[posTargetType]
rootEl.new_target(sLin[posTarget], posTargetTypeName)
elem = rootEl.get_target(byid=sLin[posTarget])
elem["dyeId"] = posDyeName
tarTypeLookup[sLin[posTarget]] = posTargetTypeName
ret += "Created " + sLin[posTarget] + " with type \"" + posTargetTypeName + "\" and dye \"" + posDyeName + "\"\n"
if wellPos.upper() not in headerLookup:
headerLookup[wellPos.upper()] = {}
headerLookup[wellPos.upper()][posDyeName] = sLin[posTarget]
if posFilename != -1 and sLin[posFilename] != "":
fileNameSuggLookup[wellPos.upper()] = sLin[posFilename]
react = None
partit = None
data = None
# Get the position number if required
wellPosStore = wellPos
if re.search(r"\D\d+", wellPos):
old_letter = ord(re.sub(r"\d", "", wellPos.upper())) - ord("A")
old_nr = int(re.sub(r"\D", "", wellPos))
newId = old_nr + old_letter * int(self["pcrFormat_columns"])
wellPos = str(newId)
exp = _get_all_children(self._node, "react")
for node in exp:
if wellPos == node.attrib['id']:
react = node
forId = _get_first_child_text(react, "sample")
if forId and forId != "" and forId.attrib['id'] != sLin[posSample]:
ret += "Missmatch: Well " + wellPos + " (" + sLin[posWell] + ") has sample \"" + forId.attrib['id'] + \
"\" in RDML file and sample \"" + sLin[posSample] + "\" in tab file.\n"
break
if react is None:
new_node = et.Element("react", id=wellPos)
place = _get_tag_pos(self._node, "react", self.xmlkeys(), 9999999)
self._node.insert(place, new_node)
react = new_node
new_node = et.Element("sample", id=sLin[posSample])
react.insert(0, new_node)
partit = _get_first_child(react, "partitions")
if partit is None:
new_node = et.Element("partitions")
place = _get_tag_pos(react, "partitions", ["sample", "data", "partitions"], 9999999)
react.insert(place, new_node)
partit = new_node
new_node = et.Element("volume")
if fileformat == "RDML":
new_node.text = sLin[posVolume]
elif fileformat == "Bio-Rad":
new_node.text = "0.85"
elif fileformat == "Stilla":
new_node.text = "0.59"
else:
new_node.text = "0.70"
place = _get_tag_pos(partit, "volume", ["volume", "endPtTable", "data"], 9999999)
partit.insert(place, new_node)
if fileformat == "Stilla":
exp = _get_all_children(partit, "data")
for i in range(1, 4):
if i in ignoreList:
continue
data = None
posDyeName = "Ch" + str(i)
stillaTarget = headerLookup[wellPosStore.upper()][posDyeName]
stillaConc = "0"
stillaPos = "0"
stillaNeg = "0"
if i == 1:
stillaConc = sLin[posCopConc]
stillaPos = sLin[posPositives]
stillaNeg = sLin[posNegatives]
if i == 2:
stillaConc = sLin[posCopConcCh2]
stillaPos = sLin[posPositivesCh2]
stillaNeg = sLin[posNegativesCh2]
if i == 3:
stillaConc = sLin[posCopConcCh3]
stillaPos = sLin[posPositivesCh3]
stillaNeg = sLin[posNegativesCh3]
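# Trim trailing zeros from the Stilla concentration value; a bare trailing
# dot is normalised to ".0" so the stored value remains a valid float string.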
if re.search(r"\.", stillaConc):
stillaConc = re.sub(r"0+$", "", stillaConc)
stillaConc = re.sub(r"\.$", ".0", stillaConc)
for node in exp:
forId = _get_first_child(node, "tar")
if forId is not None and forId.attrib['id'] == stillaTarget:
data = node
break
if data is None:
new_node = et.Element("data")
place = _get_tag_pos(partit, "data", ["volume", "endPtTable", "data"], 9999999)
partit.insert(place, new_node)
data = new_node
new_node = et.Element("tar", id=stillaTarget)
place = _get_tag_pos(data, "tar", ["tar", "pos", "neg", "undef", "excl", "conc"], 9999999)
data.insert(place, new_node)
new_node = et.Element("pos")
new_node.text = stillaPos
place = _get_tag_pos(data, "pos", ["tar", "pos", "neg", "undef", "excl", "conc"], 9999999)
data.insert(place, new_node)
new_node = et.Element("neg")
new_node.text = stillaNeg
place = _get_tag_pos(data, "neg", ["tar", "pos", "neg", "undef", "excl", "conc"], 9999999)
data.insert(place, new_node)
new_node = et.Element("conc")
new_node.text = stillaConc
place = _get_tag_pos(data, "conc", ["tar", "pos", "neg", "undef", "excl", "conc"], 9999999)
data.insert(place, new_node)
else:
exp = _get_all_children(partit, "data")
for node in exp:
forId = _get_first_child(node, "tar")
if forId is not None and forId.attrib['id'] == sLin[posTarget]:
data = node
break
if data is None:
new_node = et.Element("data")
place = _get_tag_pos(partit, "data", ["volume", "endPtTable", "data"], 9999999)
partit.insert(place, new_node)
data = new_node
new_node = et.Element("tar", id=sLin[posTarget])
place = _get_tag_pos(data, "tar", ["tar", "pos", "neg", "undef", "excl", "conc"], 9999999)
data.insert(place, new_node)
new_node = et.Element("pos")
new_node.text = sLin[posPositives]
place = _get_tag_pos(data, "pos", ["tar", "pos", "neg", "undef", "excl", "conc"], 9999999)
data.insert(place, new_node)
new_node = et.Element("neg")
new_node.text = sLin[posNegatives]
place = _get_tag_pos(data, "neg", ["tar", "pos", "neg", "undef", "excl", "conc"], 9999999)
data.insert(place, new_node)
if posUndefined != -1 and sLin[posUndefined] != "":
new_node = et.Element("undef")
new_node.text = sLin[posUndefined]
place = _get_tag_pos(data, "neg", ["tar", "pos", "neg", "undef", "excl", "conc"], 9999999)
data.insert(place, new_node)
if posExcluded != -1 and sLin[posExcluded] != "":
new_node = et.Element("excl")
new_node.text = sLin[posExcluded]
place = _get_tag_pos(data, "neg", ["tar", "pos", "neg", "undef", "excl", "conc"], 9999999)
data.insert(place, new_node)
if posCopConc != -1:
new_node = et.Element("conc")
if int(sLin[posPositives]) == 0:
new_node.text = "0"
else:
if fileformat == "RDML":
new_node.text = sLin[posCopConc]
elif fileformat == "Bio-Rad":
new_node.text = str(float(sLin[posCopConc])/20)
else:
new_node.text = sLin[posCopConc]
place = _get_tag_pos(data, "conc", ["tar", "pos", "neg", "undef", "excl", "conc"], 9999999)
data.insert(place, new_node)
# Read the raw data files
# Extract the well position from file names
constNameChars = 0
if len(filelist) > 0:
charStopCount = False
for i in range(len(filelist[0])):
currChar = None
if charStopCount is False:
for wellFileName in filelist:
if currChar is None:
currChar = wellFileName[i]
else:
if currChar != wellFileName[i]:
charStopCount = True
if charStopCount is False:
constNameChars = i + 1
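# constNameChars now holds the length of the longest file name prefix shared
# by all raw data files; the remainder (minus extension and vendor suffixes
# stripped below) is treated as the well name.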
for wellFileName in filelist:
currName = wellFileName[constNameChars:].upper()
currName = currName.replace(".CSV", "")
currName = currName.replace(".TSV", "")
currName = currName.replace("_AMPLITUDE", "")
currName = currName.replace("_COMPENSATEDDATA", "")
currName = currName.replace("_RAWDATA", "")
currName = re.sub(r"^\d+_", "", currName)
wellNames.append(currName)
fileLookup[currName] = wellFileName
# Propose a filename for raw data
runId = self._node.get('id')
runFix = re.sub(r"[^A-Za-z0-9]", "", runId)
experimentId = self._node.getparent().get('id')
experimentFix = re.sub(r"[^A-Za-z0-9]", "", experimentId)
propFileName = "partitions/" + experimentFix + "_" + runFix
# Get the used unique file names
if zipfile.is_zipfile(self._rdmlFilename):
with zipfile.ZipFile(self._rdmlFilename, 'r') as rdmlObj:
# Get list of files names in rdml zip
allRDMLfiles = rdmlObj.namelist()
for ele in allRDMLfiles:
if re.search("^partitions/", ele):
uniqueFileNames.append(ele.lower())
# Now process the files
warnVolume = ""
for well in wellNames:
outTabFile = ""
keepCh1 = False
keepCh2 = False
keepCh3 = False
header = ""
react = None
partit = None
dataCh1 = None
dataCh2 = None
dataCh3 = None
wellPos = well
if re.search(r"\D\d+", well):
old_letter = ord(re.sub(r"\d", "", well).upper()) - ord("A")
old_nr = int(re.sub(r"\D", "", well))
newId = old_nr + old_letter * int(self["pcrFormat_columns"])
wellPos = str(newId)
exp = _get_all_children(self._node, "react")
for node in exp:
if wellPos == node.attrib['id']:
react = node
break
if react is None:
sampleName = "Sample in " + well
if sampleName not in samTypeLookup:
rootEl.new_sample(sampleName, "unkn")
samTypeLookup[sampleName] = "unkn"
ret += "Created sample \"" + sampleName + "\" with type \"" + "unkn" + "\"\n"
new_node = et.Element("react", id=wellPos)
place = _get_tag_pos(self._node, "react", self.xmlkeys(), 9999999)
self._node.insert(place, new_node)
react = new_node
new_node = et.Element("sample", id=sampleName)
react.insert(0, new_node)
partit = _get_first_child(react, "partitions")
if partit is None:
new_node = et.Element("partitions")
place = _get_tag_pos(react, "partitions", ["sample", "data", "partitions"], 9999999)
react.insert(place, new_node)
partit = new_node
new_node = et.Element("volume")
if fileformat == "RDML":
new_node.text = "0.7"
warnVolume = "No information on partition volume given, used 0.7."
elif fileformat == "Bio-Rad":
new_node.text = "0.85"
elif fileformat == "Stilla":
new_node.text = "0.59"
else:
new_node.text = "0.85"
place = _get_tag_pos(partit, "volume", ["volume", "endPtTable", "data"], 9999999)
partit.insert(place, new_node)
if wellPos in fileNameSuggLookup:
finalFileName = "partitions/" + fileNameSuggLookup[wellPos]
else:
finalFileName = "partitions/" + _get_first_child_text(partit, "endPtTable")
if finalFileName == "partitions/":
finalFileName = propFileName + "_" + wellPos + "_" + well + ".tsv"
triesCount = 0
if finalFileName.lower() in uniqueFileNames:
while triesCount < 100:
finalFileName = propFileName + "_" + wellPos + "_" + well + "_" + str(triesCount) + ".tsv"
if finalFileName.lower() not in uniqueFileNames:
uniqueFileNames.append(finalFileName.lower())
break
triesCount += 1
# print(finalFileName, flush=True)
with open(fileLookup[well], newline='') as wellfile: # add encoding='utf-8' ?
if fileformat == "RDML":
wellLines = list(csv.reader(wellfile, delimiter='\t'))
wellfile.seek(0)
wellFileContent = wellfile.read()
_writeFileInRDML(self._rdmlFilename, finalFileName, wellFileContent)
delElem = _get_first_child(partit, "endPtTable")
if delElem is not None:
partit.remove(delElem)
new_node = et.Element("endPtTable")
new_node.text = re.sub(r'^partitions/', '', finalFileName)
place = _get_tag_pos(partit, "endPtTable", ["volume", "endPtTable", "data"], 9999999)
partit.insert(place, new_node)
header = wellLines[0]
for col in range(0, len(header), 2):
cPos = 0
cNeg = 0
cUndef = 0
cExcl = 0
if header[col] != "":
targetName = header[col]
if targetName not in tarTypeLookup:
dye = "Ch" + str(int((col + 1) / 2))
if dye not in dyeLookup:
rootEl.new_dye(dye)
dyeLookup[dye] = 1
ret += "Created dye \"" + dye + "\"\n"
rootEl.new_target(targetName, "toi")
elem = rootEl.get_target(byid=targetName)
elem["dyeId"] = dye
tarTypeLookup[targetName] = "toi"
ret += "Created target " + targetName + " with type \"" + "toi" + "\" and dye \"" + dye + "\"\n"
for line in wellLines[1:]:
splitLine = line  # csv.reader already split the line into fields
if len(splitLine) - 1 < col + 1:
continue
if splitLine[col + 1] == "p":
cPos += 1
if splitLine[col + 1] == "n":
cNeg += 1
if splitLine[col + 1] == "u":
cUndef += 1
if splitLine[col + 1] == "e":
cExcl += 1
data = None
exp = _get_all_children(partit, "data")
for node in exp:
forId = _get_first_child(node, "tar")
if forId is not None and forId.attrib['id'] == targetName:
data = node
if data is None:
new_node = et.Element("data")
place = _get_tag_pos(partit, "data", ["volume", "endPtTable", "data"], 9999999)
partit.insert(place, new_node)
data = new_node
new_node = et.Element("tar", id=targetName)
place = _get_tag_pos(data, "tar", ["tar", "pos", "neg", "undef", "excl", "conc"], 9999999)
data.insert(place, new_node)
delElem = _get_first_child(partit, "pos")
if delElem is not None:
data.remove(delElem)
new_node = et.Element("pos")
new_node.text = str(cPos)
place = _get_tag_pos(data, "pos", ["tar", "pos", "neg", "undef", "excl", "conc"], 9999999)
data.insert(place, new_node)
delElem = _get_first_child(partit, "neg")
if delElem is not None:
data.remove(delElem)
new_node = et.Element("neg")
new_node.text = str(cNeg)
place = _get_tag_pos(data, "pos", ["tar", "pos", "neg", "undef", "excl", "conc"], 9999999)
data.insert(place, new_node)
delElem = _get_first_child(partit, "undef")
if delElem is not None:
data.remove(delElem)
if cExcl > 0:
new_node = et.Element("undef")
new_node.text = str(cUndef)
place = _get_tag_pos(data, "pos", ["tar", "pos", "neg", "undef", "excl", "conc"], 9999999)
data.insert(place, new_node)
delElem = _get_first_child(partit, "excl")
if delElem is not None:
data.remove(delElem)
if cExcl > 0:
new_node = et.Element("excl")
new_node.text = str(cExcl)
place = _get_tag_pos(data, "pos", ["tar", "pos", "neg", "undef", "excl", "conc"], 9999999)
data.insert(place, new_node)
elif fileformat == "Bio-Rad":
wellLines = list(csv.reader(wellfile, delimiter=','))
ch1Pos = "0"
ch1Neg = "0"
ch1sum = 0
ch2Pos = "0"
ch2Neg = "0"
ch2sum = 0
if well in headerLookup:
if "Ch1" in headerLookup[well] and 1 not in ignoreList:
keepCh1 = True
header += headerLookup[well]["Ch1"] + "\t" + headerLookup[well]["Ch1"] + "\t"
if "Ch2" in headerLookup[well] and 2 not in ignoreList:
keepCh2 = True
header += headerLookup[well]["Ch2"] + "\t" + headerLookup[well]["Ch2"] + "\t"
outTabFile += re.sub(r'\t$', '\n', header)
else:
headerLookup[well] = {}
dyes = ["Ch1", "Ch2"]
if len(wellLines) > 1:
ch1Pos = ""
ch1Neg = ""
ch2Pos = ""
ch2Neg = ""
if re.search(r"\d", wellLines[1][0]) and 1 not in ignoreList:
keepCh1 = True
if len(wellLines[1]) > 1 and re.search(r"\d", wellLines[1][1]) and 2 not in ignoreList:
keepCh2 = True
for dye in dyes:
if dye not in dyeLookup:
rootEl.new_dye(dye)
dyeLookup[dye] = 1
ret += "Created dye \"" + dye + "\"\n"
dyeCount = 0
for dye in dyes:
dyeCount += 1
targetName = "Target in " + well + " " + dye
if targetName not in tarTypeLookup:
rootEl.new_target(targetName, "toi")
elem = rootEl.get_target(byid=targetName)
elem["dyeId"] = dye
tarTypeLookup[targetName] = "toi"
ret += "Created target " + targetName + " with type \"" + "toi" + "\" and dye \"" + dye + "\"\n"
headerLookup[well][dye] = targetName
if (dyeCount == 1 and keepCh1) or (dyeCount == 2 and keepCh2):
header += targetName + "\t" + targetName + "\t"
outTabFile += re.sub(r'\t$', '\n', header)
if keepCh1 or keepCh2:
exp = _get_all_children(partit, "data")
for node in exp:
forId = _get_first_child(node, "tar")
if keepCh1 and forId is not None and forId.attrib['id'] == headerLookup[well]["Ch1"]:
dataCh1 = node
ch1Pos = _get_first_child_text(dataCh1, "pos")
ch1Neg = _get_first_child_text(dataCh1, "neg")
ch1sum += int(ch1Pos) + int(ch1Neg)
if keepCh2 and forId is not None and forId.attrib['id'] == headerLookup[well]["Ch2"]:
dataCh2 = node
ch2Pos = _get_first_child_text(dataCh2, "pos")
ch2Neg = _get_first_child_text(dataCh2, "neg")
ch2sum += int(ch2Pos) + int(ch2Neg)
if dataCh1 is None and keepCh1:
new_node = et.Element("data")
place = _get_tag_pos(partit, "data", ["volume", "endPtTable", "data"], 9999999)
partit.insert(place, new_node)
dataCh1 = new_node
new_node = et.Element("tar", id=headerLookup[well]["Ch1"])
place = _get_tag_pos(dataCh1, "tar", ["tar", "pos", "neg", "undef", "excl", "conc"], 9999999)
dataCh1.insert(place, new_node)
ch1Pos = ""
ch1Neg = ""
ch1sum = 2
if dataCh2 is None and keepCh2:
new_node = et.Element("data")
place = _get_tag_pos(partit, "data", ["volume", "endPtTable", "data"], 9999999)
partit.insert(place, new_node)
dataCh2 = new_node
new_node = et.Element("tar", id=headerLookup[well]["Ch2"])
place = _get_tag_pos(dataCh2, "tar", ["tar", "pos", "neg", "undef", "excl", "conc"], 9999999)
dataCh2.insert(place, new_node)
ch2Pos = ""
ch2Neg = ""
ch2sum = 2
if dataCh1 is None and dataCh2 is None:
continue
if ch1sum < 1 and ch2sum < 1:
continue
if ch1Pos == "" and ch1Neg == "" and ch2Pos == "" and ch2Neg == "":
countPart = 0
for splitLine in wellLines[1:]:
if len(splitLine[0]) < 2:
continue
if keepCh1:
outTabFile += splitLine[0] + "\t" + "u"
if keepCh2:
if keepCh1:
outTabFile += "\t"
outTabFile += splitLine[1] + "\t" + "u\n"
else:
outTabFile += "\n"
countPart += 1
if keepCh1:
new_node = et.Element("pos")
new_node.text = "0"
place = _get_tag_pos(dataCh1, "pos", ["tar", "pos", "neg", "undef", "excl", "conc"], 9999999)
dataCh1.insert(place, new_node)
new_node = et.Element("neg")
new_node.text = "0"
place = _get_tag_pos(dataCh1, "neg", ["tar", "pos", "neg", "undef", "excl", "conc"], 9999999)
dataCh1.insert(place, new_node)
new_node = et.Element("undef")
new_node.text = str(countPart)
place = _get_tag_pos(dataCh1, "neg", ["tar", "pos", "neg", "undef", "excl", "conc"], 9999999)
dataCh1.insert(place, new_node)
if keepCh2:
new_node = et.Element("pos")
new_node.text = "0"
place = _get_tag_pos(dataCh2, "pos", ["tar", "pos", "neg", "undef", "excl", "conc"],
9999999)
dataCh2.insert(place, new_node)
new_node = et.Element("neg")
new_node.text = "0"
place = _get_tag_pos(dataCh2, "neg", ["tar", "pos", "neg", "undef", "excl", "conc"],
9999999)
dataCh2.insert(place, new_node)
new_node = et.Element("undef")
new_node.text = str(countPart)
place = _get_tag_pos(dataCh2, "neg", ["tar", "pos", "neg", "undef", "excl", "conc"],
9999999)
dataCh2.insert(place, new_node)
else:
ch1Arr = []
ch2Arr = []
ch1Cut = 0
ch2Cut = 0
for splitLine in wellLines[1:]:
if len(splitLine) < 2:
continue
if keepCh1:
ch1Arr.append(float(splitLine[0]))
if keepCh2:
ch2Arr.append(float(splitLine[1]))
if keepCh1:
ch1Arr.sort()
if 0 < int(ch1Neg) <= len(ch1Arr):
ch1Cut = ch1Arr[int(ch1Neg) - 1]
if keepCh2:
ch2Arr.sort()
if 0 < int(ch2Neg) <= len(ch2Arr):
ch2Cut = ch2Arr[int(ch2Neg) - 1]
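# The cut-off is re-derived from the stored negative-partition count: after sorting, the
# N-th lowest amplitude (N = neg count) becomes the cut and all larger amplitudes are
# called positive below, e.g. amplitudes [1, 2, 3, 10, 11] with ch1Neg = 3 give ch1Cut = 3,
# so 10 and 11 are written as "p"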
for splitLine in wellLines[1:]:
if len(splitLine) < 2:
continue
if keepCh1:
outTabFile += splitLine[0] + "\t"
if float(splitLine[0]) > ch1Cut:
outTabFile += "p"
else:
outTabFile += "n"
if keepCh2:
if keepCh1:
outTabFile += "\t"
outTabFile += splitLine[1] + "\t"
if float(splitLine[1]) > ch2Cut:
outTabFile += "p\n"
else:
outTabFile += "n\n"
else:
outTabFile += "\n"
_writeFileInRDML(self._rdmlFilename, finalFileName, outTabFile)
new_node = et.Element("endPtTable")
new_node.text = re.sub(r'^partitions/', '', finalFileName)
place = _get_tag_pos(partit, "endPtTable", ["volume", "endPtTable", "data"], 9999999)
partit.insert(place, new_node)
else:
react.remove(partit)
elif fileformat == "Stilla":
wellLines = list(csv.reader(wellfile, delimiter=','))
ch1Pos = "0"
ch1Neg = "0"
ch1sum = 0
ch2Pos = "0"
ch2Neg = "0"
ch2sum = 0
ch3Pos = "0"
ch3Neg = "0"
ch3sum = 0
if well in headerLookup:
if "Ch1" in headerLookup[well] and 1 not in ignoreList:
keepCh1 = True
header += headerLookup[well]["Ch1"] + "\t" + headerLookup[well]["Ch1"] + "\t"
if "Ch2" in headerLookup[well] and 2 not in ignoreList:
keepCh2 = True
header += headerLookup[well]["Ch2"] + "\t" + headerLookup[well]["Ch2"] + "\t"
if "Ch3" in headerLookup[well] and 3 not in ignoreList:
keepCh3 = True
header += headerLookup[well]["Ch3"] + "\t" + headerLookup[well]["Ch3"] + "\t"
outTabFile += re.sub(r'\t$', '\n', header)
else:
headerLookup[well] = {}
dyes = ["Ch1", "Ch2", "Ch3"]
if len(wellLines) > 1:
ch1Pos = ""
ch1Neg = ""
ch2Pos = ""
ch2Neg = ""
ch3Pos = ""
ch3Neg = ""
if re.search(r"\d", wellLines[1][0]) and 1 not in ignoreList:
keepCh1 = True
if len(wellLines[1]) > 1 and re.search(r"\d", wellLines[1][1]) and 2 not in ignoreList:
keepCh2 = True
if len(wellLines[1]) > 2 and re.search(r"\d", wellLines[1][2]) and 3 not in ignoreList:
keepCh3 = True
for dye in dyes:
if dye not in dyeLookup:
rootEl.new_dye(dye)
dyeLookup[dye] = 1
ret += "Created dye \"" + dye + "\"\n"
dyeCount = 0
for dye in dyes:
dyeCount += 1
targetName = "Target in " + well + " " + dye
if targetName not in tarTypeLookup:
rootEl.new_target(targetName, "toi")
elem = rootEl.get_target(byid=targetName)
elem["dyeId"] = dye
tarTypeLookup[targetName] = "toi"
ret += "Created target " + targetName + " with type \"" + "toi" + "\" and dye \"" + dye + "\"\n"
if (dyeCount == 1 and keepCh1) or (dyeCount == 2 and keepCh2) or (dyeCount == 3 and keepCh3):
headerLookup[well][dye] = targetName
header += targetName + "\t" + targetName + "\t"
outTabFile += re.sub(r'\t$', '\n', header)
if keepCh1 or keepCh2 or keepCh3:
exp = _get_all_children(partit, "data")
for node in exp:
forId = _get_first_child(node, "tar")
if keepCh1 and forId is not None and forId.attrib['id'] == headerLookup[well]["Ch1"]:
dataCh1 = node
ch1Pos = _get_first_child_text(dataCh1, "pos")
ch1Neg = _get_first_child_text(dataCh1, "neg")
ch1sum += int(ch1Pos) + int(ch1Neg)
if keepCh2 and forId is not None and forId.attrib['id'] == headerLookup[well]["Ch2"]:
dataCh2 = node
ch2Pos = _get_first_child_text(dataCh2, "pos")
ch2Neg = _get_first_child_text(dataCh2, "neg")
ch2sum += int(ch2Pos) + int(ch2Neg)
if keepCh3 and forId is not None and forId.attrib['id'] == headerLookup[well]["Ch3"]:
dataCh3 = node
ch3Pos = _get_first_child_text(dataCh3, "pos")
ch3Neg = _get_first_child_text(dataCh3, "neg")
ch3sum += int(ch3Pos) + int(ch3Neg)
if dataCh1 is None and keepCh1:
new_node = et.Element("data")
place = _get_tag_pos(partit, "data", ["volume", "endPtTable", "data"], 9999999)
partit.insert(place, new_node)
dataCh1 = new_node
new_node = et.Element("tar", id=headerLookup[well]["Ch1"])
place = _get_tag_pos(dataCh1, "tar", ["tar", "pos", "neg", "undef", "excl", "conc"],
9999999)
dataCh1.insert(place, new_node)
ch1Pos = ""
ch1Neg = ""
ch1sum = 2
if dataCh2 is None and keepCh2:
new_node = et.Element("data")
place = _get_tag_pos(partit, "data", ["volume", "endPtTable", "data"], 9999999)
partit.insert(place, new_node)
dataCh2 = new_node
new_node = et.Element("tar", id=headerLookup[well]["Ch2"])
place = _get_tag_pos(dataCh2, "tar", ["tar", "pos", "neg", "undef", "excl", "conc"],
9999999)
dataCh2.insert(place, new_node)
ch2Pos = ""
ch2Neg = ""
ch2sum = 2
if dataCh3 is None and keepCh3:
new_node = et.Element("data")
place = _get_tag_pos(partit, "data", ["volume", "endPtTable", "data"], 9999999)
partit.insert(place, new_node)
dataCh3 = new_node
new_node = et.Element("tar", id=headerLookup[well]["Ch3"])
place = _get_tag_pos(dataCh3, "tar", ["tar", "pos", "neg", "undef", "excl", "conc"],
9999999)
dataCh3.insert(place, new_node)
ch3Pos = ""
ch3Neg = ""
ch3sum = 2
if dataCh1 is None and dataCh2 is None and dataCh3 is None:
continue
if ch1sum < 1 and ch2sum < 1 and ch3sum < 1:
continue
if ch1Pos == "" and ch1Neg == "" and ch2Pos == "" and ch2Neg == "" and ch3Pos == "" and ch3Neg == "":
countPart = 0
for splitLine in wellLines[1:]:
if len(splitLine[0]) < 2:
continue
if keepCh1:
outTabFile += splitLine[0] + "\t" + "u"
if keepCh2:
if keepCh1:
outTabFile += "\t"
outTabFile += splitLine[1] + "\t" + "u"
if keepCh3:
if keepCh1 or keepCh2:
outTabFile += "\t"
outTabFile += splitLine[2] + "\t" + "u\n"
else:
outTabFile += "\n"
countPart += 1
if keepCh1:
new_node = et.Element("pos")
new_node.text = "0"
place = _get_tag_pos(dataCh1, "pos", ["tar", "pos", "neg", "undef", "excl", "conc"],
9999999)
dataCh1.insert(place, new_node)
new_node = et.Element("neg")
new_node.text = "0"
place = _get_tag_pos(dataCh1, "neg", ["tar", "pos", "neg", "undef", "excl", "conc"],
9999999)
dataCh1.insert(place, new_node)
new_node = et.Element("undef")
new_node.text = str(countPart)
place = _get_tag_pos(dataCh1, "neg", ["tar", "pos", "neg", "undef", "excl", "conc"],
9999999)
dataCh1.insert(place, new_node)
if keepCh2:
new_node = et.Element("pos")
new_node.text = "0"
place = _get_tag_pos(dataCh2, "pos", ["tar", "pos", "neg", "undef", "excl", "conc"],
9999999)
dataCh2.insert(place, new_node)
new_node = et.Element("neg")
new_node.text = "0"
place = _get_tag_pos(dataCh2, "neg", ["tar", "pos", "neg", "undef", "excl", "conc"],
9999999)
dataCh2.insert(place, new_node)
new_node = et.Element("undef")
new_node.text = str(countPart)
place = _get_tag_pos(dataCh2, "neg", ["tar", "pos", "neg", "undef", "excl", "conc"],
9999999)
dataCh2.insert(place, new_node)
if keepCh3:
new_node = et.Element("pos")
new_node.text = "0"
place = _get_tag_pos(dataCh3, "pos", ["tar", "pos", "neg", "undef", "excl", "conc"],
9999999)
dataCh3.insert(place, new_node)
new_node = et.Element("neg")
new_node.text = "0"
place = _get_tag_pos(dataCh3, "neg", ["tar", "pos", "neg", "undef", "excl", "conc"],
9999999)
dataCh3.insert(place, new_node)
new_node = et.Element("undef")
new_node.text = str(countPart)
place = _get_tag_pos(dataCh3, "neg", ["tar", "pos", "neg", "undef", "excl", "conc"],
9999999)
dataCh3.insert(place, new_node)
else:
ch1Arr = []
ch2Arr = []
ch3Arr = []
ch1Cut = 0
ch2Cut = 0
ch3Cut = 0
for splitLine in wellLines[1:]:
if len(splitLine) < 3:
continue
if keepCh1:
ch1Arr.append(float(splitLine[0]))
if keepCh2:
ch2Arr.append(float(splitLine[1]))
if keepCh3:
ch3Arr.append(float(splitLine[2]))
if keepCh1:
ch1Arr.sort()
if 0 < int(ch1Neg) <= len(ch1Arr):
ch1Cut = ch1Arr[int(ch1Neg) - 1]
if keepCh2:
ch2Arr.sort()
if 0 < int(ch2Neg) <= len(ch2Arr):
ch2Cut = ch2Arr[int(ch2Neg) - 1]
if keepCh3:
ch3Arr.sort()
if 0 < int(ch3Neg) <= len(ch3Arr):
ch3Cut = ch3Arr[int(ch3Neg) - 1]
for splitLine in wellLines[1:]:
if len(splitLine) < 2:
continue
if keepCh1:
outTabFile += splitLine[0] + "\t"
if float(splitLine[0]) > ch1Cut:
outTabFile += "p"
else:
outTabFile += "n"
if keepCh2:
if keepCh1:
outTabFile += "\t"
outTabFile += splitLine[1] + "\t"
if float(splitLine[1]) > ch2Cut:
outTabFile += "p"
else:
outTabFile += "n"
if keepCh3:
if keepCh1 or keepCh2:
outTabFile += "\t"
outTabFile += splitLine[2] + "\t"
if float(splitLine[2]) > ch3Cut:
outTabFile += "p\n"
else:
outTabFile += "n\n"
else:
outTabFile += "\n"
_writeFileInRDML(self._rdmlFilename, finalFileName, outTabFile)
new_node = et.Element("endPtTable")
new_node.text = re.sub(r'^partitions/', '', finalFileName)
place = _get_tag_pos(partit, "endPtTable", ["volume", "endPtTable", "data"], 9999999)
partit.insert(place, new_node)
else:
react.remove(partit)
ret += warnVolume
return ret
def get_digital_overview_data(self, rootEl):
"""Provides the digital overview data in tab seperated format.
Args:
self: The class self parameter.
rootEl: The rdml root element.
Returns:
A string with the overview data table.
"""
# 0 1 2 3 4 5 6 7 8 9 10 11 12 13
ret = "Pos\tWell\tSample\tSampleType\tTarget\tTargetType\tDye\tCopies\tPositives\tNegatives\tUndefined\tExcluded\tVolume\tFileName\n"
tabLines = []
# Fill the lookup dics
samTypeLookup = {}
tarTypeLookup = {}
tarDyeLookup = {}
samples = _get_all_children(rootEl._node, "sample")
for sample in samples:
if sample.attrib['id'] != "":
samId = sample.attrib['id']
forType = _get_first_child_text(sample, "type")
if forType != "":
samTypeLookup[samId] = forType
targets = _get_all_children(rootEl._node, "target")
for target in targets:
if target.attrib['id'] != "":
tarId = target.attrib['id']
forType = _get_first_child_text(target, "type")
if forType != "":
tarTypeLookup[tarId] = forType
forId = _get_first_child(target, "dyeId")
if forId is not None and forId.attrib['id'] != "":
tarDyeLookup[tarId] = forId.attrib['id']
reacts = _get_all_children(self._node, "react")
for react in reacts:
pPos = react.attrib['id']
posId = int(react.attrib['id'])
pIdNumber = (posId - 1) % int(self["pcrFormat_columns"]) + 1
pIdLetter = chr(ord("A") + int((posId - 1) / int(self["pcrFormat_columns"])))
pWell = pIdLetter + str(pIdNumber)
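# e.g. with 12 columns, react id 15 gives row (15 - 1) // 12 = 1 -> "B" and column (15 - 1) % 12 + 1 = 3, i.e. well "B3"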
pSample = ""
pSampleType = ""
pFileName = ""
forId = _get_first_child(react, "sample")
if forId is not None:
if forId.attrib['id'] != "":
pSample = forId.attrib['id']
pSampleType = samTypeLookup[forId.attrib['id']]
partit = _get_first_child(react, "partitions")
if partit is not None:
endPtTable = _get_first_child_text(partit, "endPtTable")
if endPtTable != "":
pFileName = endPtTable
pVolume = _get_first_child_text(partit, "volume")
partit_datas = _get_all_children(partit, "data")
for partit_data in partit_datas:
pTarget = ""
pTargetType = ""
pDye = ""
forId = _get_first_child(partit_data, "tar")
if forId is not None:
if forId.attrib['id'] != "":
pTarget = forId.attrib['id']
pTargetType = tarTypeLookup[pTarget]
pDye = tarDyeLookup[pTarget]
pCopies = _get_first_child_text(partit_data, "conc")
pPositives = _get_first_child_text(partit_data, "pos")
pNegatives = _get_first_child_text(partit_data, "neg")
pUnknown = _get_first_child_text(partit_data, "undef")
pExcluded = _get_first_child_text(partit_data, "excl")
retLine = pPos + "\t"
retLine += pWell + "\t"
retLine += pSample + "\t"
retLine += pSampleType + "\t"
retLine += pTarget + "\t"
retLine += pTargetType + "\t"
retLine += pDye + "\t"
retLine += pCopies + "\t"
retLine += pPositives + "\t"
retLine += pNegatives + "\t"
retLine += pUnknown + "\t"
retLine += pExcluded + "\t"
retLine += pVolume + "\t"
retLine += pFileName + "\n"
tabLines.append(retLine)
tabLines.sort(key=_sort_list_digital_PCR)
for tLine in tabLines:
ret += tLine
return ret
def get_digital_raw_data(self, reactPos):
"""Provides the digital of a react in tab seperated format.
Args:
self: The class self parameter.
reactPos: The react id to get the digital raw data from
Returns:
A string with the raw data table.
"""
react = None
retVal = ""
# Get the position number if required
wellPos = str(reactPos)
if re.search(r"\D\d+", wellPos):
old_letter = ord(re.sub(r"\d", "", wellPos.upper())) - ord("A")
old_nr = int(re.sub(r"\D", "", wellPos))
newId = old_nr + old_letter * int(self["pcrFormat_columns"])
wellPos = str(newId)
exp = _get_all_children(self._node, "react")
for node in exp:
if wellPos == node.attrib['id']:
react = node
break
if react is None:
return ""
partit = _get_first_child(react, "partitions")
if partit is None:
return ""
finalFileName = "partitions/" + _get_first_child_text(partit, "endPtTable")
if finalFileName == "partitions/":
return ""
if zipfile.is_zipfile(self._rdmlFilename):
zf = zipfile.ZipFile(self._rdmlFilename, 'r')
try:
retVal = zf.read(finalFileName).decode('utf-8')
except KeyError:
raise RdmlError('No ' + finalFileName + ' in compressed RDML file found.')
finally:
zf.close()
return retVal
def getreactjson(self):
"""Returns a json of the react data including fluorescence data.
Args:
self: The class self parameter.
Returns:
A json of the data.
"""
all_data = {}
data = []
reacts = _get_all_children(self._node, "react")
adp_cyc_max = 0.0
adp_fluor_min = 99999999
adp_fluor_max = 0.0
mdp_tmp_min = 120.0
mdp_tmp_max = 0.0
mdp_fluor_min = 99999999
mdp_fluor_max = 0.0
max_data = 0
max_partition_data = 0
anyCorrections = 0
for react in reacts:
react_json = {
"id": react.get('id'),
}
forId = _get_first_child(react, "sample")
if forId is not None:
if forId.attrib['id'] != "":
react_json["sample"] = forId.attrib['id']
react_datas = _get_all_children(react, "data")
max_data = max(max_data, len(react_datas))
react_datas_json = []
for react_data in react_datas:
in_react = {}
forId = _get_first_child(react_data, "tar")
if forId is not None:
if forId.attrib['id'] != "":
in_react["tar"] = forId.attrib['id']
_add_first_child_to_dic(react_data, in_react, True, "cq")
_add_first_child_to_dic(react_data, in_react, True, "N0")
_add_first_child_to_dic(react_data, in_react, True, "ampEffMet")
_add_first_child_to_dic(react_data, in_react, True, "ampEff")
_add_first_child_to_dic(react_data, in_react, True, "ampEffSE")
_add_first_child_to_dic(react_data, in_react, True, "corrF")
# Calculate the correction factors
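# corrCq = cq - log10(corrF) / log10(ampEff) and corrN0 = corrF * N0 (applied below);
# e.g. corrF = 0.5 with ampEff = 2.0 shifts a cq of 25.0 to 26.0 and halves N0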
calcCorr = _get_first_child_text(react_data, "corrF")
calcCq = _get_first_child_text(react_data, "cq")
calcN0 = _get_first_child_text(react_data, "N0")
calcEff = _get_first_child_text(react_data, "ampEff")
in_react["corrCq"] = ""
in_react["corrN0"] = ""
if not calcCorr == "":
calcCorr = float(calcCorr)
if not np.isnan(calcCorr):
if 0.0 < calcCorr < 1.0:
if calcEff == "":
calcEff = 2.0
else:
calcEff = float(calcEff)
if not np.isnan(calcEff):
if 0.0 < calcEff < 3.0:
if not calcCq == "":
calcCq = float(calcCq)
if not np.isnan(calcCq):
if calcCq > 0.0:
finalCq = calcCq - np.log10(calcCorr) / np.log10(calcEff)
in_react["corrCq"] = "{:.3f}".format(finalCq)
anyCorrections = 1
else:
in_react["corrCq"] = "-1.0"
if not calcN0 == "":
calcN0 = float(calcN0)
if not np.isnan(calcN0):
if calcN0 > 0.0:
finalN0 = calcCorr * calcN0
in_react["corrN0"] = "{:.2e}".format(finalN0)
anyCorrections = 1
else:
in_react["corrN0"] = "-1.0"
if calcCorr == 0.0:
if not calcCq == "":
in_react["corrCq"] = ""
if not calcN0 == "":
in_react["corrN0"] = 0.0
if calcCorr == 1.0:
if not calcCq == "":
in_react["corrCq"] = calcCq
if not calcN0 == "":
in_react["corrN0"] = calcN0
_add_first_child_to_dic(react_data, in_react, True, "meltTemp")
_add_first_child_to_dic(react_data, in_react, True, "excl")
_add_first_child_to_dic(react_data, in_react, True, "note")
_add_first_child_to_dic(react_data, in_react, True, "endPt")
_add_first_child_to_dic(react_data, in_react, True, "bgFluor")
_add_first_child_to_dic(react_data, in_react, True, "bgFluorSlp")
_add_first_child_to_dic(react_data, in_react, True, "quantFluor")
adps = _get_all_children(react_data, "adp")
adps_json = []
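# Each amplification data point becomes a [cycle, fluorescence, temperature] triple;
# the global cycle and fluorescence ranges are tracked for the returned summary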
for adp in adps:
cyc = _get_first_child_text(adp, "cyc")
fluor = _get_first_child_text(adp, "fluor")
adp_cyc_max = max(adp_cyc_max, float(cyc))
adp_fluor_min = min(adp_fluor_min, float(fluor))
adp_fluor_max = max(adp_fluor_max, float(fluor))
in_adp = [cyc, fluor, _get_first_child_text(adp, "tmp")]
adps_json.append(in_adp)
in_react["adps"] = adps_json
mdps = _get_all_children(react_data, "mdp")
mdps_json = []
for mdp in mdps:
tmp = _get_first_child_text(mdp, "tmp")
fluor = _get_first_child_text(mdp, "fluor")
mdp_tmp_min = min(mdp_tmp_min, float(tmp))
mdp_tmp_max = max(mdp_tmp_max, float(tmp))
mdp_fluor_min = min(mdp_fluor_min, float(fluor))
mdp_fluor_max = max(mdp_fluor_max, float(fluor))
in_mdp = [tmp, fluor]
mdps_json.append(in_mdp)
in_react["mdps"] = mdps_json
react_datas_json.append(in_react)
react_json["datas"] = react_datas_json
partit = _get_first_child(react, "partitions")
if partit is not None:
in_partitions = {}
endPtTable = _get_first_child_text(partit, "endPtTable")
if endPtTable != "":
in_partitions["endPtTable"] = endPtTable
partit_datas = _get_all_children(partit, "data")
max_partition_data = max(max_partition_data, len(partit_datas))
partit_datas_json = []
for partit_data in partit_datas:
in_partit = {}
forId = _get_first_child(partit_data, "tar")
if forId is not None:
if forId.attrib['id'] != "":
in_partit["tar"] = forId.attrib['id']
_add_first_child_to_dic(partit_data, in_partit, False, "pos")
_add_first_child_to_dic(partit_data, in_partit, False, "neg")
_add_first_child_to_dic(partit_data, in_partit, True, "undef")
_add_first_child_to_dic(partit_data, in_partit, True, "excl")
_add_first_child_to_dic(partit_data, in_partit, True, "conc")
partit_datas_json.append(in_partit)
in_partitions["datas"] = partit_datas_json
react_json["partitions"] = in_partitions
data.append(react_json)
all_data["reacts"] = data
all_data["adp_cyc_max"] = adp_cyc_max
all_data["anyCalcCorrections"] = anyCorrections
all_data["adp_fluor_min"] = adp_fluor_min
all_data["adp_fluor_max"] = adp_fluor_max
all_data["mdp_tmp_min"] = mdp_tmp_min
all_data["mdp_tmp_max"] = mdp_tmp_max
all_data["mdp_fluor_min"] = mdp_fluor_min
all_data["mdp_fluor_max"] = mdp_fluor_max
all_data["max_data_len"] = max_data
all_data["max_partition_data_len"] = max_partition_data
return all_data
def setExclNote(self, vReact, vTar, vExcl, vNote):
"""Saves the note and excl string for one react/data combination.
Args:
self: The class self parameter.
vReact: The reaction id.
vTar: The target id.
vExcl: The exclusion string to save.
vNote: The note string to save.
Returns:
Nothing, updates RDML data.
"""
expParent = self._node.getparent()
rootPar = expParent.getparent()
ver = rootPar.get('version')
dataXMLelements = _getXMLDataType()
reacts = _get_all_children(self._node, "react")
for react in reacts:
if int(react.get('id')) == int(vReact):
react_datas = _get_all_children(react, "data")
for react_data in react_datas:
forId = _get_first_child(react_data, "tar")
if forId is not None:
if forId.attrib['id'] == vTar:
_change_subelement(react_data, "excl", dataXMLelements, vExcl, True, "string")
if ver == "1.3":
_change_subelement(react_data, "note", dataXMLelements, vNote, True, "string")
return
def webAppLinRegPCR(self, pcrEfficiencyExl=0.05, updateRDML=False, excludeNoPlateau=True, excludeEfficiency="outlier"):
"""Performs LinRegPCR on the run. Modifies the cq values and returns a json with additional data.
Args:
self: The class self parameter.
pcrEfficiencyExl: Exclude samples with an efficiency outside the given range (0.05).
updateRDML: If true, update the RDML data with the calculated values.
excludeNoPlateau: If true, samples without plateau are excluded from mean PCR efficiency calculation.
excludeEfficiency: Choose "outlier", "mean", "include" to exclude based on indiv PCR eff.
Returns:
A dictionary with the resulting data, presence and format depending on input.
rawData: A 2d array with the raw fluorescence values
baselineCorrectedData: A 2d array with the baseline corrected raw fluorescence values
resultsList: A 2d array object.
resultsCSV: A csv string.
"""
allData = self.getreactjson()
res = self.linRegPCR(pcrEfficiencyExl=pcrEfficiencyExl,
updateRDML=updateRDML,
excludeNoPlateau=excludeNoPlateau,
excludeEfficiency=excludeEfficiency,
saveRaw=False,
saveBaslineCorr=True,
saveResultsList=True,
saveResultsCSV=False,
verbose=False)
if "baselineCorrectedData" in res:
bas_cyc_max = len(res["baselineCorrectedData"][0]) - 5
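# The first five columns of each baselineCorrectedData row are annotation (id, well, sample,
# target, excluded); per-cycle fluorescence starts at column 5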
bas_fluor_min = 99999999
bas_fluor_max = 0.0
for row in range(1, len(res["baselineCorrectedData"])):
bass_json = []
for col in range(5, len(res["baselineCorrectedData"][row])):
cyc = res["baselineCorrectedData"][0][col]
fluor = res["baselineCorrectedData"][row][col]
if not (np.isnan(fluor) or fluor <= 0.0):
bas_fluor_min = min(bas_fluor_min, float(fluor))
bas_fluor_max = max(bas_fluor_max, float(fluor))
in_bas = [cyc, fluor, ""]
bass_json.append(in_bas)
# Fixme do not loop over all, use sorted data and clever moving
for react in allData["reacts"]:
if react["id"] == res["baselineCorrectedData"][row][0]:
for data in react["datas"]:
if data["tar"] == res["baselineCorrectedData"][row][3]:
data["bass"] = list(bass_json)
allData["bas_cyc_max"] = bas_cyc_max
allData["bas_fluor_min"] = bas_fluor_min
allData["bas_fluor_max"] = bas_fluor_max
if "resultsList" in res:
header = res["resultsList"].pop(0)
resList = sorted(res["resultsList"], key=_sort_list_int)
for rRow in range(0, len(resList)):
for rCol in range(0, len(resList[rRow])):
if isinstance(resList[rRow][rCol], np.float64) and np.isnan(resList[rRow][rCol]):
resList[rRow][rCol] = ""
if isinstance(resList[rRow][rCol], float) and math.isnan(resList[rRow][rCol]):
resList[rRow][rCol] = ""
allData["LinRegPCR_Result_Table"] = json.dumps([header] + resList, cls=NpEncoder)
if "noRawData" in res:
allData["error"] = res["noRawData"]
return allData
def linRegPCR(self, pcrEfficiencyExl=0.05, updateRDML=False, excludeNoPlateau=True, excludeEfficiency="outlier",
commaConv=False, ignoreExclusion=False,
saveRaw=False, saveBaslineCorr=False, saveResultsList=False, saveResultsCSV=False,
timeRun=False, verbose=False):
"""Performs LinRegPCR on the run. Modifies the cq values and returns a json with additional data.
Args:
self: The class self parameter.
pcrEfficiencyExl: Exclude samples with an efficiency outside the given range (0.05).
updateRDML: If true, update the RDML data with the calculated values.
excludeNoPlateau: If true, samples without plateau are excluded from mean PCR efficiency calculation.
excludeEfficiency: Choose "outlier", "mean", "include" to exclude based on indiv PCR eff.
commaConv: If true, convert comma separator to dot.
ignoreExclusion: If true, ignore the RDML exclusion strings.
saveRaw: If true, the raw fluorescence values are included in the returned data.
saveBaslineCorr: If true, the baseline corrected fluorescence values are included in the returned data.
saveResultsList: If true, return a 2d array object.
saveResultsCSV: If true, return a csv string.
timeRun: If true, print runtime for baseline and total.
verbose: If true, comment every performed step.
Returns:
A dictionary with the resulting data, presence and format depending on input.
rawData: A 2d array with the raw fluorescence values
baselineCorrectedData: A 2d array with the baseline corrected raw fluorescence values
resultsList: A 2d array object.
resultsCSV: A csv string.
"""
expParent = self._node.getparent()
rootPar = expParent.getparent()
dataVersion = rootPar.get('version')
if dataVersion == "1.0":
raise RdmlError('LinRegPCR requires RDML version > 1.0.')
##############################
# Collect the data in arrays #
##############################
# res is a 2 dimensional array accessed only by
# variables, so columns might be added here
header = [["id", # 0
"well", # 1
"sample", # 2
"sample type", # 3
"sample nucleotide", # 4
"target", # 5
"target chemistry", # 6
"excluded", # 7
"note", # 8
"baseline", # 9
"lower limit", # 10
"upper limit", # 11
"common threshold", # 12
"group threshold", # 13
"n in log phase", # 14
"last log cycle", # 15
"n included", # 16
"log lin cycle", # 17
"log lin fluorescence", # 18
"indiv PCR eff", # 19
"R2", # 20
"N0 (indiv eff - for debug use)", # 21
"Cq (indiv eff - for debug use)", # 22
"Cq with group threshold (indiv eff - for debug use)", # 23
"mean PCR eff", # 24
"standard error of the mean PCR eff", # 25
"N0 (mean eff)", # 26
"Cq (mean eff)", # 27
"mean PCR eff - no plateau", # 28
"standard error of the mean PCR eff - no plateau", # 29
"N0 (mean eff) - no plateau", # 30
"Cq (mean eff) - no plateau", # 31
"mean PCR eff - mean efficiency", # 32
"standard error of the mean PCR eff - mean efficiency", # 33
"N0 (mean eff) - mean efficiency", # 34
"Cq (mean eff) - mean efficiency", # 35
"mean PCR eff - no plateau - mean efficiency", # 36
"standard error of the mean PCR eff - no plateau - mean efficiency", # 37
"N0 (mean eff) - no plateau - mean efficiency", # 38
"Cq (mean eff) - no plateau - mean efficiency", # 39
"mean PCR eff - stat efficiency", # 40
"standard error of the mean PCR eff - stat efficiency", # 41
"N0 (mean eff) - stat efficiency", # 42
"Cq (mean eff) - stat efficiency", # 43
"mean PCR eff - no plateau - stat efficiency", # 44
"standard error of the stat PCR eff - no plateau - stat efficiency", # 45
"N0 (mean eff) - no plateau - stat efficiency", # 46
"Cq (mean eff) - no plateau - stat efficiency", # 47
"amplification", # 48
"baseline error", # 49
"plateau", # 50
"noisy sample", # 51
"PCR efficiency outside mean rage", # 52
"PCR efficiency outside mean rage - no plateau", # 53
"PCR efficiency outlier", # 54
"PCR efficiency outlier - no plateau", # 55
"short log lin phase", # 56
"Cq is shifting", # 57
"too low Cq eff", # 58
"too low Cq N0", # 59
"used for W-o-L setting"]] # 60
rar_id = 0
rar_well = 1
rar_sample = 2
rar_sample_type = 3
rar_sample_nucleotide = 4
rar_tar = 5
rar_tar_chemistry = 6
rar_excl = 7
rar_note = 8
rar_baseline = 9
rar_lower_limit = 10
rar_upper_limit = 11
rar_threshold_common = 12
rar_threshold_group = 13
rar_n_log = 14
rar_stop_log = 15
rar_n_included = 16
rar_log_lin_cycle = 17
rar_log_lin_fluorescence = 18
rar_indiv_PCR_eff = 19
rar_R2 = 20
rar_N0_indiv_eff = 21
rar_Cq_common = 22
rar_Cq_grp = 23
rar_meanEff_Skip = 24
rar_stdEff_Skip = 25
rar_meanN0_Skip = 26
rar_Cq_Skip = 27
rar_meanEff_Skip_Plat = 28
rar_stdEff_Skip_Plat = 29
rar_meanN0_Skip_Plat = 30
rar_Cq_Skip_Plat = 31
rar_meanEff_Skip_Mean = 32
rar_stdEff_Skip_Mean = 33
rar_meanN0_Skip_Mean = 34
rar_Cq_Skip_Mean = 35
rar_meanEff_Skip_Plat_Mean = 36
rar_stdEff_Skip_Plat_Mean = 37
rar_meanN0_Skip_Plat_Mean = 38
rar_Cq_Skip_Plat_Mean = 39
rar_meanEff_Skip_Out = 40
rar_stdEff_Skip_Out = 41
rar_meanN0_Skip_Out = 42
rar_Cq_Skip_Out = 43
rar_meanEff_Skip_Plat_Out = 44
rar_stdEff_Skip_Plat_Out = 45
rar_meanN0_Skip_Plat_Out = 46
rar_Cq_Skip_Plat_Out = 47
rar_amplification = 48
rar_baseline_error = 49
rar_plateau = 50
rar_noisy_sample = 51
rar_effOutlier_Skip_Mean = 52
rar_effOutlier_Skip_Plat_Mean = 53
rar_effOutlier_Skip_Out = 54
rar_effOutlier_Skip_Plat_Out = 55
rar_shortLogLinPhase = 56
rar_CqIsShifting = 57
rar_tooLowCqEff = 58
rar_tooLowCqN0 = 59
rar_isUsedInWoL = 60
res = []
finalData = {}
adp_cyc_max = 0
pcrEfficiencyExl = float(pcrEfficiencyExl)
if excludeEfficiency not in ["outlier", "mean", "include"]:
excludeEfficiency = "outlier"
reacts = _get_all_children(self._node, "react")
# First get the max number of cycles and create the numpy array
for react in reacts:
react_datas = _get_all_children(react, "data")
for react_data in react_datas:
adps = _get_all_children(react_data, "adp")
for adp in adps:
cyc = _get_first_child_text(adp, "cyc")
adp_cyc_max = max(adp_cyc_max, float(cyc))
adp_cyc_max = math.ceil(adp_cyc_max)
# spFl is the shape for all fluorescence numpy data arrays
spFl = (len(reacts), int(adp_cyc_max))
rawFluor = np.zeros(spFl, dtype=np.float64)
rawFluor[rawFluor <= 0.00000001] = np.nan
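# rawFluor starts as an all-NaN matrix; measured values are filled in below, so cycles without data stay NaN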
# Create a matrix with the cycle for each rawFluor value
vecCycles = np.tile(np.arange(1, (spFl[1] + 1), dtype=np.int64), (spFl[0], 1))
# Initialization of the vecNoAmplification vector
vecExcludedByUser = np.zeros(spFl[0], dtype=np.bool_)
rdmlElemData = []
# Now process the data for numpy and create results array
rowCount = 0
for react in reacts:
posId = react.get('id')
pIdNumber = (int(posId) - 1) % int(self["pcrFormat_columns"]) + 1
pIdLetter = chr(ord("A") + int((int(posId) - 1) / int(self["pcrFormat_columns"])))
pWell = pIdLetter + str(pIdNumber)
sample = ""
forId = _get_first_child(react, "sample")
if forId is not None:
if forId.attrib['id'] != "":
sample = forId.attrib['id']
react_datas = _get_all_children(react, "data")
for react_data in react_datas:
forId = _get_first_child(react_data, "tar")
target = ""
if forId is not None:
if forId.attrib['id'] != "":
target = forId.attrib['id']
if ignoreExclusion:
excl = ""
else:
excl = _get_first_child_text(react_data, "excl")
excl = _cleanErrorString(excl, "amp")
excl = re.sub(r'^;|;$', '', excl)
if not excl == "":
vecExcludedByUser[rowCount] = True
noteVal = _get_first_child_text(react_data, "note")
noteVal = _cleanErrorString(noteVal, "amp")
noteVal = re.sub(r'^;|;$', '', noteVal)
rdmlElemData.append(react_data)
res.append([posId, pWell, sample, "", "", target, "", excl, noteVal, "",
"", "", "", "", "", "", "", "", "", "",
"", "", "", "", "", "", "", "", "", "",
"", "", "", "", "", "", "", "", "", "",
"", "", "", "", "", "", "", "", "", "",
"", "", "", "", "", "", "", "", "", "",
""]) # Must match header length
adps = _get_all_children(react_data, "adp")
for adp in adps:
cyc = int(math.ceil(float(_get_first_child_text(adp, "cyc")))) - 1
fluor = _get_first_child_text(adp, "fluor")
if commaConv:
noDot = fluor.replace(".", "")
fluor = noDot.replace(",", ".")
rawFluor[rowCount, cyc] = float(fluor)
rowCount += 1
# Look up sample and target information
parExp = self._node.getparent()
parRoot = parExp.getparent()
dicLU_dyes = {}
luDyes = _get_all_children(parRoot, "dye")
for lu_dye in luDyes:
lu_chemistry = _get_first_child_text(lu_dye, "dyeChemistry")
if lu_chemistry == "":
lu_chemistry = "non-saturating DNA binding dye"
if lu_dye.attrib['id'] != "":
dicLU_dyes[lu_dye.attrib['id']] = lu_chemistry
dicLU_targets = {}
luTargets = _get_all_children(parRoot, "target")
for lu_target in luTargets:
forId = _get_first_child(lu_target, "dyeId")
lu_dyeId = ""
if forId is not None:
if forId.attrib['id'] != "":
lu_dyeId = forId.attrib['id']
if lu_dyeId == "" or lu_dyeId not in dicLU_dyes:
dicLU_targets[lu_target.attrib['id']] = "non-saturating DNA binding dye"
if lu_target.attrib['id'] != "":
dicLU_targets[lu_target.attrib['id']] = dicLU_dyes[lu_dyeId]
dicLU_samSpecType = {}
dicLU_samGenType = {}
dicLU_samNucl = {}
luSamples = _get_all_children(parRoot, "sample")
for lu_sample in luSamples:
lu_Nucl = ""
forUnit = _get_first_child(lu_sample, "templateQuantity")
if forUnit is not None:
lu_Nucl = _get_first_child_text(forUnit, "nucleotide")
if lu_Nucl == "":
lu_Nucl = "cDNA"
if lu_sample.attrib['id'] != "":
dicLU_TypeData = {}
typesList = _get_all_children(lu_sample, "type")
for node in typesList:
if "targetId" in node.attrib:
dicLU_TypeData[node.attrib["targetId"]] = node.text
else:
dicLU_samGenType[lu_sample.attrib['id']] = node.text
dicLU_samSpecType[lu_sample.attrib['id']] = dicLU_TypeData
dicLU_samNucl[lu_sample.attrib['id']] = lu_Nucl
# Update the table with dictionary help
for oRow in range(0, spFl[0]):
if res[oRow][rar_sample] != "":
# Try to get specific type information else general else "unkn"
if res[oRow][rar_tar] in dicLU_samSpecType[res[oRow][rar_sample]]:
res[oRow][rar_sample_type] = dicLU_samSpecType[res[oRow][rar_sample]][res[oRow][rar_tar]]
elif res[oRow][rar_sample] in dicLU_samGenType:
res[oRow][rar_sample_type] = dicLU_samGenType[res[oRow][rar_sample]]
else:
res[oRow][rar_sample_type] = "unkn"
res[oRow][rar_sample_nucleotide] = dicLU_samNucl[res[oRow][rar_sample]]
if res[oRow][rar_tar] != "":
res[oRow][rar_tar_chemistry] = dicLU_targets[res[oRow][rar_tar]]
if saveRaw:
rawTable = [[header[0][rar_id], header[0][rar_well], header[0][rar_sample], header[0][rar_tar], header[0][rar_excl]]]
for oCol in range(0, spFl[1]):
rawTable[0].append(oCol + 1)
for oRow in range(0, spFl[0]):
rawTable.append([res[oRow][rar_id], res[oRow][rar_well], res[oRow][rar_sample], res[oRow][rar_tar], res[oRow][rar_excl]])
for oCol in range(0, spFl[1]):
rawTable[oRow + 1].append(float(rawFluor[oRow, oCol]))
finalData["rawData"] = rawTable
# Count the targets and create the target variables
# Position 0 is for the general over all window without targets
vecTarget = np.zeros(spFl[0], dtype=np.int64)
vecTarget[vecTarget <= 0] = -1
targetsCount = 1
tarWinLookup = {}
for oRow in range(0, spFl[0]):
if res[oRow][rar_tar] not in tarWinLookup:
tarWinLookup[res[oRow][rar_tar]] = targetsCount
targetsCount += 1
vecTarget[oRow] = tarWinLookup[res[oRow][rar_tar]]
upWin = np.zeros(targetsCount, dtype=np.float64)
lowWin = np.zeros(targetsCount, dtype=np.float64)
threshold = np.ones(targetsCount, dtype=np.float64)
# Initialization of the error vectors
vecNoAmplification = np.zeros(spFl[0], dtype=np.bool_)
vecBaselineError = np.zeros(spFl[0], dtype=np.bool_)
vecNoPlateau = np.zeros(spFl[0], dtype=np.bool_)
vecNoisySample = np.zeros(spFl[0], dtype=np.bool_)
vecSkipSample = np.zeros(spFl[0], dtype=np.bool_)
vecShortLogLin = np.zeros(spFl[0], dtype=np.bool_)
vecCtIsShifting = np.zeros(spFl[0], dtype=np.bool_)
vecIsUsedInWoL = np.zeros(spFl[0], dtype=np.bool_)
vecEffOutlier_Skip_Mean = np.zeros(spFl[0], dtype=np.bool_)
vecEffOutlier_Skip_Plat_Mean = np.zeros(spFl[0], dtype=np.bool_)
vecEffOutlier_Skip_Out = np.zeros(spFl[0], dtype=np.bool_)
vecEffOutlier_Skip_Plat_Out = np.zeros(spFl[0], dtype=np.bool_)
vecTooLowCqEff = np.zeros(spFl[0], dtype=np.bool_)
vecTooLowCqN0 = np.zeros(spFl[0], dtype=np.bool_)
# Start and stop cycles of the log lin phase
stopCyc = np.zeros(spFl[0], dtype=np.int64)
startCyc = np.zeros(spFl[0], dtype=np.int64)
startCycFix = np.zeros(spFl[0], dtype=np.int64)
# Initialization of the PCR efficiency vectors
pcrEff = np.ones(spFl[0], dtype=np.float64)
nNulls = np.ones(spFl[0], dtype=np.float64)
nInclu = np.zeros(spFl[0], dtype=np.int64)
correl = np.zeros(spFl[0], dtype=np.float64)
meanEff_Skip = np.zeros(spFl[0], dtype=np.float64)
meanEff_Skip_Plat = np.zeros(spFl[0], dtype=np.float64)
meanEff_Skip_Mean = np.zeros(spFl[0], dtype=np.float64)
meanEff_Skip_Plat_Mean = np.zeros(spFl[0], dtype=np.float64)
meanEff_Skip_Out = np.zeros(spFl[0], dtype=np.float64)
meanEff_Skip_Plat_Out = np.zeros(spFl[0], dtype=np.float64)
stdEff_Skip = np.zeros(spFl[0], dtype=np.float64)
stdEff_Skip_Plat = np.zeros(spFl[0], dtype=np.float64)
stdEff_Skip_Mean = np.zeros(spFl[0], dtype=np.float64)
stdEff_Skip_Plat_Mean = np.zeros(spFl[0], dtype=np.float64)
stdEff_Skip_Out = np.zeros(spFl[0], dtype=np.float64)
stdEff_Skip_Plat_Out = np.zeros(spFl[0], dtype=np.float64)
indMeanX = np.zeros(spFl[0], dtype=np.float64)
indMeanY = np.zeros(spFl[0], dtype=np.float64)
indivCq = np.zeros(spFl[0], dtype=np.float64)
indivCq_Grp = np.zeros(spFl[0], dtype=np.float64)
meanNnull_Skip = np.zeros(spFl[0], dtype=np.float64)
meanNnull_Skip_Plat = np.zeros(spFl[0], dtype=np.float64)
meanNnull_Skip_Mean = np.zeros(spFl[0], dtype=np.float64)
meanNnull_Skip_Plat_Mean = np.zeros(spFl[0], dtype=np.float64)
meanNnull_Skip_Out = np.zeros(spFl[0], dtype=np.float64)
meanNnull_Skip_Plat_Out = np.zeros(spFl[0], dtype=np.float64)
meanCq_Skip = np.zeros(spFl[0], dtype=np.float64)
meanCq_Skip_Plat = np.zeros(spFl[0], dtype=np.float64)
meanCq_Skip_Mean = np.zeros(spFl[0], dtype=np.float64)
meanCq_Skip_Plat_Mean = np.zeros(spFl[0], dtype=np.float64)
meanCq_Skip_Out = np.zeros(spFl[0], dtype=np.float64)
meanCq_Skip_Plat_Out = np.zeros(spFl[0], dtype=np.float64)
# Set all to nan
indMeanX[:] = np.nan
indMeanY[:] = np.nan
indivCq[:] = np.nan
indivCq_Grp[:] = np.nan
meanNnull_Skip[:] = np.nan
meanNnull_Skip_Plat[:] = np.nan
meanNnull_Skip_Mean[:] = np.nan
meanNnull_Skip_Plat_Mean[:] = np.nan
meanNnull_Skip_Out[:] = np.nan
meanNnull_Skip_Plat_Out[:] = np.nan
meanCq_Skip[:] = np.nan
meanCq_Skip_Plat[:] = np.nan
meanCq_Skip_Mean[:] = np.nan
meanCq_Skip_Plat_Mean[:] = np.nan
meanCq_Skip_Out[:] = np.nan
meanCq_Skip_Plat_Out[:] = np.nan
# Basic Variables
pointsInWoL = 4
baseCorFluor = rawFluor.copy()
########################
# Baseline correction #
########################
start_time = datetime.datetime.now()
###########################################################################
# First quality check : Is there enough amplification during the reaction #
###########################################################################
# Slope calculation per react/target - the intercept is never used for now
rawMod = rawFluor.copy()
# There should be no negative values in uncorrected raw data
absMinFluor = np.nanmin(rawMod)
if absMinFluor < 0.0:
finalData["noRawData"] = "Error: Fluorescence data have negative values. Use raw data without baseline correction!"
rawMod[np.isnan(rawMod)] = 0
rawMod[rawMod <= 0.00000001] = np.nan
[slopeAmp, _unused] = _lrp_linReg(vecCycles, np.log10(rawMod))
# Calculate the minimum of fluorescence values per react/target, store it as background
# and subtract it from the raw fluorescence values
vecMinFluor = np.nanmin(rawMod, axis=1)
vecBackground = 0.99 * vecMinFluor
vecDefBackgrd = vecBackground.copy()
minCorFluor = rawMod - vecBackground[:, np.newaxis]
minCorFluor[np.isnan(minCorFluor)] = 0
minCorFluor[minCorFluor <= 0.00000001] = np.nan
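# e.g. a reaction whose lowest raw value is 100 gets a provisional background of 99, so the
# smallest corrected value is 1 and still survives the log10 transform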
minFluCount = np.ones(minCorFluor.shape, dtype=np.int64)
minFluCount[np.isnan(minCorFluor)] = 0
minFluCountSum = np.sum(minFluCount, axis=1)
[minSlopeAmp, _unused] = _lrp_linReg(vecCycles, np.log10(minCorFluor))
for oRow in range(0, spFl[0]):
# Check to detect the negative slopes and the PCR reactions that have an
# amplification of less than seven times the minimum fluorescence
if slopeAmp[oRow] < 0 or minSlopeAmp[oRow] < (np.log10(7.0) / minFluCountSum[oRow]):
vecNoAmplification[oRow] = True
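# i.e. the fitted log-linear slope must correspond to at least a 7-fold total increase
# over the measured cycles (and must not be negative), otherwise "no amplification" is flagged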
# Get the right positions ignoring nan values
posCount = 0
posZero = 0
posOne = 0
posEight = 0
posNine = 0
for realPos in range(0, spFl[1]):
if not np.isnan(minCorFluor[oRow, realPos]):
if posCount == 0:
posZero = realPos
if posCount == 1:
posOne = realPos
if posCount == 8:
posEight = realPos
if posCount == 9:
posNine = realPos
if posCount > 9:
break
posCount += 1
# There must be an increase in fluorescence after the amplification.
if ((minCorFluor[oRow, posEight] + minCorFluor[oRow, posNine]) / 2) / \
((minCorFluor[oRow, posZero] + minCorFluor[oRow, posOne]) / 2) < 1.2:
if minCorFluor[oRow, -1] / np.nanmean(minCorFluor[oRow, posZero:posNine + 1]) < 7:
vecNoAmplification[oRow] = True
if not vecNoAmplification[oRow]:
stopCyc[oRow] = _lrp_findStopCyc(minCorFluor, oRow)
[startCyc[oRow], startCycFix[oRow]] = _lrp_findStartCyc(minCorFluor, oRow, stopCyc[oRow])
else:
vecSkipSample[oRow] = True
stopCyc[oRow] = minCorFluor.shape[1]
startCyc[oRow] = 1
startCycFix[oRow] = 1
# Get the positions ignoring nan values
posCount = 0
posMinOne = 0
posMinTwo = 0
for realPos in range(stopCyc[oRow] - 2, 0, -1):
if not np.isnan(minCorFluor[oRow, realPos - 1]):
if posCount == 0:
posMinOne = realPos + 1
if posCount > 0:
posMinTwo = realPos + 1
break
posCount += 1
if not (minCorFluor[oRow, stopCyc[oRow] - 1] > minCorFluor[oRow, posMinOne - 1] > minCorFluor[oRow, posMinTwo - 1]):
vecNoAmplification[oRow] = True
vecSkipSample[oRow] = True
if vecNoAmplification[oRow] or vecBaselineError[oRow] or stopCyc[oRow] == minCorFluor.shape[1]:
vecNoPlateau[oRow] = True
# Set an initial window already for WOL calculation
lastCycMeanMax = _lrp_lastCycMeanMax(minCorFluor, vecSkipSample, vecNoPlateau)
upWin[0] = 0.1 * lastCycMeanMax
lowWin[0] = 0.1 * lastCycMeanMax / 16.0
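# Initial window of linearity: the upper limit is 10 % of the mean last-cycle fluorescence,
# the lower limit sits 16-fold (2**4, roughly four doublings) below it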
##################################################
# Main loop : Calculation of the baseline values #
##################################################
# The for loop goes through the whole react/target table and makes the calculations one by one
for oRow in range(0, spFl[0]):
if verbose:
print('React: ' + str(oRow))
# If there is a "no amplification" error, there is no baseline value calculated and it is automatically the
# minimum fluorescence value assigned as baseline value for the considered reaction :
if not vecNoAmplification[oRow]:
# Make sure baseline is overestimated, without using slope criterion
# increase baseline per cycle till eff > 2 or remaining log lin points < pointsInWoL
# fastest when vecBackground is directly set to 5 points below stopCyc
start = stopCyc[oRow]
# Find the first value that is not NaN
firstNotNaN = 1 # Cycles so +1 to array
while np.isnan(baseCorFluor[oRow, firstNotNaN - 1]) and firstNotNaN < stopCyc[oRow]:
firstNotNaN += 1
subtrCount = 5
while subtrCount > 0 and start > firstNotNaN:
start -= 1
if not np.isnan(rawFluor[oRow, start - 1]):
subtrCount -= 1
vecBackground[oRow] = 0.99 * rawFluor[oRow, start - 1]
baseCorFluor[oRow] = rawFluor[oRow] - vecBackground[oRow]
baseCorFluor[np.isnan(baseCorFluor)] = 0
baseCorFluor[baseCorFluor <= 0.00000001] = np.nan
# baseline is now certainly too high
# 1. extend line downwards from stopCyc[] till slopeLow < slopeHigh or vecBackground[] < vecMinFluor[]
countTrials = 0
slopeHigh = 0.0
slopeLow = 0.0
while True:
countTrials += 1
stopCyc[oRow] = _lrp_findStopCyc(baseCorFluor, oRow)
[startCyc[oRow], startCycFix[oRow]] = _lrp_findStartCyc(baseCorFluor, oRow, stopCyc[oRow])
if stopCyc[oRow] - startCycFix[oRow] > 0:
# Calculate a slope for the upper and the lower half between startCycFix and stopCyc
[slopeLow, slopeHigh] = _lrp_testSlopes(baseCorFluor, oRow, stopCyc, startCycFix)
vecDefBackgrd[oRow] = vecBackground[oRow]
else:
break
if slopeLow >= slopeHigh:
vecBackground[oRow] *= 0.99
baseCorFluor[oRow] = rawFluor[oRow] - vecBackground[oRow]
baseCorFluor[np.isnan(baseCorFluor)] = 0
baseCorFluor[baseCorFluor <= 0.00000001] = np.nan
if (slopeLow < slopeHigh or
vecBackground[oRow] < 0.95 * vecMinFluor[oRow] or
countTrials > 1000):
break
if vecBackground[oRow] < 0.95 * vecMinFluor[oRow]:
vecBaselineError[oRow] = True
# 2. fine tune slope of total line
stepVal = 0.005 * vecBackground[oRow]
baseStep = 1.0
countTrials = 0
trialsToShift = 0
curSlopeDiff = 10
curSignDiff = 0
SlopeHasShifted = False
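# Fine tuning: nudge the baseline by baseStep * stepVal per iteration until the slopes of the
# lower and upper half of the log-linear phase agree; on overshoot step two increments back
# and halve the step size (a bisection-like search)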
while True:
countTrials += 1
trialsToShift += 1
if trialsToShift > 10 and not SlopeHasShifted:
baseStep *= 2
trialsToShift = 0
lastSignDiff = curSignDiff
lastSlopeDiff = curSlopeDiff
vecDefBackgrd[oRow] = vecBackground[oRow]
# apply baseline
baseCorFluor[oRow] = rawFluor[oRow] - vecBackground[oRow]
baseCorFluor[np.isnan(baseCorFluor)] = 0
baseCorFluor[baseCorFluor <= 0.00000001] = np.nan
# find start and stop of log lin phase
stopCyc[oRow] = _lrp_findStopCyc(baseCorFluor, oRow)
[startCyc[oRow], startCycFix[oRow]] = _lrp_findStartCyc(baseCorFluor, oRow, stopCyc[oRow])
if stopCyc[oRow] - startCycFix[oRow] > 0:
[slopeLow, slopeHigh] = _lrp_testSlopes(baseCorFluor, oRow, stopCyc, startCycFix)
curSlopeDiff = np.abs(slopeLow - slopeHigh)
if (slopeLow - slopeHigh) > 0.0:
curSignDiff = 1
else:
curSignDiff = -1
# start with baseline that is too low: slopeLow is low
if slopeLow < slopeHigh:
# increase baseline
vecBackground[oRow] += baseStep * stepVal
else:
# crossed right baseline
# go two steps back
vecBackground[oRow] -= baseStep * stepVal * 2
# decrease stepsize
baseStep /= 2
SlopeHasShifted = True
else:
break
if (((np.abs(curSlopeDiff - lastSlopeDiff) < 0.00001) and
(curSignDiff == lastSignDiff) and SlopeHasShifted) or
(np.abs(curSlopeDiff) < 0.0001) or
(countTrials > 1000)):
break
# reinstate samples that reach the slope diff criterion within 0.9 * vecMinFluor
if curSlopeDiff < 0.0001 and vecDefBackgrd[oRow] > 0.9 * vecMinFluor[oRow]:
vecBaselineError[oRow] = False
# 3: skip sample when fluor[stopCyc]/fluor[startCyc] < 20
loglinlen = 20.0 # RelaxLogLinLengthRG in Pascal may choose 10.0
if baseCorFluor[oRow, stopCyc[oRow] - 1] / baseCorFluor[oRow, startCycFix[oRow] - 1] < loglinlen:
vecShortLogLin[oRow] = True
pcrEff[oRow] = np.power(10, slopeHigh)
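# Efficiency from the log-linear slope: eff = 10**slope, e.g. a slope of ~0.301 gives eff ~2.0 (doubling per cycle)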
else:
vecSkipSample[oRow] = True
vecDefBackgrd[oRow] = 0.99 * vecMinFluor[oRow]
baseCorFluor[oRow] = rawFluor[oRow] - vecDefBackgrd[oRow]
baseCorFluor[np.isnan(baseCorFluor)] = 0
baseCorFluor[baseCorFluor <= 0.00000001] = np.nan
# These values are used for the table
stopCyc[oRow] = spFl[1]
startCyc[oRow] = spFl[1] + 1
startCycFix[oRow] = spFl[1] + 1
pcrEff[oRow] = np.nan
if vecBaselineError[oRow]:
vecSkipSample[oRow] = True
vecBackground = vecDefBackgrd
baselineCorrectedData = baseCorFluor
# Check if cq values are stable with a modified baseline
checkFluor = np.zeros(spFl, dtype=np.float64)
[meanPcrEff, _unused] = _lrp_meanPcrEff(None, [], pcrEff, vecSkipSample, vecNoPlateau, vecShortLogLin)
# The baseline is only used for this check
checkBaseline = np.log10(upWin[0]) - np.log10(meanPcrEff)
for oRow in range(0, spFl[0]):
if vecShortLogLin[oRow] and not vecNoAmplification[oRow]:
# Recalculate it separately from the good values
checkFluor[oRow] = rawFluor[oRow] - 1.05 * vecBackground[oRow]
checkFluor[np.isnan(checkFluor)] = 0.0
checkFluor[checkFluor <= 0.00000001] = np.nan
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
maxFlour = np.nanmax(checkFluor)
if np.isnan(maxFlour):
tempMeanX, tempMeanY, tempPcrEff, _unused, _unused2, _unused3 = _lrp_paramInWindow(baseCorFluor,
oRow,
upWin[0],
lowWin[0])
else:
tempMeanX, tempMeanY, tempPcrEff, _unused, _unused2, _unused3 = _lrp_paramInWindow(checkFluor,
oRow,
upWin[0],
lowWin[0])
if tempPcrEff > 1.000000000001:
CtShiftUp = tempMeanX + (checkBaseline - tempMeanY) / np.log10(tempPcrEff)
else:
CtShiftUp = 0.0
checkFluor[oRow] = rawFluor[oRow] - 0.95 * vecBackground[oRow]
checkFluor[np.isnan(checkFluor)] = 0
checkFluor[checkFluor <= 0.00000001] = np.nan
tempMeanX, tempMeanY, tempPcrEff, _unused, _unused2, _unused3 = _lrp_paramInWindow(checkFluor,
oRow,
upWin[0],
lowWin[0])
if tempPcrEff > 1.000000000001:
CtShiftDown = tempMeanX + (checkBaseline - tempMeanY) / np.log10(tempPcrEff)
else:
CtShiftDown = 0.0
if np.abs(CtShiftUp - CtShiftDown) > 1.0:
vecBaselineError[oRow] = True
vecSkipSample[oRow] = True
vecCtIsShifting[oRow] = True
else:
if not vecBaselineError[oRow]:
vecSkipSample[oRow] = False
vecSkipSample[vecExcludedByUser] = True
# Update the window
lastCycMeanMax = _lrp_lastCycMeanMax(baseCorFluor, vecSkipSample, vecNoPlateau)
upWin[0] = 0.1 * lastCycMeanMax
lowWin[0] = 0.1 * lastCycMeanMax / 16.0
maxFluorTotal = np.nanmax(baseCorFluor)
minFluorTotal = np.nanmin(baseCorFluor)
if minFluorTotal < maxFluorTotal / 10000:
minFluorTotal = maxFluorTotal / 10000
# Fixme: Per group
# CheckNoisiness
skipGroup = False
maxLim = _lrp_meanStopFluor(baseCorFluor, None, None, stopCyc, vecSkipSample, vecNoPlateau)
if maxLim > 0.0:
maxLim = np.log10(maxLim)
else:
skipGroup = True
checkMeanEff = 1.0
if not skipGroup:
step = pointsInWoL * _lrp_logStepStop(baseCorFluor, None, [], stopCyc, vecSkipSample, vecNoPlateau)
upWin, lowWin = _lrp_setLogWin(None, maxLim, step, upWin, lowWin, maxFluorTotal, minFluorTotal)
# checkBaseline = np.log10(0.5 * np.round(1000 * np.power(10, upWin[0])) / 1000)
_unused, _unused2, tempPcrEff, _unused3, _unused4, _unused5 = _lrp_allParamInWindow(baseCorFluor,
None, [],
indMeanX, indMeanY,
pcrEff, nNulls,
nInclu, correl,
upWin, lowWin,
vecNoAmplification,
vecBaselineError)
checkMeanEff, _unused = _lrp_meanPcrEff(None, [], tempPcrEff, vecSkipSample, vecNoPlateau, vecShortLogLin)
if checkMeanEff < 1.001:
skipGroup = True
if not skipGroup:
foldWidth = np.log10(np.power(checkMeanEff, pointsInWoL))
upWin, lowWin = _lrp_setLogWin(None, maxLim, foldWidth, upWin, lowWin, maxFluorTotal, minFluorTotal)
# compare to Log(1.01*lowLim) to compensate for
# the truncation in cuplimedit with + 0.0043
lowLim = maxLim - foldWidth + 0.0043
for oRow in range(0, spFl[0]):
if not vecSkipSample[oRow]:
startWinCyc, stopWinCyc, _unused = _lrp_startStopInWindow(baseCorFluor, oRow, upWin[0], lowWin[0])
minStartCyc = startWinCyc - 1
# Handle possible NaN
while np.isnan(baseCorFluor[oRow, minStartCyc - 1]) and minStartCyc > 1:
minStartCyc -= 1
minStopCyc = stopWinCyc - 1
while np.isnan(baseCorFluor[oRow, minStopCyc - 1]) and minStopCyc > 2:
minStopCyc -= 1
minStartFlour = baseCorFluor[oRow, minStartCyc - 1]
if np.isnan(minStartFlour):
minStartFlour = 0.00001
startStep = np.log10(baseCorFluor[oRow, startWinCyc - 1]) - np.log10(minStartFlour)
stopStep = np.log10(baseCorFluor[oRow, stopWinCyc - 1]) - np.log10(baseCorFluor[oRow, minStopCyc - 1])
if (np.log10(minStartFlour) > lowLim and not
((minStartFlour < baseCorFluor[oRow, startWinCyc - 1] and startStep < 1.2 * stopStep) or
(startWinCyc - minStartCyc > 1.2))):
vecNoisySample[oRow] = True
vecSkipSample[oRow] = True
if saveBaslineCorr:
rawTable = [[header[0][rar_id], header[0][rar_well], header[0][rar_sample], header[0][rar_tar], header[0][rar_excl]]]
for oCol in range(0, spFl[1]):
rawTable[0].append(oCol + 1)
for oRow in range(0, spFl[0]):
rawTable.append([res[oRow][rar_id], res[oRow][rar_well], res[oRow][rar_sample], res[oRow][rar_tar], res[oRow][rar_excl]])
for oCol in range(0, spFl[1]):
rawTable[oRow + 1].append(float(baselineCorrectedData[oRow, oCol]))
finalData["baselineCorrectedData"] = rawTable
if timeRun:
stop_time = datetime.datetime.now() - start_time
print("Done Baseline: " + str(stop_time) + "sec")
###########################################################
# Calculation of the Window of Linearity (WOL) per target #
###########################################################
# Set a starting window for all groups
for tar in range(1, targetsCount):
upWin[tar] = upWin[0]
lowWin[tar] = lowWin[0]
for oRow in range(0, spFl[0]):
if vecNoAmplification[oRow] or vecBaselineError[oRow] or stopCyc[oRow] == spFl[1]:
vecNoPlateau[oRow] = True
else:
vecNoPlateau[oRow] = False
for tar in range(1, targetsCount):
indMeanX, indMeanY, pcrEff, nNulls, nInclu, correl, upWin, lowWin, threshold, vecIsUsedInWoL = _lrp_setWoL(baseCorFluor, tar, vecTarget, pointsInWoL,
indMeanX, indMeanY, pcrEff, nNulls, nInclu,
correl, upWin, lowWin, maxFluorTotal,
minFluorTotal, stopCyc, startCyc, threshold,
vecNoAmplification, vecBaselineError,
vecSkipSample, vecNoPlateau, vecShortLogLin,
vecIsUsedInWoL)
indMeanX, indMeanY, pcrEff, nNulls, nInclu, correl, upWin, lowWin, threshold, vecIsUsedInWoL, vecNoPlateau = _lrp_assignNoPlateau(baseCorFluor, tar, vecTarget,
pointsInWoL, indMeanX, indMeanY,
pcrEff, nNulls, nInclu, correl,
upWin, lowWin, maxFluorTotal,
minFluorTotal, stopCyc, startCyc,
threshold, vecNoAmplification,
vecBaselineError, vecSkipSample,
vecNoPlateau, vecShortLogLin,
vecIsUsedInWoL)
# Median values calculation
vecSkipSample_Plat = vecSkipSample.copy()
vecSkipSample_Plat[vecNoPlateau] = True
logThreshold = np.log10(threshold[1:])
threshold[0] = np.power(10, np.mean(logThreshold))
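# The common threshold is the geometric mean of the per-target thresholds (arithmetic mean in log space)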
# Create the warnings for the different chemistries
# Chem Arr 0 1 2 3 4 5 6 7 8 9 10
critCqEff = [28.0, 28.0, 19.0, 16.0, 14.0, 12.0, 11.0, 11.0, 10.0, 10.0, 9.0] # For error Eff < 0.01
critCqN0 = [40.0, 40.0, 27.0, 19.0, 16.0, 13.0, 12.0, 11.0, 10.0, 9.0, 9.0] # For bias N0 < 0.95
for oRow in range(0, spFl[0]):
if res[oRow][rar_tar_chemistry] in ["hydrolysis probe", "labelled reverse primer", "DNA-zyme probe"]:
critCqOffset = 0.0
if (res[oRow][rar_tar_chemistry] == "labelled reverse primer" and
res[oRow][rar_sample_nucleotide] in ["DNA", "genomic DNA"]):
critCqOffset = 1.0
if (res[oRow][rar_tar_chemistry] == "DNA-zyme probe" and
res[oRow][rar_sample_nucleotide] in ["DNA", "genomic DNA"]):
critCqOffset = 4.0
if (res[oRow][rar_tar_chemistry] == "DNA-zyme probe" and
res[oRow][rar_sample_nucleotide] in ["cDNA", "RNA"]):
critCqOffset = 6.0
if (not np.isnan(pcrEff[oRow]) and pcrEff[oRow] > 1.0001 and
threshold[vecTarget[oRow]] > 0.0001 and not (vecNoAmplification[oRow] or vecBaselineError[oRow])):
effIndex = int(np.trunc(10 * pcrEff[oRow] + 1 - 10))
if effIndex < 0:
effIndex = 0
if effIndex > 10:
effIndex = 10
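        # tempCq_Grp is the cycle at which the fitted log-linear amplification line crosses the
        # common threshold: Cq = meanX + (log10(threshold) - meanY) / log10(efficiency).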
tempCq_Grp = indMeanX[oRow] + (np.log10(threshold[0]) - indMeanY[oRow]) / np.log10(pcrEff[oRow])
if tempCq_Grp > 0.0:
if tempCq_Grp < (critCqEff[effIndex] + critCqOffset):
vecTooLowCqEff[oRow] = True
if tempCq_Grp < (critCqN0[effIndex] + critCqOffset):
vecTooLowCqN0[oRow] = True
pcreff_NoNaN = pcrEff.copy()
pcreff_NoNaN[np.isnan(pcrEff)] = 0.0
for tar in range(1, targetsCount):
    # Calculating all choices takes less time than recalculating
pcreff_Skip = pcrEff.copy()
pcreff_Skip[vecTooLowCqEff] = np.nan
pcreff_Skip[vecSkipSample] = np.nan
pcreff_Skip[pcreff_NoNaN < 1.001] = np.nan
pcreff_Skip[~(vecTarget == tar)] = np.nan
pcreff_Skip_Plat = pcreff_Skip.copy()
pcreff_Skip_Plat[vecSkipSample_Plat] = np.nan
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
pcreffMedian_Skip = np.nanmedian(pcreff_Skip)
pcreffMedian_Skip_Plat = np.nanmedian(pcreff_Skip_Plat)
for oRow in range(0, spFl[0]):
if tar == vecTarget[oRow]:
if not np.isnan(pcrEff[oRow]):
if (np.isnan(pcreffMedian_Skip) or
not (pcreffMedian_Skip - pcrEfficiencyExl <= pcrEff[oRow] <= pcreffMedian_Skip + pcrEfficiencyExl)):
vecEffOutlier_Skip_Mean[oRow] = True
if (np.isnan(pcreffMedian_Skip_Plat) or
not (pcreffMedian_Skip_Plat - pcrEfficiencyExl <= pcrEff[oRow] <= pcreffMedian_Skip_Plat + pcrEfficiencyExl)):
vecEffOutlier_Skip_Plat_Mean[oRow] = True
pcreff_Skip_Mean = pcreff_Skip.copy()
pcreff_Skip_Mean[vecEffOutlier_Skip_Mean] = np.nan
pcreff_Skip_Plat_Mean = pcreff_Skip_Plat.copy()
pcreff_Skip_Plat_Mean[vecEffOutlier_Skip_Plat_Mean] = np.nan
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
pcreffMedian_Skip = np.nanmedian(pcreff_Skip_Mean)
pcreffMedian_Skip_Plat = np.nanmedian(pcreff_Skip_Plat_Mean)
for oRow in range(0, spFl[0]):
if tar is None or tar == vecTarget[oRow]:
if not np.isnan(pcrEff[oRow]):
if (np.isnan(pcreffMedian_Skip) or
not (pcreffMedian_Skip - pcrEfficiencyExl <= pcrEff[oRow] <= pcreffMedian_Skip + pcrEfficiencyExl)):
vecEffOutlier_Skip_Mean[oRow] = True
else:
vecEffOutlier_Skip_Mean[oRow] = False
if (np.isnan(pcreffMedian_Skip_Plat) or
not (pcreffMedian_Skip_Plat - pcrEfficiencyExl <= pcrEff[oRow] <= pcreffMedian_Skip_Plat + pcrEfficiencyExl)):
vecEffOutlier_Skip_Plat_Mean[oRow] = True
else:
vecEffOutlier_Skip_Plat_Mean[oRow] = False
else:
vecEffOutlier_Skip_Mean[oRow] = True
vecEffOutlier_Skip_Plat_Mean[oRow] = True
pcreff_Skip_Mean = pcreff_Skip.copy()
pcreff_Skip_Mean[vecEffOutlier_Skip_Mean] = np.nan
pcreff_Skip_Plat_Mean = pcreff_Skip_Plat.copy()
pcreff_Skip_Plat_Mean[vecEffOutlier_Skip_Plat_Mean] = np.nan
vecEffOutlier_Skip_Out[_lrp_removeOutlier(pcreff_Skip, vecNoPlateau)] = True
vecEffOutlier_Skip_Plat_Out[_lrp_removeOutlier(pcreff_Skip_Plat, vecNoPlateau)] = True
pcreff_Skip_Out = pcreff_Skip.copy()
pcreff_Skip_Out[vecEffOutlier_Skip_Out] = np.nan
pcreff_Skip_Plat_Out = pcreff_Skip_Plat.copy()
pcreff_Skip_Plat_Out[vecEffOutlier_Skip_Plat_Out] = np.nan
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
tempMeanEff_Skip = np.nanmean(pcreff_Skip)
tempMeanEff_Skip_Plat = np.nanmean(pcreff_Skip_Plat)
tempMeanEff_Skip_Mean = np.nanmean(pcreff_Skip_Mean)
tempMeanEff_Skip_Plat_Mean = | np.nanmean(pcreff_Skip_Plat_Mean) | numpy.nanmean |
import random
import numpy as np
from scipy.stats import multivariate_normal
from numpy.linalg import norm
from collision import *
from gym.envs.classic_control import rendering
class Vaga:
"""
    Represents a parking space between two cars of given width and length. The cars have a collision shape.
"""
def __init__(self, target_inic, desvio_target, dist_target_tol, compr, larg, fator_larg, fator_compr):
"""
        Initialize the parking space. Vertices are named starting from the lowest, leftmost point, going counterclockwise.
        Args:
            target_inic (tuple): coordinates and orientation of the initial target (parking space).
            desvio_target (tuple): deviation applied to each coordinate of the initial target.
            dist_target_tol (float): tolerance on the distance to the target.
            compr (float): length of the car.
            larg (float): width of the car.
            fator_larg (float): the width of the space is given by fator_larg*larg.
            fator_compr (float): factor for the distance from the car to the end of the space.
        Returns:
            Vaga: object composed of two rectangles and a space between them.
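        Example (hypothetical values)::
            vaga = Vaga(target_inic=(0.0, 0.0, 0.0), desvio_target=(0.1, 0.1, 0.05),
                        dist_target_tol=0.2, compr=4.0, larg=2.0,
                        fator_larg=0.5, fator_compr=0.25)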
"""
self.target_inic = target_inic
self.desvio_target = desvio_target
self.compr = compr
self.larg = larg
self.fator_larg = fator_larg
        self.fator_compr = fator_compr
self.dist_target_tol = dist_target_tol
centro_x = target_inic[0] + random.uniform(- desvio_target[0], + desvio_target[0])
centro_y = target_inic[1] + random.uniform(- desvio_target[1], + desvio_target[1])
angle = target_inic[2] + random.uniform(- desvio_target[2], + desvio_target[2])
self.rect_esq = Poly(pos=Vector(centro_x - (larg / 2) * (1 + fator_larg), centro_y),
points=[
Vector(- larg/2,
- compr/2),
Vector(+ larg/2,
- compr/2),
Vector(+ larg/2,
+ compr/2),
Vector(- larg/2,
+ compr/2)])
self.rect_dir = Poly(pos=Vector(centro_x + (larg / 2) * (1 + fator_larg), centro_y),
points=[
Vector(- larg / 2,
- compr / 2),
Vector(+ larg / 2,
- compr / 2),
Vector(+ larg / 2,
+ compr / 2),
Vector(- larg / 2,
+ compr / 2)])
c1 = list(self.rect_esq.points[2])
a2 = list(self.rect_dir.points[0])
d2 = list(self.rect_dir.points[3])
x = [a2[0], d2[0]]
y = [a2[1], d2[1]]
coef = np.polyfit(x, y, 1)
m = coef[0]
k = coef[1]
dist_desej = np.abs(c1[0] - d2[0])
self.rect_esq.angle = angle
self.rect_dir.angle = angle
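        # After rotating both rectangles, recompute the inner corners and measure the
        # perpendicular distance from corner c1 (left car) to the edge a2-d2 (right car)
        # using the 2-D cross product: |(d2 - a2) x (c1 - a2)| / ||d2 - a2||.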
c1 = np.asarray(list(self.rect_esq.points[2]))
a2 = np.asarray(list(self.rect_dir.points[0]))
d2 = np.asarray(list(self.rect_dir.points[3]))
dist_apos_rot = np.abs(np.cross(d2 - a2, c1 - a2)) / | norm(d2 - a2) | numpy.linalg.norm |
from sklearn.metrics.pairwise import cosine_similarity
from .single_template import Single_Template_AverageAll
import numpy as np
def FirstFrame_CosineSimilarity(features, num_of_template):
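    # Greedy template selection: keep the first frame as the first template, then fill each
    # remaining slot with the frame whose summed cosine similarity to the templates chosen so
    # far is lowest (i.e. the most dissimilar frame not already selected).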
templateList = np.empty((num_of_template, 512))
templateList[0] = features[0]
features = features[1:]
index = 1
count = 0
fingerList = []
fingerList.append(0)
while index < num_of_template:
finalCount = 0
count = 0
diff_score = 1
for feature in features:
count = count + 1
score = 0
for i in range(index):
x = np.expand_dims(templateList[i], axis=0)
y = np.expand_dims(feature, axis=0)
score = score + cosine_similarity(x, y)
score /= len(templateList)
            if score < diff_score and count not in fingerList:
diff_score = score
templateList[index] = feature
finalCount = count
index = index + 1
fingerList.append(finalCount)
return templateList
def FirstFrame_CosineSimilarity_Average(features, num_of_template):
templateList = FirstFrame_CosineSimilarity(features, num_of_template)
return Single_Template_AverageAll(templateList)
# All sklearn.cluster methods
# Number of templates methods
def kMeansPreprocessed(features, num_of_template):
from sklearn.cluster import KMeans
kmeans = KMeans(n_clusters=num_of_template)
kmeans.fit(features)
centers = kmeans.cluster_centers_
return centers
def kMeans(features, num_of_template):
from sklearn.cluster import KMeans
kmeans = KMeans(n_clusters=num_of_template)
kmeans.fit(features)
centers = kmeans.cluster_centers_
return centers
def kMedoids(features, num_of_template):
from sklearn_extra.cluster import KMedoids
kmedoids = KMedoids(n_clusters=num_of_template, metric="euclidean").fit(features)
centers = kmedoids.cluster_centers_
return centers
def featureAgglomeration(features, num_of_template):
from sklearn.cluster import FeatureAgglomeration
agglo = FeatureAgglomeration(n_clusters=num_of_template).fit(features.T)
features_reduced = agglo.transform(features.T)
return features_reduced.T
def miniBatchKMeans(features, num_of_template):
from sklearn.cluster import MiniBatchKMeans
kmeans = MiniBatchKMeans(n_clusters=num_of_template, batch_size=features.shape[0] // num_of_template).fit(features)
centers = kmeans.cluster_centers_
return centers
def spectralClustering(features, num_of_template):
from sklearn.cluster import SpectralClustering
clustering = SpectralClustering(n_clusters=num_of_template).fit(features)
labels = clustering.labels_
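    # SpectralClustering exposes no cluster_centers_, so the centers are computed manually as
    # the mean feature vector of each cluster (summing per label, then dividing by the count).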
cluster_sum = []
for label in np.unique(labels):
cluster_sum.append(np.sum(features[np.where(labels == label)], axis=0))
cluster_sum = np.array(cluster_sum)
unique, counts = np.unique(labels, return_counts=True)
center = []
for i in range(len(unique)):
center.append(np.divide(cluster_sum[i], counts[i]))
return np.array(center)
# Other parameters methods
def dbscan(features):
from sklearn.cluster import DBSCAN
clustering = DBSCAN().fit(features)
labels = clustering.labels_
cluster_sum = []
for label in np.unique(labels):
cluster_sum.append(np.sum(features[np.where(labels == label)], axis=0))
cluster_sum = np.array(cluster_sum)
unique, counts = np.unique(labels, return_counts=True)
center = []
for i in range(len(unique)):
center.append(np.divide(cluster_sum[i], counts[i]))
return np.array(center)
def optics(features):
from sklearn.cluster import OPTICS
clustering = OPTICS(min_samples=10).fit(features)
labels = clustering.labels_
cluster_sum = []
for label in np.unique(labels):
cluster_sum.append(np.sum(features[np.where(labels == label)], axis=0))
cluster_sum = np.array(cluster_sum)
unique, counts = np.unique(labels, return_counts=True)
center = []
for i in range(len(unique)):
center.append(np.divide(cluster_sum[i], counts[i]))
return np.array(center)
def meanShift(features):
from sklearn.cluster import MeanShift
clustering = MeanShift().fit(features)
labels = clustering.labels_
cluster_sum = []
for label in np.unique(labels):
cluster_sum.append(np.sum(features[np.where(labels == label)], axis=0))
cluster_sum = np.array(cluster_sum)
unique, counts = np.unique(labels, return_counts=True)
center = []
for i in range(len(unique)):
center.append(np.divide(cluster_sum[i], counts[i]))
return np.array(center)
if __name__ == "__main__":
features = np.random.rand(150, 512)
import time
for i in range(1, 6):
start = time.time()
while np.any( | np.isnan(features) | numpy.isnan |
import numpy as np
import random
import numexpr as ne
def gen_layer(rin, rout, nsize):
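    # Uniformly sample points inside a spherical shell rin <= r <= rout: directions come from
    # uniform phi and uniform cos(theta); radii use the inverse-CDF trick r = cbrt(u) with
    # u ~ U(rin^3, rout^3), which gives a density that is uniform per unit volume.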
R = 1.0
phi = np.random.uniform(0, 2*np.pi, size=(nsize))
costheta = np.random.uniform(-1, 1, size=(nsize))
u = np.random.uniform(rin**3, rout**3, size=(nsize))
theta = np.arccos( costheta )
r = R * np.cbrt( u )
x = r * np.sin( theta ) * np.cos( phi )
y = r * np.sin( theta ) * np.sin( phi )
z = r * np.cos( theta )
return( x, y, z )
def LPFbead(qrange, sigmabead):
'''
Compute the spherical form factor given a range of q values.
Parameters
----------
qrange: numpy.array
array of values in q-space to compute form factor for.
sigmabead: float
diameter of the sphere.
Return
-------
Fqb: numpy.array
array of values of the spherical form factors (F(q)) computed at q-points listed in qrange.
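    Notes
    -----
    Implements the sphere form factor F(q) = 3 * (sin(qR) - qR*cos(qR)) / (qR)**3,
    with bead radius R = sigmabead / 2.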
'''
R=np.true_divide(sigmabead,2)
QR=np.multiply(qrange,R)
Fqb=np.multiply(np.true_divide(np.sin(QR)-np.multiply(QR,np.cos(QR)),np.power(QR,3)),3)
return Fqb
def LPOmega(qrange, nAin, nAout, nB, r): # qvalues number_of_B number_of_A scatterer_coordinates
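    # Computes the intra-particle scattering term via the Debye formula
    # omega(q) = 1 + (2/Ntot) * sum_{i<j} sin(q*r_ij)/(q*r_ij),
    # looping only over pairs i<j and doubling the sum to avoid double counting.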
Ntot=nAin+nB+nAout # Total number of scatterers to loop through
omegaarrt=np.zeros((1,len(qrange))) # initiating array
omegaarr=np.zeros((1,len(qrange))) # initiating array
    rur=r[0,:,:]  # select the first set of scatterer coordinates
rur=rur.transpose()
for i in range(Ntot-1): # loops through index and all further indexes to prevent double counting
all_disp = rur[i,:]-rur[(i+1):,:]
rij = np.sqrt(np.sum(np.square(all_disp),axis=1))
rij = rij.transpose()
rs = rij[:,np.newaxis] # reshapes array for consistency
Q = qrange[np.newaxis,:] # reshapes array for consistency
vals = ne.evaluate("sin(Q*rs)/(Q*rs)") # ne is efficient at calculations
inds=np.argwhere(np.isnan(vals)) # error catching in case there are NaN values
if len(inds)>0:
for val in inds:
vals[val[0],val[1]]=1
inds_double_check=np.argwhere(np.isnan(vals))
if len(inds_double_check)>0:
print('nan error!')
vals = ne.evaluate("sum((vals), axis=0)") # adds together scatterer contributions for each q value
omegaarr+=vals
    omegaarr=np.true_divide(2*omegaarr,Ntot)+1  # +1 accounts for the guaranteed self-term of each bead; 2* restores the pairs skipped by looping only over i<j
omegaarrt+=omegaarr # stores values between loops
return omegaarrt
def visualize(r, Rcore, dR_Ain, dR_B, dR_Aout, sigmabead):
import py3Dmol
view = py3Dmol.view()
for ri in r[0,:,:].transpose():
if np.linalg.norm(ri) < Rcore+dR_Ain or | np.linalg.norm(ri) | numpy.linalg.norm |
# -*- coding: utf-8 -*-
"""
Unit tests for the spike_train_correlation module.
:copyright: Copyright 2015-2016 by the Elephant team, see `doc/authors.rst`.
:license: Modified BSD, see LICENSE.txt for details.
"""
import sys
import unittest
import neo
import numpy as np
import quantities as pq
from numpy.testing import assert_array_equal, assert_array_almost_equal
import elephant.conversion as conv
import elephant.spike_train_correlation as sc
from elephant.spike_train_generation import homogeneous_poisson_process,\
homogeneous_gamma_process
import math
python_version_major = sys.version_info.major
class CovarianceTestCase(unittest.TestCase):
def setUp(self):
# These two arrays must be such that they do not have coincidences
# spanning across two neighbor bins assuming ms bins [0,1),[1,2),...
self.test_array_1d_0 = [
1.3, 7.56, 15.87, 28.23, 30.9, 34.2, 38.2, 43.2]
self.test_array_1d_1 = [
1.02, 2.71, 18.82, 28.46, 28.79, 43.6]
# Build spike trains
self.st_0 = neo.SpikeTrain(
self.test_array_1d_0, units='ms', t_stop=50.)
self.st_1 = neo.SpikeTrain(
self.test_array_1d_1, units='ms', t_stop=50.)
# And binned counterparts
self.binned_st = conv.BinnedSpikeTrain(
[self.st_0, self.st_1], t_start=0 * pq.ms, t_stop=50. * pq.ms,
bin_size=1 * pq.ms)
def test_covariance_binned(self):
'''
Test covariance between two binned spike trains.
'''
# Calculate clipped and unclipped
res_clipped = sc.covariance(
self.binned_st, binary=True, fast=False)
res_unclipped = sc.covariance(
self.binned_st, binary=False, fast=False)
# Check dimensions
self.assertEqual(len(res_clipped), 2)
self.assertEqual(len(res_unclipped), 2)
# Check result unclipped against result calculated from scratch for
# the off-diagonal element
mat = self.binned_st.to_array()
mean_0 = np.mean(mat[0])
mean_1 = np.mean(mat[1])
target_from_scratch = \
np.dot(mat[0] - mean_0, mat[1] - mean_1) / (len(mat[0]) - 1)
        # Check result unclipped against result calculated by numpy.cov
target_numpy = np.cov(mat)
self.assertAlmostEqual(target_from_scratch, target_numpy[0][1])
self.assertAlmostEqual(res_unclipped[0][1], target_from_scratch)
self.assertAlmostEqual(res_unclipped[1][0], target_from_scratch)
# Check result clipped against result calculated from scratch for
        # the off-diagonal element
mat = self.binned_st.to_bool_array()
mean_0 = np.mean(mat[0])
mean_1 = np.mean(mat[1])
target_from_scratch = \
np.dot(mat[0] - mean_0, mat[1] - mean_1) / (len(mat[0]) - 1)
        # Check result clipped against result calculated by numpy.cov
target_numpy = np.cov(mat)
self.assertAlmostEqual(target_from_scratch, target_numpy[0][1])
self.assertAlmostEqual(res_clipped[0][1], target_from_scratch)
self.assertAlmostEqual(res_clipped[1][0], target_from_scratch)
def test_covariance_binned_same_spiketrains(self):
'''
        Test if the covariance between two identical binned spike
trains evaluates to the expected 2x2 matrix.
'''
# Calculate correlation
binned_st = conv.BinnedSpikeTrain(
[self.st_0, self.st_0], t_start=0 * pq.ms, t_stop=50. * pq.ms,
bin_size=1 * pq.ms)
result = sc.covariance(binned_st, fast=False)
# Check dimensions
self.assertEqual(len(result), 2)
# Check result
assert_array_equal(result[0][0], result[1][1])
def test_covariance_binned_short_input(self):
'''
Test if input list of only one binned spike train yields correct result
that matches numpy.cov (covariance with itself)
'''
# Calculate correlation
binned_st = conv.BinnedSpikeTrain(
self.st_0, t_start=0 * pq.ms, t_stop=50. * pq.ms,
bin_size=1 * pq.ms)
result = sc.covariance(binned_st, binary=True, fast=False)
        # Check result against the covariance calculated by numpy.cov
mat = binned_st.to_bool_array()
target = np.cov(mat)
# Check result and dimensionality of result
self.assertEqual(result.ndim, target.ndim)
assert_array_almost_equal(result, target)
assert_array_almost_equal(target,
sc.covariance(binned_st, binary=True,
fast=True))
def test_covariance_fast_mode(self):
np.random.seed(27)
st = homogeneous_poisson_process(rate=10 * pq.Hz, t_stop=10 * pq.s)
binned_st = conv.BinnedSpikeTrain(st, n_bins=10)
assert_array_almost_equal(sc.covariance(binned_st, fast=False),
sc.covariance(binned_st, fast=True))
class CorrCoefTestCase(unittest.TestCase):
def setUp(self):
# These two arrays must be such that they do not have coincidences
# spanning across two neighbor bins assuming ms bins [0,1),[1,2),...
self.test_array_1d_0 = [
1.3, 7.56, 15.87, 28.23, 30.9, 34.2, 38.2, 43.2]
self.test_array_1d_1 = [
1.02, 2.71, 18.82, 28.46, 28.79, 43.6]
self.test_array_1d_2 = []
# Build spike trains
self.st_0 = neo.SpikeTrain(
self.test_array_1d_0, units='ms', t_stop=50.)
self.st_1 = neo.SpikeTrain(
self.test_array_1d_1, units='ms', t_stop=50.)
self.st_2 = neo.SpikeTrain(
self.test_array_1d_2, units='ms', t_stop=50.)
# And binned counterparts
self.binned_st = conv.BinnedSpikeTrain(
[self.st_0, self.st_1], t_start=0 * pq.ms, t_stop=50. * pq.ms,
bin_size=1 * pq.ms)
def test_corrcoef_binned(self):
'''
Test the correlation coefficient between two binned spike trains.
'''
# Calculate clipped and unclipped
res_clipped = sc.correlation_coefficient(
self.binned_st, binary=True)
res_unclipped = sc.correlation_coefficient(
self.binned_st, binary=False)
# Check dimensions
self.assertEqual(len(res_clipped), 2)
self.assertEqual(len(res_unclipped), 2)
# Check result unclipped against result calculated from scratch for
# the off-diagonal element
mat = self.binned_st.to_array()
mean_0 = np.mean(mat[0])
mean_1 = np.mean(mat[1])
target_from_scratch = \
np.dot(mat[0] - mean_0, mat[1] - mean_1) / \
np.sqrt(
np.dot(mat[0] - mean_0, mat[0] - mean_0) *
np.dot(mat[1] - mean_1, mat[1] - mean_1))
# Check result unclipped against result calculated by numpy.corrcoef
target_numpy = np.corrcoef(mat)
self.assertAlmostEqual(target_from_scratch, target_numpy[0][1])
self.assertAlmostEqual(res_unclipped[0][1], target_from_scratch)
self.assertAlmostEqual(res_unclipped[1][0], target_from_scratch)
# Check result clipped against result calculated from scratch for
        # the off-diagonal element
mat = self.binned_st.to_bool_array()
mean_0 = np.mean(mat[0])
mean_1 = np.mean(mat[1])
target_from_scratch = \
np.dot(mat[0] - mean_0, mat[1] - mean_1) / \
np.sqrt(
np.dot(mat[0] - mean_0, mat[0] - mean_0) *
np.dot(mat[1] - mean_1, mat[1] - mean_1))
        # Check result clipped against result calculated by numpy.corrcoef
target_numpy = np.corrcoef(mat)
self.assertAlmostEqual(target_from_scratch, target_numpy[0][1])
self.assertAlmostEqual(res_clipped[0][1], target_from_scratch)
self.assertAlmostEqual(res_clipped[1][0], target_from_scratch)
def test_corrcoef_binned_same_spiketrains(self):
'''
Test if the correlation coefficient between two identical binned spike
trains evaluates to a 2x2 matrix of ones.
'''
# Calculate correlation
binned_st = conv.BinnedSpikeTrain(
[self.st_0, self.st_0], t_start=0 * pq.ms, t_stop=50. * pq.ms,
bin_size=1 * pq.ms)
result = sc.correlation_coefficient(binned_st, fast=False)
target = np.ones((2, 2))
# Check dimensions
self.assertEqual(len(result), 2)
# Check result
assert_array_almost_equal(result, target)
assert_array_almost_equal(
result, sc.correlation_coefficient(
binned_st, fast=True))
def test_corrcoef_binned_short_input(self):
'''
Test if input list of one binned spike train yields 1.0.
'''
# Calculate correlation
binned_st = conv.BinnedSpikeTrain(
self.st_0, t_start=0 * pq.ms, t_stop=50. * pq.ms,
bin_size=1 * pq.ms)
result = sc.correlation_coefficient(binned_st, fast=False)
target = np.array(1.)
# Check result and dimensionality of result
self.assertEqual(result.ndim, 0)
assert_array_almost_equal(result, target)
assert_array_almost_equal(
result, sc.correlation_coefficient(
binned_st, fast=True))
@unittest.skipUnless(python_version_major == 3, "assertWarns requires 3.2")
def test_empty_spike_train(self):
'''
Test whether a warning is yielded in case of empty spike train.
Also check correctness of the output array.
'''
# st_2 is empty
binned_12 = conv.BinnedSpikeTrain([self.st_1, self.st_2],
bin_size=1 * pq.ms)
with self.assertWarns(UserWarning):
result = sc.correlation_coefficient(binned_12, fast=False)
# test for NaNs in the output array
target = np.zeros((2, 2)) * np.NaN
target[0, 0] = 1.0
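        # With one empty spike train the pairwise correlation is undefined (NaN);
        # only the self-correlation of the non-empty train is defined and equals 1.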
assert_array_almost_equal(result, target)
def test_corrcoef_fast_mode(self):
np.random.seed(27)
st = homogeneous_poisson_process(rate=10 * pq.Hz, t_stop=10 * pq.s)
binned_st = conv.BinnedSpikeTrain(st, n_bins=10)
assert_array_almost_equal(
sc.correlation_coefficient(
binned_st, fast=False), sc.correlation_coefficient(
binned_st, fast=True))
class CrossCorrelationHistogramTest(unittest.TestCase):
def setUp(self):
# These two arrays must be such that they do not have coincidences
# spanning across two neighbor bins assuming ms bins [0,1),[1,2),...
self.test_array_1d_1 = [
1.3, 7.56, 15.87, 28.23, 30.9, 34.2, 38.2, 43.2]
self.test_array_1d_2 = [
1.02, 2.71, 18.82, 28.46, 28.79, 43.6]
# Build spike trains
self.st_1 = neo.SpikeTrain(
self.test_array_1d_1, units='ms', t_stop=50.)
self.st_2 = neo.SpikeTrain(
self.test_array_1d_2, units='ms', t_stop=50.)
# And binned counterparts
self.binned_st1 = conv.BinnedSpikeTrain(
[self.st_1], t_start=0 * pq.ms, t_stop=50. * pq.ms,
bin_size=1 * pq.ms)
self.binned_st2 = conv.BinnedSpikeTrain(
[self.st_2], t_start=0 * pq.ms, t_stop=50. * pq.ms,
bin_size=1 * pq.ms)
self.binned_sts = conv.BinnedSpikeTrain(
[self.st_1, self.st_2], t_start=0 * pq.ms, t_stop=50. * pq.ms,
bin_size=1 * pq.ms)
# Binned sts to check errors raising
self.st_check_bin_size = conv.BinnedSpikeTrain(
[self.st_1], t_start=0 * pq.ms, t_stop=50. * pq.ms,
bin_size=5 * pq.ms)
self.st_check_t_start = conv.BinnedSpikeTrain(
[self.st_1], t_start=1 * pq.ms, t_stop=50. * pq.ms,
bin_size=1 * pq.ms)
self.st_check_t_stop = conv.BinnedSpikeTrain(
[self.st_1], t_start=0 * pq.ms, t_stop=40. * pq.ms,
bin_size=1 * pq.ms)
self.st_check_dimension = conv.BinnedSpikeTrain(
[self.st_1, self.st_2], t_start=0 * pq.ms, t_stop=50. * pq.ms,
bin_size=1 * pq.ms)
def test_cross_correlation_histogram(self):
'''
Test generic result of a cross-correlation histogram between two binned
spike trains.
'''
# Calculate CCH using Elephant (normal and binary version) with
# mode equal to 'full' (whole spike trains are correlated)
cch_clipped, bin_ids_clipped = sc.cross_correlation_histogram(
self.binned_st1, self.binned_st2, window='full',
binary=True)
cch_unclipped, bin_ids_unclipped = sc.cross_correlation_histogram(
self.binned_st1, self.binned_st2, window='full', binary=False)
cch_clipped_mem, bin_ids_clipped_mem = sc.cross_correlation_histogram(
self.binned_st1, self.binned_st2, window='full',
binary=True, method='memory')
cch_unclipped_mem, bin_ids_unclipped_mem = \
sc.cross_correlation_histogram(
self.binned_st1, self.binned_st2, window='full',
binary=False, method='memory')
# Check consistency two methods
assert_array_equal(
np.squeeze(cch_clipped.magnitude), np.squeeze(
cch_clipped_mem.magnitude))
assert_array_equal(
np.squeeze(cch_clipped.times), np.squeeze(
cch_clipped_mem.times))
assert_array_equal(
np.squeeze(cch_unclipped.magnitude), np.squeeze(
cch_unclipped_mem.magnitude))
assert_array_equal(
np.squeeze(cch_unclipped.times), np.squeeze(
cch_unclipped_mem.times))
assert_array_almost_equal(bin_ids_clipped, bin_ids_clipped_mem)
assert_array_almost_equal(bin_ids_unclipped, bin_ids_unclipped_mem)
# Check normal correlation Note: Use numpy correlate to verify result.
# Note: numpy conventions for input array 1 and input array 2 are
# swapped compared to Elephant!
mat1 = self.binned_st1.to_array()[0]
mat2 = self.binned_st2.to_array()[0]
target_numpy = np.correlate(mat2, mat1, mode='full')
assert_array_equal(
target_numpy, np.squeeze(cch_unclipped.magnitude))
# Check cross correlation function for several displacements tau
# Note: Use Elephant corrcoeff to verify result
tau = [-25.0, 0.0, 13.0] # in ms
for t in tau:
# adjust t_start, t_stop to shift by tau
t0 = np.min([self.st_1.t_start + t * pq.ms, self.st_2.t_start])
t1 = np.max([self.st_1.t_stop + t * pq.ms, self.st_2.t_stop])
st1 = neo.SpikeTrain(self.st_1.magnitude + t, units='ms',
t_start=t0 * pq.ms, t_stop=t1 * pq.ms)
st2 = neo.SpikeTrain(self.st_2.magnitude, units='ms',
t_start=t0 * pq.ms, t_stop=t1 * pq.ms)
binned_sts = conv.BinnedSpikeTrain([st1, st2],
bin_size=1 * pq.ms,
t_start=t0 * pq.ms,
t_stop=t1 * pq.ms)
            # calculate corrcoef
corrcoef = sc.correlation_coefficient(binned_sts)[1, 0]
# expand t_stop to have two spike trains with same length as st1,
# st2
st1 = neo.SpikeTrain(self.st_1.magnitude, units='ms',
t_start=self.st_1.t_start,
t_stop=self.st_1.t_stop + np.abs(t) * pq.ms)
st2 = neo.SpikeTrain(self.st_2.magnitude, units='ms',
t_start=self.st_2.t_start,
t_stop=self.st_2.t_stop + np.abs(t) * pq.ms)
binned_st1 = conv.BinnedSpikeTrain(
st1, t_start=0 * pq.ms, t_stop=(50 + np.abs(t)) * pq.ms,
bin_size=1 * pq.ms)
binned_st2 = conv.BinnedSpikeTrain(
st2, t_start=0 * pq.ms, t_stop=(50 + np.abs(t)) * pq.ms,
bin_size=1 * pq.ms)
# calculate CCHcoef and take value at t=tau
CCHcoef, _ = sc.cch(binned_st1, binned_st2,
cross_correlation_coefficient=True)
left_edge = - binned_st1.n_bins + 1
tau_bin = int(t / float(binned_st1.bin_size.magnitude))
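            # bin_ids of the full CCH run from -(n_bins - 1) to +(n_bins - 1), so the array
            # index corresponding to lag t is tau_bin - left_edge.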
assert_array_almost_equal(
corrcoef, CCHcoef[tau_bin - left_edge].magnitude)
# Check correlation using binary spike trains
mat1 = np.array(self.binned_st1.to_bool_array()[0], dtype=int)
mat2 = np.array(self.binned_st2.to_bool_array()[0], dtype=int)
target_numpy = np.correlate(mat2, mat1, mode='full')
assert_array_equal(
target_numpy, np.squeeze(cch_clipped.magnitude))
# Check the time axis and bin IDs of the resulting AnalogSignal
assert_array_almost_equal(
(bin_ids_clipped - 0.5) * self.binned_st1.bin_size,
cch_unclipped.times)
assert_array_almost_equal(
(bin_ids_clipped - 0.5) * self.binned_st1.bin_size,
cch_clipped.times)
# Calculate CCH using Elephant (normal and binary version) with
# mode equal to 'valid' (only completely overlapping intervals of the
# spike trains are correlated)
cch_clipped, bin_ids_clipped = sc.cross_correlation_histogram(
self.binned_st1, self.binned_st2, window='valid',
binary=True)
cch_unclipped, bin_ids_unclipped = sc.cross_correlation_histogram(
self.binned_st1, self.binned_st2, window='valid',
binary=False)
cch_clipped_mem, bin_ids_clipped_mem = sc.cross_correlation_histogram(
self.binned_st1, self.binned_st2, window='valid',
binary=True, method='memory')
cch_unclipped_mem, bin_ids_unclipped_mem = \
sc.cross_correlation_histogram(
self.binned_st1, self.binned_st2, window='valid',
binary=False, method='memory')
# Check consistency two methods
assert_array_equal(
np.squeeze(cch_clipped.magnitude), np.squeeze(
cch_clipped_mem.magnitude))
assert_array_equal(
np.squeeze(cch_clipped.times), np.squeeze(
cch_clipped_mem.times))
assert_array_equal(
np.squeeze(cch_unclipped.magnitude), np.squeeze(
cch_unclipped_mem.magnitude))
assert_array_equal(
np.squeeze(cch_unclipped.times), np.squeeze(
cch_unclipped_mem.times))
assert_array_equal(bin_ids_clipped, bin_ids_clipped_mem)
assert_array_equal(bin_ids_unclipped, bin_ids_unclipped_mem)
# Check normal correlation Note: Use numpy correlate to verify result.
# Note: numpy conventions for input array 1 and input array 2 are
# swapped compared to Elephant!
mat1 = self.binned_st1.to_array()[0]
mat2 = self.binned_st2.to_array()[0]
target_numpy = np.correlate(mat2, mat1, mode='valid')
assert_array_equal(
target_numpy, np.squeeze(cch_unclipped.magnitude))
# Check correlation using binary spike trains
mat1 = np.array(self.binned_st1.to_bool_array()[0], dtype=int)
mat2 = np.array(self.binned_st2.to_bool_array()[0], dtype=int)
target_numpy = np.correlate(mat2, mat1, mode='valid')
assert_array_equal(
target_numpy, np.squeeze(cch_clipped.magnitude))
# Check the time axis and bin IDs of the resulting AnalogSignal
assert_array_equal(
(bin_ids_clipped - 0.5) * self.binned_st1.bin_size,
cch_unclipped.times)
assert_array_equal(
(bin_ids_clipped - 0.5) * self.binned_st1.bin_size,
cch_clipped.times)
# Check for wrong window parameter setting
self.assertRaises(
ValueError, sc.cross_correlation_histogram, self.binned_st1,
self.binned_st2, window='dsaij')
self.assertRaises(
ValueError, sc.cross_correlation_histogram, self.binned_st1,
self.binned_st2, window='dsaij', method='memory')
def test_raising_error_wrong_inputs(self):
'''Check that an exception is thrown if the two spike trains are not
fullfilling the requirement of the function'''
# Check the bin_sizes are the same
self.assertRaises(
ValueError,
sc.cross_correlation_histogram, self.binned_st1,
self.st_check_bin_size)
# Check input are one dimensional
self.assertRaises(
ValueError, sc.cross_correlation_histogram,
self.st_check_dimension, self.binned_st2)
self.assertRaises(
ValueError, sc.cross_correlation_histogram,
self.binned_st2, self.st_check_dimension)
def test_window(self):
'''Test if the window parameter is correctly interpreted.'''
cch_win, bin_ids = sc.cch(
self.binned_st1, self.binned_st2, window=[-30, 30])
cch_win_mem, bin_ids_mem = sc.cch(
self.binned_st1, self.binned_st2, window=[-30, 30],
method='memory')
self.assertEqual(len(bin_ids), cch_win.shape[0])
assert_array_equal(bin_ids, np.arange(-30, 31, 1))
assert_array_equal(
(bin_ids - 0.5) * self.binned_st1.bin_size, cch_win.times)
assert_array_equal(bin_ids_mem, np.arange(-30, 31, 1))
assert_array_equal(
(bin_ids_mem - 0.5) * self.binned_st1.bin_size, cch_win.times)
assert_array_equal(cch_win, cch_win_mem)
cch_unclipped, _ = sc.cross_correlation_histogram(
self.binned_st1, self.binned_st2, window='full', binary=False)
assert_array_equal(cch_win, cch_unclipped[19:80])
_, bin_ids = sc.cch(
self.binned_st1, self.binned_st2, window=[20, 30])
_, bin_ids_mem = sc.cch(
self.binned_st1, self.binned_st2, window=[20, 30], method='memory')
assert_array_equal(bin_ids, | np.arange(20, 31, 1) | numpy.arange |
"""
"""
from SALib.sample.morris import _sample_groups, SampleMorris
from SALib.sample.morris.local import LocalOptimisation
from SALib.sample.morris.brute import BruteForce
from SALib.util import read_param_file
import numpy as np
import pytest
from numpy.testing import assert_equal, assert_allclose
from pytest import fixture, raises
import numpy.random as rd
@fixture(scope='function')
def setup_input():
input_1 = [[0, 1 / 3.], [0, 1.], [2 / 3., 1.]]
input_2 = [[0, 1 / 3.], [2 / 3., 1 / 3.], [2 / 3., 1.]]
input_3 = [[2 / 3., 0], [2 / 3., 2 / 3.], [0, 2 / 3.]]
input_4 = [[1 / 3., 1.], [1., 1.], [1, 1 / 3.]]
input_5 = [[1 / 3., 1.], [1 / 3., 1 / 3.], [1, 1 / 3.]]
input_6 = [[1 / 3., 2 / 3.], [1 / 3., 0], [1., 0]]
return np.concatenate([input_1, input_2, input_3, input_4, input_5,
input_6])
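    # Each input_i above is one Morris trajectory of three points in the two-parameter unit
    # square; concatenating the six trajectories yields an (18, 2) sample array.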
@fixture(scope='function')
def setup_problem(setup_input):
input_sample = setup_input
num_samples = 6
problem = {'num_vars': 2, 'groups': None}
k_choices = 4
groups = None
num_params = problem.get('num_vars')
input_1 = [[0, 1 / 3.], [0, 1.], [2 / 3., 1.]]
input_3 = [[2 / 3., 0], [2 / 3., 2 / 3.], [0, 2 / 3.]]
input_4 = [[1 / 3., 1.], [1., 1.], [1, 1 / 3.]]
input_6 = [[1 / 3., 2 / 3.], [1 / 3., 0], [1., 0]]
expected = np.concatenate([input_1, input_3,
input_4, input_6])
return (input_sample, num_samples, problem,
k_choices, groups, num_params, expected)
@fixture(scope='function')
def strategy():
return BruteForce()
class TestSharedMethods:
def test_check_input_sample_N(self, strategy, setup_input):
input_sample = setup_input
num_params = 4
N = 5
with raises(AssertionError):
strategy.check_input_sample(input_sample, num_params, N)
def test_check_input_sample_num_vars(self, strategy, setup_input):
input_sample = setup_input
num_params = 3
N = 6
with raises(AssertionError):
strategy.check_input_sample(input_sample, num_params, N)
def test_check_input_sample_range(self, strategy, setup_input):
input_sample = setup_input
input_sample *= 100
num_params = 4
N = 6
with raises(AssertionError):
strategy.check_input_sample(input_sample, num_params, N)
def test_find_maximum(self, strategy):
scores = np.array(range(15))
k_choices = 4
N = 6
output = strategy.find_maximum(scores, N, k_choices)
expected = [2, 3, 4, 5]
assert_equal(output, expected)
def test_distance(self, strategy):
'''
Tests the computation of the distance of two trajectories
'''
input_1 = np.matrix(
[[0, 1 / 3.], [0, 1.], [2 / 3., 1.]], dtype=np.float32)
input_3 = np.matrix([[2 / 3., 0], [2 / 3., 2 / 3.],
[0, 2 / 3.]], dtype=np.float32)
output = strategy.compute_distance(input_1, input_3)
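        # The trajectory distance is the sum of the Euclidean distances between every point of
        # one trajectory and every point of the other (3 x 3 = 9 pairs here), giving ~6.18.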
assert_allclose(output, 6.18, atol=1e-2)
def test_distance_of_identical_matrices_is_min(self, strategy):
input_1 = np.matrix([[1., 1.],
[1., 0.33333333],
[0.33333333, 0.33333333]])
input_2 = input_1.copy()
actual = strategy.compute_distance(input_1, input_2)
desired = 0
assert_allclose(actual, desired, atol=1e-2)
def test_distance_fail_with_difference_size_ip(self, strategy):
input_1 = np.matrix([[0, 1 / 3.], [0, 1.]], dtype=np.float32)
input_3 = np.matrix([[2 / 3., 0], [2 / 3., 2 / 3.],
[0, 2 / 3.]], dtype=np.float32)
try:
strategy.compute_distance(input_1, input_3, 2)
        except Exception:
pass
else:
raise AssertionError(
"Different size matrices did not trigger error")
def test_compute_distance_matrix(self, strategy, setup_input):
'''
Tests that a distance matrix is computed correctly
for an input of six trajectories and two parameters
'''
sample_inputs = setup_input
output = strategy.compute_distance_matrix(sample_inputs, 6, 2)
expected = np.zeros((6, 6), dtype=np.float32)
expected[1, :] = [5.50, 0, 0, 0, 0, 0]
expected[2, :] = [6.18, 5.31, 0, 0, 0, 0]
expected[3, :] = [6.89, 6.18, 6.57, 0, 0, 0]
expected[4, :] = [6.18, 5.31, 5.41, 5.5, 0, 0]
expected[5, :] = [7.52, 5.99, 5.52, 7.31, 5.77, 0]
assert_allclose(output, expected, rtol=1e-2)
def test_compute_distance_matrix_local(self, strategy, setup_input):
'''
Tests that a distance matrix is computed correctly for the
local distance optimization.
The only change is that the local method needs the upper triangle of
the distance matrix instead of the lower one.
This is for an input of six trajectories and two parameters
'''
sample_inputs = setup_input
output = strategy.compute_distance_matrix(
sample_inputs, 6, 2, local_optimization=True)
expected = np.zeros((6, 6), dtype=np.float32)
expected[0, :] = [0, 5.50, 6.18, 6.89, 6.18, 7.52]
expected[1, :] = [5.50, 0, 5.31, 6.18, 5.31, 5.99]
expected[2, :] = [6.18, 5.31, 0, 6.57, 5.41, 5.52]
expected[3, :] = [6.89, 6.18, 6.57, 0, 5.50, 7.31]
expected[4, :] = [6.18, 5.31, 5.41, 5.5, 0, 5.77]
expected[5, :] = [7.52, 5.99, 5.52, 7.31, 5.77, 0]
assert_allclose(output, expected, rtol=1e-2)
class TestLocallyOptimalStrategy:
def test_local(self, setup_problem):
rd.seed(12345)
(input_sample, num_samples, _,
k_choices, groups, num_params, expected) = setup_problem
local_strategy = LocalOptimisation()
context = SampleMorris(local_strategy)
actual = context.sample(input_sample, num_samples, num_params,
k_choices, groups)
np.testing.assert_equal(actual, expected)
def test_find_local_maximum_distance(self, setup_input):
'''
Test whether finding the local maximum distance equals the global
maximum distance in a simple case for a defined random seed.
From Saltelli et al. 2008, in the solution to exercise 3a,
Chapter 3, page 134.
Note that local and brute force methods are not guaranteed to produce
the same results, even for simple problems,
hence forcing the seed here.
'''
rd.seed(12345)
local_strategy = LocalOptimisation()
brute_strategy = BruteForce()
sample_inputs = setup_input
N = 6
num_params = 2
k_choices = 4
output_global = brute_strategy.brute_force_most_distant(sample_inputs,
N, num_params,
k_choices)
output_local = local_strategy.find_local_maximum(sample_inputs, N,
num_params, k_choices)
assert_equal(output_global, output_local)
def test_random_seed(self, setup_param_groups_prime):
"""Setting the seed before generating a sample results in two
identical samples
"""
N = 8
param_file = setup_param_groups_prime
problem = read_param_file(param_file)
num_levels = 4
grid_jump = num_levels / 2
np.random.seed(12345)
expected = _sample_groups(problem, N, num_levels, grid_jump)
np.random.seed(12345)
actual = _sample_groups(problem, N, num_levels, grid_jump)
assert_equal(actual, expected)
@pytest.mark.parametrize('execution_number', range(1))
def test_local_optimised_groups(self,
setup_param_groups_prime,
execution_number):
"""
Tests that the local optimisation problem gives
the same answer as the brute force problem
(for small values of `k_choices` and `N`)
with groups for a defined random seed.
Note that local and brute force methods are not guaranteed to produce
exact answers, even for small problems.
"""
rd.seed(12345)
N = 8
param_file = setup_param_groups_prime
problem = read_param_file(param_file)
num_levels = 4
grid_jump = num_levels / 2
k_choices = 4
num_params = problem['num_vars']
num_groups = len(set(problem['groups']))
input_sample = _sample_groups(problem, N, num_levels, grid_jump)
local = LocalOptimisation()
# From local optimal trajectories
actual = local.find_local_maximum(input_sample, N, num_params,
k_choices, num_groups)
brute = BruteForce()
desired = brute.brute_force_most_distant(input_sample,
N,
num_params,
k_choices,
num_groups)
print("Actual: {}\nDesired: {}\n".format(actual, desired))
print(input_sample)
assert_equal(actual, desired)
class TestLocalMethods:
def test_sum_distances(self, setup_input):
'''
Tests whether the combinations are summed correctly.
'''
strategy = LocalOptimisation()
dist_matr = strategy.compute_distance_matrix(setup_input, 6, 2,
num_groups=None,
local_optimization=True)
indices = (1, 3, 2)
distance = strategy.sum_distances(indices, dist_matr)
expected = 10.47
assert_allclose(distance, expected, rtol=1e-2)
def test_get_max_sum_ind(self):
'''
Tests whether the right maximum indices are returned.
'''
strategy = LocalOptimisation()
indices = np.array([(1, 2, 4), (3, 2, 1), (4, 2, 1)])
distances = np.array([20, 40, 50])
output = strategy.get_max_sum_ind(indices, distances, 0, 0)
expected = (4, 2, 1)
assert_equal(output, expected)
def test_add_indices(self):
'''
Tests whether the right indices are added.
'''
strategy = LocalOptimisation()
indices = (1, 3, 4)
matr = np.zeros((6, 6), dtype=np.int16)
ind_extra = strategy.add_indices(indices, matr)
expected = [(1, 3, 4, 0), (1, 3, 4, 2), (1, 3, 4, 5)]
assert_equal(ind_extra, expected)
def test_get_max_sum_index_raises_error(self):
strategy = LocalOptimisation()
indices = [(1, 2, 4), (3, 2, 1), (4, 2, 1)]
distances_wrong = [20, 40]
with raises(ValueError):
strategy.get_max_sum_ind(indices, distances_wrong, 0, 0)
def test_combo_from_locally_optimal_method(self, setup_input):
'''
Tests whether the correct combination is picked from the fixture drawn
from Saltelli et al. 2008, in the solution to exercise 3a,
Chapter 3, page 134.
'''
sample_inputs = setup_input
N = 6
num_params = 2
k_choices = 4
strategy = LocalOptimisation()
output = strategy.find_local_maximum(sample_inputs, N,
num_params, k_choices)
expected = [0, 2, 3, 5] # trajectories 1, 3, 4, 6
| assert_equal(output, expected) | numpy.testing.assert_equal |
import numpy as np
import os
import glob
import healpy as hp
from rubin_sim.photUtils import Sed, Bandpass
from .twilightFunc import twilightFunc
from scipy.interpolate import InterpolatedUnivariateSpline, interp1d
from rubin_sim.data import get_data_dir
# Make backwards compatible with healpy
if hasattr(hp, 'get_interp_weights'):
get_neighbours = hp.get_interp_weights
elif hasattr(hp, 'get_neighbours'):
get_neighbours = hp.get_neighbours
else:
print("Could not find appropriate healpy function for get_interp_weight or get_neighbours")
__all__ = ['id2intid', 'intid2id', 'loadSpecFiles', 'BaseSingleInterp', 'ScatteredStar', 'LowerAtm',
'UpperAtm', 'MergedSpec', 'Airglow', 'TwilightInterp', 'MoonInterp',
'ZodiacalInterp']
def id2intid(ids):
"""
    Take an array of ids and convert them to integer ids.
Handy if you want to put things into a sparse array.
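    For example, ids = np.array([10, 42, 10]) returns intids = [0, 1, 0],
    uids = [10, 42] and uintids = [0, 1].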
"""
uids = np.unique(ids)
order = np.argsort(ids)
oids = ids[order]
uintids = np.arange(np.size(uids), dtype=int)
left = np.searchsorted(oids, uids)
right = np.searchsorted(oids, uids, side='right')
intids = np.empty(ids.size, dtype=int)
for i in range(np.size(left)):
intids[left[i]:right[i]] = uintids[i]
result = intids*0
result[order] = intids
return result, uids, uintids
def intid2id(intids, uintids, uids, dtype=int):
"""
convert an int back to an id
"""
ids = np.zeros(np.size(intids))
order = np.argsort(intids)
ointids = intids[order]
left = np.searchsorted(ointids, uintids, side='left')
right = np.searchsorted(ointids, uintids, side='right')
for i, (le, ri) in enumerate(zip(left, right)):
ids[le:ri] = uids[i]
result = np.zeros(np.size(intids), dtype=dtype)
result[order] = ids
return result
def loadSpecFiles(filenames, mags=False):
"""
Load up the ESO spectra.
The ESO npz files contain the following arrays:
filterWave: The central wavelengths of the pre-computed magnitudes
wave: wavelengths for the spectra
spec: array of spectra and magnitudes along with the relevant variable inputs. For example,
airglow has dtype = [('airmass', '<f8'), ('solarFlux', '<f8'), ('spectra', '<f8', (17001,)),
('mags', '<f8', (6,)]
For each unique airmass and solarFlux value, there is a 17001 elements spectra and 6 magnitudes.
"""
if len(filenames) == 1:
temp = np.load(filenames[0])
wave = temp['wave'].copy()
filterWave = temp['filterWave'].copy()
if mags:
# don't copy the spectra to save memory space
dt = np.dtype([(key, temp['spec'].dtype[i]) for
i, key in enumerate(temp['spec'].dtype.names) if key != 'spectra'])
spec = np.zeros(temp['spec'].size, dtype=dt)
for key in temp['spec'].dtype.names:
if key != 'spectra':
spec[key] = temp['spec'][key].copy()
else:
spec = temp['spec'].copy()
else:
temp = np.load(filenames[0])
wave = temp['wave'].copy()
filterWave = temp['filterWave'].copy()
if mags:
# don't copy the spectra to save memory space
dt = np.dtype([(key, temp['spec'].dtype[i]) for
i, key in enumerate(temp['spec'].dtype.names) if key != 'spectra'])
spec = np.zeros(temp['spec'].size, dtype=dt)
for key in temp['spec'].dtype.names:
if key != 'spectra':
spec[key] = temp['spec'][key].copy()
else:
spec = temp['spec'].copy()
for filename in filenames[1:]:
temp = np.load(filename)
if mags:
# don't copy the spectra to save memory space
dt = np.dtype([(key, temp['spec'].dtype[i]) for
i, key in enumerate(temp['spec'].dtype.names) if key != 'spectra'])
tempspec = np.zeros(temp['spec'].size, dtype=dt)
for key in temp['spec'].dtype.names:
if key != 'spectra':
tempspec[key] = temp['spec'][key].copy()
else:
tempspec = temp['spec']
spec = np.append(spec, tempspec)
return spec, wave, filterWave
class BaseSingleInterp(object):
"""
Base class for sky components that only need to be interpolated on airmass
"""
def __init__(self, compName=None, sortedOrder=['airmass', 'nightTimes'], mags=False):
"""
mags: Rather than the full spectrum, return the LSST ugrizy magnitudes.
"""
self.mags = mags
dataDir = os.path.join(get_data_dir(), 'skybrightness', 'ESO_Spectra/'+compName)
filenames = sorted(glob.glob(dataDir+'/*.npz'))
self.spec, self.wave, self.filterWave = loadSpecFiles(filenames, mags=self.mags)
# Take the log of the spectra in case we want to interp in log space.
if not mags:
self.logSpec = np.zeros(self.spec['spectra'].shape, dtype=float)
good = np.where(self.spec['spectra'] != 0)
self.logSpec[good] = np.log10(self.spec['spectra'][good])
self.specSize = self.spec['spectra'][0].size
else:
self.specSize = 0
        # What order the dimensions are sorted by (from how the .npz was packaged)
self.sortedOrder = sortedOrder
self.dimDict = {}
self.dimSizes = {}
for dt in self.sortedOrder:
self.dimDict[dt] = np.unique(self.spec[dt])
self.dimSizes[dt] = np.size(np.unique(self.spec[dt]))
# Set up and save the dict to order the filters once.
self.filterNameDict = {'u': 0, 'g': 1, 'r': 2, 'i': 3, 'z': 4, 'y': 5}
def __call__(self, intepPoints, filterNames=['u', 'g', 'r', 'i', 'z', 'y']):
if self.mags:
return self.interpMag(intepPoints, filterNames=filterNames)
else:
return self.interpSpec(intepPoints)
def indxAndWeights(self, points, grid):
"""
        For given 1-D points, find the grid points on either side and return the interpolation weights.
        Assumes the grid is sorted.
"""
order = np.argsort(points)
indxL = np.empty(points.size, dtype=int)
indxR = np.empty(points.size, dtype=int)
indxR[order] = np.searchsorted(grid, points[order])
indxL = indxR-1
# If points off the grid were requested, just use the edge grid point
offGrid = np.where(indxR == grid.size)
indxR[offGrid] = grid.size-1
fullRange = grid[indxR]-grid[indxL]
wL = np.zeros(fullRange.size, dtype=float)
wR = np.ones(fullRange.size, dtype=float)
good = | np.where(fullRange != 0) | numpy.where |
"""Cross-validation support for GTC and GTR models (also SVM and PCA).
"""
# Authors: <NAME> <<EMAIL>>
# License: MIT
import numpy as np
from sklearn.decomposition import PCA
from sklearn.model_selection import KFold
from sklearn.metrics import recall_score
from sklearn.metrics import precision_score
from sklearn.metrics import f1_score
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error
from sklearn.svm import SVC
from sklearn.svm import SVR
import scipy.stats as st
from scipy.stats import t
from . import ugtm_predictions
from . import ugtm_preprocess
def crossvalidateGTC(data, labels, k=16, m=4, s=-1.0, regul=1.0,
n_neighbors=1, niter=200,
representation="modes",
doPCA=False, n_components=-1,
missing=False, missing_strategy='median',
random_state=1234, predict_mode="bayes",
prior="estimated",
n_folds=5, n_repetitions=10):
r"""Cross-validate GTC model.
Parameters
==========
data : array of shape (n_individuals, n_dimensions)
Train set data matrix.
labels : array of shape (n_individuals, 1)
Labels for train set.
k : int, optional (default = 16)
If k is set to 0, k is computed as sqrt(5*sqrt(n_individuals))+2.
k is the sqrt of the number of GTM nodes.
One of four GTM hyperparameters (k, m, s, regul).
Ex: k = 25 means the GTM will be discretized into a 25x25 grid.
m : int, optional (default = 4)
If m is set to 0, m is computed as sqrt(k).
(generally good rule of thumb).
        m is the sqrt of the number of RBF centers.
One of four GTM hyperparameters (k, m, s, regul).
Ex: m = 5 means the RBF functions will be arranged on a 5x5 grid.
s : float, optional (default = -1)
RBF width factor. Default (-1) is to try different values.
Parameter to tune width of RBF functions.
Impacts manifold flexibility.
    regul : float, optional (default = 1.0)
        Regularization coefficient. Set to -1 to try several values.
Impacts manifold flexibility.
n_neighbors : int, optional (default = 1)
Number of neighbors for kNN algorithm (number of nearest nodes).
At the moment, n_neighbors for GTC is always equal to 1.
niter : int, optional (default = 200)
Number of iterations for EM algorithm.
representation : {"modes", "means"}
2D GTM representation for the test set, used for kNN algorithms:
"modes" for position with max. responsibility,
"means" for average position (usual GTM representation)
doPCA : bool, optional (default = False)
Apply PCA pre-processing.
n_components : int, optional (default = -1)
Number of components for PCA pre-processing.
If set to -1, keep principal components
accounting for 80% of data variance.
    missing : bool, optional (default = False)
Replace missing values (calls scikit-learn functions).
missing_strategy : str, optional (default = 'median')
Scikit-learn missing data strategy.
random_state : int, optional (default = 1234)
Random state.
predict_mode : {"bayes", "knn"}, optional
Choose between nearest node algorithm
("knn", output of :func:`~ugtm.ugtm_predictions.predictNN`)
or GTM Bayes classifier
("bayes", output of :func:`~ugtm.ugtm_predictions.predictBayes`).
NB: the kNN algorithm is limited to only 1 nearest node at the moment
(n_neighbors = 1).
prior : {"estimated", "equiprobable"}, optional
Type of prior used to build GTM class map
(:func:`~ugtm.ugtm_landscape.classMap`).
Choose "estimated" to account for class imbalance.
n_folds : int, optional (default = 5)
Number of CV folds.
n_repetitions : int, optional (default = 10)
Number of CV iterations.
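    Example
    =======
    A minimal usage sketch (assuming `data` is a 2-D descriptor array and `labels` the
    corresponding class labels)::
        crossvalidateGTC(data, labels, k=16, m=4, doPCA=True, n_folds=5, n_repetitions=10)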
"""
print("")
print("k = sqrt(grid size), m = sqrt(radial basis function grid size), "
"regul = regularization, s = RBF width factor")
print("")
uniqClasses, labels = np.unique(labels, return_inverse=True)
nClasses = len(uniqClasses)
print("Classes: ", uniqClasses)
print("nClasses: %s" % (nClasses))
print("")
print("model\tparameters=k:m:s:regul\t"
"recall with CI\tprecision with CI\tF1-score with CI")
print("")
if k == 0:
k = int(np.sqrt(5*np.sqrt(data.shape[0])))+2
if m == 0:
m = int(np.sqrt(k))
if n_components == -1 and doPCA:
pca = PCA(random_state=random_state)
pca.fit(data)
n_components = np.searchsorted(
pca.explained_variance_ratio_.cumsum(), 0.8)+1
print("Used number of components explaining 80%% of "
"the variance in whole data set = %s\n" %
n_components)
if regul < 0.0:
lvec = [0.00001, 0.0001, 0.001, 0.01, 0.1, 1, 10, 100]
else:
lvec = [regul]
if s <= 0.0:
svec = [0.25, 0.5, 1.0, 1.50, 2.0]
else:
svec = [s]
savemean = -9999
nummodel = 0
savemodel = ""
for s in svec:
for regul in lvec:
modelstring = str(k)+':'+str(m)+":"+str(s)+":"+str(regul)
nummodel += 1
recallvec = []
precisionvec = []
f1vec = []
recallclassvec = np.array([])
precisionclassvec = np.array([])
f1classvec = np.array([])
meanclass = np.zeros(nClasses)
meanprecisionclass = np.zeros(nClasses)
meanf1class = np.zeros(nClasses)
seclass = np.zeros(nClasses)
seprecisionclass = np.zeros(nClasses)
sef1class = np.zeros(nClasses)
hclass = np.zeros(nClasses)
hprecisionclass = np.zeros(nClasses)
hf1class = np.zeros(nClasses)
for j in range(n_repetitions):
ss = KFold(n_splits=n_folds, shuffle=True, random_state=j)
y_true = []
y_pred = []
for train_index, test_index in ss.split(data):
train = np.copy(data[train_index])
test = np.copy(data[test_index])
prediction = ugtm_predictions.GTC(train=train,
labels=labels[train_index],
test=test, k=k,
m=m, s=s, regul=regul,
n_neighbors=n_neighbors,
niter=niter,
representation=representation,
doPCA=doPCA,
n_components=n_components,
random_state=random_state,
missing=missing,
missing_strategy=missing_strategy,
predict_mode=predict_mode,
prior=prior)
y_true = np.append(y_true, labels[test_index])
y_pred = np.append(y_pred, prediction)
recall = recall_score(y_true, y_pred, average='weighted')
precision = precision_score(
y_true, y_pred, average='weighted')
f1 = f1_score(y_true, y_pred, average='weighted')
recallvec = np.append(recallvec, recall)
precisionvec = np.append(precisionvec, precision)
f1vec = np.append(f1vec, f1)
recallclass = recall_score(y_true, y_pred, average=None)
precisionclass = precision_score(y_true, y_pred, average=None)
f1class = f1_score(y_true, y_pred, average=None)
if(j == 0):
recallclassvec = recallclass
precisionclassvec = precisionclass
f1classvec = f1class
else:
recallclassvec = np.vstack([recallclassvec, recallclass])
precisionclassvec = np.vstack(
[precisionclassvec, precisionclass])
f1classvec = np.vstack([f1classvec, f1class])
mean, se = np.mean(recallvec), st.sem(recallvec)
meanprecision, seprecision = np.mean(
precisionvec), st.sem(precisionvec)
meanf1, sef1 = np.mean(f1vec), st.sem(f1vec)
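            # Half-widths of the 95% confidence intervals from Student's t distribution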
h = se * t._ppf((1+0.95)/2., len(recallvec)-1)
hprecision = seprecision * \
t._ppf((1+0.95)/2., len(precisionvec)-1)
hf1 = sef1 * t._ppf((1+0.95)/2., len(f1vec)-1)
if(meanf1 > savemean):
savemean = meanf1
savemodel = "Model "+str(nummodel)
for i in range(0, nClasses):
meanclass[i] = np.mean(recallclassvec[:, i])
seclass[i] = st.sem(recallclassvec[:, i])
meanf1class[i] = np.mean(f1classvec[:, i])
sef1class[i] = st.sem(f1classvec[:, i])
meanprecisionclass[i] = np.mean(precisionclassvec[:, i])
seprecisionclass[i] = st.sem(precisionclassvec[:, i])
hclass[i] = seclass[i] * \
t._ppf((1+0.95)/2., len(recallclassvec[:, i])-1)
hprecisionclass[i] = seprecisionclass[i] \
* t._ppf((1+0.95)/2., len(precisionclassvec[:, i])-1)
hf1class[i] = sef1class[i] * \
t._ppf((1+0.95)/2., len(f1classvec[:, i])-1)
print("Model %s\t%s\t%.4f +/- %.4f\t%.4f +/- %.4f\t%.4f +/- %.4f"
% (nummodel, modelstring, mean, h,
meanprecision, hprecision, meanf1, hf1))
for i in range(nClasses):
print("Class=%s\t%s\t%.4f +/- %.4f\t%.4f +/- %.4f\t%.4f +/- %.4f"
% (uniqClasses[i], modelstring, meanclass[i],
hclass[i], meanprecisionclass[i],
hprecisionclass[i], meanf1class[i], hf1class[i]))
print('')
print('')
print("########best GTC model##########")
print(savemodel)
print("")
def crossvalidateGTR(data, labels, k=16, m=4, s=-1, regul=-1,
n_neighbors=1, niter=200, representation="modes",
doPCA=False, n_components=-1,
missing=False, missing_strategy='median',
random_state=1234, n_folds=5, n_repetitions=10):
r"""Cross-validate GTR model.
Parameters
==========
data : array of shape (n_individuals, n_dimensions)
Train set data matrix.
labels : array of shape (n_individuals, 1)
Labels for train set.
k : int, optional (default = 16)
If k is set to 0, k is computed as sqrt(5*sqrt(n_individuals))+2.
k is the sqrt of the number of GTM nodes.
One of four GTM hyperparameters (k, m, s, regul).
Ex: k = 25 means the GTM will be discretized into a 25x25 grid.
m : int, optional (default = 4)
If m is set to 0, m is computed as sqrt(k).
(generally good rule of thumb).
        m is the sqrt of the number of RBF centers.
One of four GTM hyperparameters (k, m, s, regul).
Ex: m = 5 means the RBF functions will be arranged on a 5x5 grid.
s : float, optional (default = -1)
RBF width factor. Default (-1) is to try different values.
Parameter to tune width of RBF functions.
Impacts manifold flexibility.
regul : float, optional (default = -1)
Regularization coefficient. Default (-1) is to try different values.
Impacts manifold flexibility.
n_neighbors : int, optional (default = 1)
Number of neighbors for kNN algorithm (number of nearest nodes).
niter : int, optional (default = 200)
Number of iterations for EM algorithm.
representation : {"modes", "means"}
2D GTM representation for the test set, used for kNN algorithms:
"modes" for position with max. responsibility,
"means" for average position (usual GTM representation)
doPCA : bool, optional (default = False)
Apply PCA pre-processing.
n_components : int, optional (default = -1)
Number of components for PCA pre-processing.
If set to -1, keep principal components
accounting for 80% of data variance.
    missing : bool, optional (default = False)
Replace missing values (calls scikit-learn functions).
missing_strategy : str, optional (default = 'median')
Scikit-learn missing data strategy.
random_state : int, optional (default = 1234)
Random state.
n_folds : int, optional (default = 5)
Number of CV folds.
n_repetitions : int, optional (default = 10)
Number of CV iterations.
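    Example
    =======
    A minimal usage sketch (assuming `data` is a 2-D descriptor array and `labels` a
    continuous response)::
        crossvalidateGTR(data, labels, k=16, m=4, n_folds=5, n_repetitions=10)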
"""
print("")
print("k = sqrt(grid size), m = sqrt(radial basis function grid size), "
"regul = regularization, s = RBF width factor")
print("")
if k == 0:
k = int(np.sqrt(5*np.sqrt(data.shape[0])))+2
if m == 0:
m = int(np.sqrt(k))
if n_components == -1 and doPCA is True:
pca = PCA(random_state=random_state)
pca.fit(data)
n_components = np.searchsorted(
pca.explained_variance_ratio_.cumsum(), 0.8)+1
print("Used number of components explaining 80%% of the variance = %s\n"
% n_components)
if regul < 0.0:
lvec = [0.00001, 0.0001, 0.001, 0.01, 0.1, 1, 10, 100]
else:
lvec = [regul]
if s <= 0.0:
svec = [0.25, 0.5, 1.0, 1.50, 2.0]
else:
svec = [s]
savemean = 999999999
saveh = 0.0
modelvec = ""
savemeanr2 = 0.0
savehr2 = 0.0
print("k:m:s:regul\tRMSE with CI\tR2 with CI\t")
for s in svec:
for regul in lvec:
modelstring = str(s)+":"+str(regul)
rmsevec = []
r2vec = []
for j in range(n_repetitions):
ss = KFold(n_splits=n_folds, shuffle=True, random_state=j)
y_true = []
y_pred = []
for train_index, test_index in ss.split(data):
train = np.copy(data[train_index])
test = np.copy(data[test_index])
prediction = ugtm_predictions.GTR(train=train,
labels=labels[train_index],
test=test, k=k,
m=m, s=s, regul=regul,
n_neighbors=n_neighbors,
niter=niter,
representation=representation,
doPCA=doPCA,
n_components=n_components,
random_state=random_state,
missing=missing,
missing_strategy=missing_strategy)
y_pred = np.append(y_pred, prediction)
y_true = np.append(y_true, labels[test_index])
rmse = np.sqrt(mean_squared_error(y_true, y_pred))
r2 = r2_score(y_true, y_pred)
rmsevec = np.append(rmsevec, rmse)
r2vec = | np.append(r2vec, r2) | numpy.append |
# Script to make plots from the RJ-MCMC output
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import matplotlib.cm as cm
import numpy as np
from scipy import stats
from matplotlib.colors import LogNorm
import os
import sys
if not os.path.exists('input_file'):
print('*'*50)
print('Cannot find file: input_file')
print('Check that you are running this Python code in the outputs directory \n E.g. cd Outputs \n python ../make_plots.py')
print('*'*50)
sys.exit(0)
# Read some basic data from the input file
# This can be overwritten by either altering this file, or simply hardwiring the various parameters: e.g.
# age_min, age_max = 0, 100
for line in open('input_file','r'):
if not (line[0] == '#' or line == '\n'): #skip comments or blank lines...
if line.split()[0].upper() == 'Intensity_prior'.upper():
I_min, I_max = float(line.split()[1]),float(line.split()[2])
if line.split()[0].upper() == 'Age_bounds'.upper():
age_min, age_max = float(line.split()[1]),float(line.split()[2])
if line.split()[0].upper() == 'Num_change_points'.upper():
K_min, K_max = int(line.split()[1]), int(line.split()[2])
if line.split()[0].upper() == 'Credible'.upper():
credible = float(line.split()[1])
if line.split()[0].upper() == 'output_model'.upper():
output_model_filename = line.split()[1]
if line.split()[0].upper() == 'True_data'.upper():
true_behaviour_file = line.split()[2]
x_cts_true,y_cts_true=np.loadtxt(os.path.join(os.pardir,true_behaviour_file),unpack=True)
if line.split()[0].upper() == 'Plotting_intensity_range'.upper():
I_min,I_max = float(line.split()[1]),float(line.split()[2])
if line.split()[0].upper() == 'Burn_in'.upper():
Burn_in = int(line.split()[1])
if line.split()[0].upper() == 'Outputs_directory'.upper():
outputs_directory = line.split()[1]
if line.split()[0].upper() == 'Data_title'.upper():
data_title = line.split()[1]
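# Editor's note: the keyword parsing above implies an input_file of roughly the
# following shape (values are purely illustrative, not from any real run):
#   Intensity_prior            50 80
#   Age_bounds                 0 1000
#   Num_change_points          0 100
#   Credible                   95
#   Burn_in                    10000
#   Data_title                 Example_dataset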
# read in the various data files that were output by the RJ-MCMC script
x, x_err, y, y_err, strat = np.loadtxt('data.dat', unpack=True)
strat = [int(a) for a in strat]
lx, ly = np.loadtxt('credible_lower.dat', unpack=True)
ux, uy = np.loadtxt('credible_upper.dat', unpack=True)
mode_x, mode_y = np.loadtxt('mode.dat', unpack=True)
median_x, median_y = np.loadtxt('median.dat', unpack=True)
av_x, av_y = np.loadtxt('average.dat', unpack=True)
best_x, best_y = np.loadtxt('best_fit.dat', unpack=True)
k_index, k_count = np.loadtxt('k_histogram.dat',unpack=True)
print('Building plot of data...')
# plot the data with the density binned by using the number of bins of "num_bins"
num_bins = 20
fig1, ax1 = plt.subplots(figsize=(14,6))
unstratified_index = [index for index,item in enumerate(strat) if item == 0]
stratified_index = [index for index,item in enumerate(strat) if item == 1]
if len(unstratified_index) > 0:
(line, caps, bars) = ax1.errorbar(x[unstratified_index], y[unstratified_index],xerr=x_err[unstratified_index], yerr=y_err[unstratified_index],
fmt='o',markerfacecolor='blue',markeredgecolor='k', markeredgewidth = 0.6, ecolor='k', elinewidth=1, capsize=4, markersize=7)
plt.setp(line,label="Unstratified data") #give label to returned line
if len(stratified_index) > 0:
(line2, caps, bars) = ax1.errorbar(x[stratified_index], y[stratified_index],xerr=x_err[stratified_index], yerr=y_err[stratified_index],
fmt='o',markerfacecolor='red',markeredgecolor='k', markeredgewidth = 0.6, ecolor='k', elinewidth=1, capsize=4, markersize=7)
plt.setp(line2,label="Stratified data") #give label to returned line
ax1.set_xlabel('Time/yr',fontsize=16)
ax1.set_ylabel(r'Intensity/$\mu$T',fontsize=16)
ax1.xaxis.set_tick_params(labelsize=16)
ax1.yaxis.set_tick_params(labelsize=16)
count_colour = 'g'
ax2 = ax1.twinx()
ax2.hist(x,num_bins,alpha=0.2,color=count_colour,edgecolor='white')
ax2.set_ylabel('Count',fontsize=16,color=count_colour)
for tl in ax2.get_yticklabels():
tl.set_color(count_colour)
ax1.set_xlim(age_min, age_max)
ax2.yaxis.set_tick_params(labelsize=16)
if 'data_title' in locals(): #check to see if data_title is specified in input file
if data_title.upper() == 'Lubeck-Paris700'.upper():
ax2.set_title(r"""$L\"ubeck$-Paris700""",fontsize=20);
else:
ax2.set_title(data_title,fontsize=20)
plt.savefig('Data.pdf', bbox_inches='tight',pad_inches=0.0)
plt.close(fig1)
# Make a single plot of the data with mean/mode/median/credible bounds for the posterior
print('Building plot of posterior...')
fig2, ax = plt.subplots (figsize=(14,5))
ax.fill_between(lx, ly, uy, facecolor='orange', alpha=0.5, edgecolor='g', label='%i%% credible interval' % credible)
#a.errorbar(dx[black_pts_index], dy[black_pts_index],xerr=dx_err[black_pts_index], yerr=dn[black_pts_index],fmt='k.', label='Data', elinewidth=0.5)
(line, caps, bars) = ax.errorbar(x, y,xerr=x_err, yerr=y_err,fmt='o',color='blue',ecolor='k', elinewidth=1, capthick=0.7, capsize=4, markersize=5)
plt.setp(line,label="Data") #give label to returned line
ax.plot(av_x, av_y, 'r', label = 'Average', linewidth=2)
#ax.plot(best_x, best_y, 'b', linewidth=2, label = 'Best fit')
ax.plot(median_x, median_y, 'purple', linewidth=2, label = 'Median')
ax.plot(mode_x, mode_y, 'blue', linewidth=2, label = 'Mode')
if 'x_cts_true' in locals(): #see if "true" data are available to plot --- only for synthetic cases.
plt.plot(x_cts_true,y_cts_true,'k', linewidth=2, label='Real')
ax.set_ylim(I_min,I_max)
ax.set_xlim(age_min, age_max)
ax.set_title('Posterior distribution of intensity',fontsize=20)
ax.set_xlabel('Time/yr',fontsize=16)
ax.set_ylabel(r'Intensity/$\mu$T',fontsize=16)
ax.legend(loc = 'upper right',fontsize=12,labelspacing=0.2)
ax.xaxis.set_tick_params(labelsize=16)
ax.yaxis.set_tick_params(labelsize=16)
plt.savefig('Posterior.pdf', bbox_inches='tight',pad_inches=0.4)
plt.close(fig2)
# Make a plot of the histogram of the number of change points
print('Building plot of change points...')
fig3, ax = plt.subplots (figsize=(8,5))
k_count = k_count/np.sum(k_count) #normalise
ax.bar(k_index,k_count,align='center')
#ax.set_xticks(k_index[::2])
ax.set_title('Vertices Histogram',fontsize=16)
ax.set_xlabel('Number of vertices',fontsize=16)
ax.set_ylabel('Discrete probability',fontsize=16)
ax.xaxis.set_tick_params(labelsize=16)
ax.yaxis.set_tick_params(labelsize=16)
plt.savefig('K_histogram.pdf', bbox_inches='tight',pad_inches=0.4)
plt.close(fig3)
# Make a plot of the age of the change points
num_bins = 500
vertices = np.loadtxt('changepoints.dat')
fig4, ax = plt.subplots (figsize=(14,3))
ax.hist(vertices, bins = num_bins)
ax.set_title('Vertex position Histogram',fontsize=20)
ax.set_xlabel('Time/yr',fontsize=16)
ax.set_ylabel('Count',fontsize=16)
ax.set_xlim(age_min, age_max)
ax.xaxis.set_tick_params(labelsize=16)
ax.yaxis.set_tick_params(labelsize=16)
plt.savefig('Change_point_histogram.pdf', bbox_inches='tight',pad_inches=0.4)
plt.close(fig4)
# Make a plot of the misfit
print('Building plot of misfit...')
iterations, misfit = np.loadtxt('misfit.dat',unpack=True)
fig5, ax = plt.subplots (figsize=(8,5) )
ax.plot(iterations, misfit,'k')
ax.set_yscale('log')
ax.set_title('Misfit against iteration count',fontsize=16)
ax.set_xlabel('Iteration count',fontsize=16)
ax.set_ylabel('Misfit',fontsize=16)
ax.xaxis.set_tick_params(labelsize=16)
ax.yaxis.set_tick_params(labelsize=16)
# add red bar to indicate the burn-in end
ax.bar(Burn_in,height=misfit.max(),width=iterations.max()/100,bottom = 0, align = 'center',color='red')
plt.savefig('Misfit.pdf', bbox_inches='tight',pad_inches=0.4)
plt.close(fig5)
# Make a plot of the density
print('Building plot of density...')
fig6, ax = plt.subplots ( figsize=(14,5))
ax.set_title('Intensity density')
ax.set_ylabel(r'Intensity/$\mu$T')
f = open('intensity_density.dat', 'r')
discretise_size, NBINS = [int(x) for x in f.readline().split()]
density_data = [list(map(float, x.split())) for x in f.readlines()]
f.close()
x_density,y_density,intensity_density = list(zip(*density_data))
int_density = np.reshape(intensity_density,[discretise_size,NBINS])
x_density = np.reshape(x_density,[discretise_size,NBINS])
y_density = | np.reshape(y_density,[discretise_size,NBINS]) | numpy.reshape |
import os
import torch
import numpy as np
import cv2
from torch.utils.data import Dataset
from torch.nn.functional import interpolate
import matplotlib.pyplot as plt
class SIDSonyTrainDataset(Dataset):
"""SID Sony Train dataset."""
def __init__(self, list_file ,root_dir, ps, transform=None):
self.ps = ps
self.list_file = open(list_file, "r")
self.list_file_lines = self.list_file.readlines()
self.root_dir = root_dir
self.transform = transform
self.gt_images = [None] * 6000
self.input_images = {}
self.input_images['150'] = [None] * len(self.list_file_lines)
self.input_images['125'] = [None] * len(self.list_file_lines)
self.input_images['50'] = [None] * len(self.list_file_lines)
self.input_gray_images = {}
self.input_gray_images['150'] = [None] * len(self.list_file_lines)
self.input_gray_images['125'] = [None] * len(self.list_file_lines)
self.input_gray_images['50'] = [None] * len(self.list_file_lines)
self.input_edge_images = {}
self.input_edge_images['150'] = [None] * len(self.list_file_lines)
self.input_edge_images['125'] = [None] * len(self.list_file_lines)
self.input_edge_images['50'] = [None] * len(self.list_file_lines)
def __len__(self):
return len(self.list_file_lines)
def __getitem__(self, idx):
img_names = self.list_file_lines[idx].split(' ')
input_img_name = img_names[0]
gt_img_name = img_names[1]
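        # Editor's note: the fixed character slices below assume the SID Sony
        # file-naming convention, in which the scene id and the exposure time
        # (e.g. "0.1s", "10s") sit at fixed offsets within the relative path;
        # they will not generalise to other directory layouts.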
in_exposure = float(input_img_name[28:-5])
gt_exposure = float(gt_img_name[27:-5])
ratio = min(gt_exposure / in_exposure, 300)
ind = int(input_img_name[19:24])
ratio = int(ratio / 2)
in_fn = input_img_name.split('/')[-1]
if self.input_images[str(ratio)[0:3]][ind] is None:
input_img_path = os.path.join(self.root_dir, input_img_name)
input_img = cv2.imread(input_img_path)
input_img = cv2.cvtColor(input_img, cv2.COLOR_BGR2RGB)
self.input_images[str(ratio)[0:3]][ind] = np.expand_dims(np.float32(input_img / 255.0), axis=0) #* ratio
gt_img_path = os.path.join(self.root_dir, gt_img_name)
im = cv2.imread(gt_img_path)
im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
self.gt_images[ind] = np.expand_dims(np.float32(im / 255.0), axis=0)
# gray_path = os.path.join(self.root_dir, 'Sony/train/gray', in_fn)
# input_gray = cv2.imread(gray_path, cv2.IMREAD_GRAYSCALE)
# self.input_gray_images[str(ratio)[0:3]][ind] = np.expand_dims(np.expand_dims(np.float32(input_gray / 255.0), axis=2), axis=0)
# edge_path = os.path.join(self.root_dir, 'Sony/train/edge_GT/%05d_00_%0ds.png' % (ind, int(gt_exposure)))
edge_path = os.path.join(self.root_dir, 'Sony/train/edge', in_fn)
input_edge = cv2.imread(edge_path, cv2.IMREAD_GRAYSCALE)
self.input_edge_images[str(ratio)[0:3]][ind] = np.expand_dims(np.expand_dims(np.float32(input_edge / 255.0), axis=2), axis=0)
# crop
H = self.input_images[str(ratio)[0:3]][ind].shape[1]
W = self.input_images[str(ratio)[0:3]][ind].shape[2]
xx = np.random.randint(0, W - self.ps)
yy = np.random.randint(0, H - self.ps)
input_patch = self.input_images[str(ratio)[0:3]][ind][:, yy:yy + self.ps, xx:xx + self.ps, :]
gt_patch = self.gt_images[ind][:, yy:yy + self.ps, xx:xx + self.ps, :]
# input_gray_patch = self.input_gray_images[str(ratio)[0:3]][ind][:, yy:yy + self.ps, xx:xx + self.ps, :]
input_edge_patch = self.input_edge_images[str(ratio)[0:3]][ind][:, yy:yy + self.ps, xx:xx + self.ps, :]
if np.random.randint(2, size=1)[0] == 1: # random flip
input_patch = | np.flip(input_patch, axis=1) | numpy.flip |
import sys, math
import numpy as np
from scipy.misc import imresize as resize
from scipy.misc import toimage as toimage
import gym
from gym import spaces
from gym.spaces.box import Box
from gym.utils import seeding
from gym.envs.classic_control import rendering
import pyglet
from pyglet import gl
import tensorflow as tf
import keras.backend as K
from model import make_model
FPS = 50
SCREEN_X = 64
SCREEN_Y = 64
FACTOR = 8
HIDDEN_UNITS = 256
GAUSSIAN_MIXTURES = 5
Z_DIM = 32
initial_z = np.load('./data/initial_z.npz')
initial_mu = initial_z['initial_mu']
initial_log_var = initial_z['initial_log_var']
initial_mu_log_var = [list(elem) for elem in zip(initial_mu, initial_log_var)]
def get_pi_idx(x, pdf):
    # samples from a categorical distribution
N = pdf.size
accumulate = 0
for i in range(0, N):
accumulate += pdf[i]
if (accumulate >= x):
return i
random_value = np.random.randint(N)
#print('error with sampling ensemble, returning random', random_value)
return random_value
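# Editor's sketch (illustrative): get_pi_idx maps a uniform draw onto the index
# of a categorical pdf; the pdf below is an arbitrary example. Guarded so it
# only runs when this file is executed directly.
if __name__ == "__main__":
    _pdf = np.array([0.1, 0.2, 0.7])
    print("sampled mixture index:", get_pi_idx(np.random.rand(), _pdf))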
class CarRacingDream(gym.Env):
metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second' : FPS
}
def __init__(self, model):
self.observation_space = Box(low=-50., high=50., shape=(model.rnn.z_dim,) , dtype = np.float32) # , dtype=np.float32
self.action_space = spaces.Box( np.array([-1,0,0]), np.array([+1,+1,+1]) , dtype = np.float32) # steer, gas, brake
self.seed()
self.model = model
self.viewer = None
self.t = None
self.z = None
self.h = None
self.c = None
self.previous_reward = None
self.reset()
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def sample_z(self, mu, log_sigma):
z = mu + (np.exp(log_sigma)) * self.np_random.randn(*log_sigma.shape)
return z
def reset(self):
idx = self.np_random.randint(0, len(initial_mu_log_var))
init_mu, init_log_var = initial_mu_log_var[idx]
init_log_sigma = init_log_var / 2
self.z = self.sample_z(init_mu, init_log_sigma)
self.h = np.zeros(HIDDEN_UNITS)
self.c = np.zeros(HIDDEN_UNITS)
self.previous_reward = 0
self.t = 0
return self.z
def close(self):
if self.viewer is not None:
self.viewer.close()
self.viewer = None
def get_mixture_coef(self, z_pred):
log_pi, mu, log_sigma = np.split(z_pred, 3, 1)
log_pi = log_pi - np.log(np.sum(np.exp(log_pi), axis = 1, keepdims = True))
return log_pi, mu, log_sigma
def sample_next_mdn_output(self, action):
d = GAUSSIAN_MIXTURES * Z_DIM
z_dim = self.model.rnn.z_dim
input_to_rnn = [np.array([[np.concatenate([self.z, action, [self.previous_reward]])]]),np.array([self.h]),np.array([self.c])]
out = self.model.rnn.forward.predict(input_to_rnn)
y_pred = out[0][0][0]
new_h = out[1][0]
new_c = out[2][0]
mdn_pred = y_pred[:(3*d)]
rew_pred = y_pred[-1]
mdn_pred = np.reshape(mdn_pred, [-1, GAUSSIAN_MIXTURES * 3])
log_pi, mu, log_sigma = self.get_mixture_coef(mdn_pred)
chosen_log_pi = np.zeros(z_dim)
chosen_mu = | np.zeros(z_dim) | numpy.zeros |
import numpy as np
from openrave_manager import OpenraveManager
from potential_point import PotentialPoint
class OpenraveRLInterface:
# class StepResult(enum.Enum):
# free_space = 1
# collision = 2
# close_to_goal = 3
def __init__(self, config):
self.action_step_size = config['openrave_rl']['action_step_size']
self.goal_sensitivity = config['openrave_rl']['goal_sensitivity']
self.keep_alive_penalty = config['openrave_rl']['keep_alive_penalty']
self.truncate_penalty = config['openrave_rl']['truncate_penalty']
self.openrave_manager = OpenraveManager(
config['openrave_rl']['segment_validity_step'], PotentialPoint.from_config(config))
self.current_joints = None
self.goal_joints = None
self.start_joints = None
self.traj = None
def is_below_goal_sensitivity(self, start_joints, goal_joints):
start_pose = self.openrave_manager.get_target_pose(start_joints)
goal_pose = self.openrave_manager.get_target_pose(goal_joints)
pose_distance = np.linalg.norm(np.array(start_pose) - np.array(goal_pose))
return pose_distance < self.goal_sensitivity
def start_specific(self, traj, verify_traj=True):
self.traj = traj
start_joints = traj[0]
goal_joints = traj[-1]
# assert path is legal
if verify_traj:
step_size = self.action_step_size + 0.00001
for i in range(len(traj)-1):
step_i_size = np.linalg.norm(np.array(traj[i]) - np.array(traj[i+1]))
assert step_i_size < step_size, 'step_i_size {}'.format(step_i_size)
steps_required_for_motion_plan = len(traj)
self.current_joints = | np.array(start_joints) | numpy.array |
import math
# import igraph as ig
import random
import time
from collections import defaultdict
import matplotlib.pyplot as plt
import numpy as np
import sdeint
from IPython.display import clear_output
from scipy.integrate import odeint
from scipy.interpolate import UnivariateSpline
from scipy.special import expit as sigmoid
from sklearn.metrics import roc_auc_score
from tqdm import tqdm
def plot_trajectories(data, pred, graph, title=[1, 2.1]):
fig, axs = plt.subplots(1, 3, figsize=(10, 2.3))
fig.tight_layout(pad=0.2, w_pad=2, h_pad=3)
axs[0].plot(data.squeeze())
axs[1].plot(pred.squeeze())
i = 1
axs[1].set_title("Iteration = %i" % title[0] + ", " + "Loss = %1.3f" % title[1])
cax = axs[2].matshow(graph)
fig.colorbar(cax)
plt.show()
# plt.savefig('../Giff/fig'+i+'.png')
def compute_derivatives(y, k=4, s=4, t=None):
"""Compute derivatives of univariate stochastic process by interpolating trajectory with
univariate splines.
Args:
        t, y (np.ndarray): time indices t of time series y
Returns:
dy/dt (np.ndarray): derivative of y(t) evaluated at t
"""
    if t is None:
t = np.arange(y.shape[0])
temp_list = []
for i in range(y.shape[1]):
spl = UnivariateSpline(t, y[:, i], k=k, s=s) # s=0)
derspl = spl.derivative()
temp_list.append(derspl(t))
return np.transpose(
np.array(temp_list)
) # shape is number of time points x number of variables
def compute_spline(y, k=3, s=0.1):
"""Compute univariate stochastic process by interpolating trajectory with
univariate splines.
Args:
        t, y (np.ndarray): time indices t of time series y
    Returns:
        y_hat (np.ndarray): spline interpolation of y evaluated at t
"""
t = np.arange(y.shape[0])
temp_list = []
for i in range(y.shape[1]):
spl = UnivariateSpline(t, y[:, i], k=k, s=s)
temp_list.append(spl(t))
return np.transpose(np.array(temp_list))
def make_var_stationary(beta, radius=0.97):
"""Rescale coefficients of VAR model to make stable."""
p = beta.shape[0]
lag = beta.shape[1] // p
bottom = np.hstack((np.eye(p * (lag - 1)), np.zeros((p * (lag - 1), p))))
beta_tilde = np.vstack((beta, bottom))
eigvals = np.linalg.eigvals(beta_tilde)
max_eig = max(np.abs(eigvals))
nonstationary = max_eig > radius
if nonstationary:
return make_var_stationary(0.95 * beta, radius)
else:
return beta
def simulate_var(p, T, lag, sparsity=0.2, beta_value=1.0, sd=0.1, seed=0):
if seed is not None:
np.random.seed(seed)
# Set up coefficients and Granger causality ground truth.
GC = np.eye(p, dtype=int)
beta = np.eye(p) * beta_value
num_nonzero = int(p * sparsity) - 1
for i in range(p):
choice = np.random.choice(p - 1, size=num_nonzero, replace=False)
choice[choice >= i] += 1
beta[i, choice] = beta_value
GC[i, choice] = 1
beta = np.hstack([beta for _ in range(lag)])
beta = make_var_stationary(beta)
# Generate data.
burn_in = 100
errors = np.random.normal(scale=sd, size=(p, T + burn_in))
X = np.zeros((p, T + burn_in))
X[:, :lag] = errors[:, :lag]
for t in range(lag, T + burn_in):
X[:, t] = np.dot(beta, X[:, (t - lag) : t].flatten(order="F"))
        X[:, t] += errors[:, t - 1]
return X.T[burn_in:], beta, GC
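# Editor's sketch (illustrative): simulate a small VAR system and check the
# returned shapes. The arguments are arbitrary examples, not recommended values.
if __name__ == "__main__":
    _X, _beta, _GC = simulate_var(p=5, T=200, lag=3, seed=0)
    print(_X.shape)   # (200, 5): time points x variables
    print(_GC.shape)  # (5, 5): Granger-causality ground truth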
def lorenz(x, t, F=5):
"""Partial derivatives for Lorenz-96 ODE."""
p = len(x)
dxdt = np.zeros(p)
for i in range(p):
dxdt[i] = (x[(i + 1) % p] - x[(i - 2) % p]) * x[(i - 1) % p] - x[i] + F
return dxdt
def simulate_lorenz_96(
p, T, sigma=0.5, F=10.0, delta_t=0.1, sd=0.1, burn_in=1000, seed=None
):
if seed is not None:
np.random.seed(seed)
def GG(x, t):
p = len(x)
return np.diag([sigma] * p)
# Use scipy to solve ODE.
x0 = np.random.normal(scale=0.01, size=p)
t = np.linspace(0, (T + burn_in) * delta_t, T + burn_in)
# X = odeint(lorenz, x0, t, args=(F,))
# X += np.random.normal(scale=sd, size=(T + burn_in, p))
X = sdeint.itoint(lorenz, GG, x0, t)
# Set up Granger causality ground truth.
GC = np.zeros((p, p), dtype=int)
for i in range(p):
GC[i, i] = 1
GC[i, (i + 1) % p] = 1
GC[i, (i - 1) % p] = 1
GC[i, (i - 2) % p] = 1
return X[burn_in:], GC
def lotkavolterra(x, t, r, alpha):
"""Partial derivatives for Lotka-Volterra ODE.
Args:
- r (np.array): vector of self-interaction
- alpha (pxp np.array): matrix of interactions"""
p = len(x)
dxdt = np.zeros(p)
for i in range(p):
dxdt[i] = r[i] * x[i] * (1 - np.dot(alpha[i], x))
return dxdt
def simulate_lotkavolterra(
p, T, r, alpha, delta_t=0.1, sd=0.01, burn_in=1000, seed=None
):
if seed is not None:
np.random.seed(seed)
# Use scipy to solve ODE.
x0 = np.random.normal(scale=0.01, size=p) + 0.25
x0 = np.array([0.0222, 0.0014, 0.0013, 0.0008])
t = np.linspace(0, (T + burn_in) * delta_t, T + burn_in)
X = odeint(lotkavolterra, x0, t, args=(r, alpha,))
X += np.random.normal(scale=sd, size=(T + burn_in, p))
# Set up Granger causality ground truth.
GC = (alpha != 0) * 1
np.fill_diagonal(GC, 1)
return X[burn_in:], GC
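# Editor's sketch (illustrative): building the r vector and alpha interaction
# matrix expected by simulate_lotkavolterra (note the hard-coded x0 assumes
# p=4). All numbers below are placeholders.
if __name__ == "__main__":
    _p = 4
    _r = 0.5 * np.ones(_p)
    _alpha = np.eye(_p)                 # self-limitation on the diagonal
    _alpha[0, 1] = _alpha[1, 0] = 0.2   # one illustrative interaction pair
    _X, _GC = simulate_lotkavolterra(_p, T=100, r=_r, alpha=_alpha,
                                     burn_in=100, seed=0)
    print(_X.shape, _GC.shape)          # (100, 4) and (4, 4)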
def rossler(x, t, a=0, eps=0.1, b=4, d=2):
"""Partial derivatives for rossler ODE."""
p = len(x)
dxdt = np.zeros(p)
dxdt[0] = a * x[0] - x[1]
dxdt[p - 2] = x[(p - 3)]
dxdt[p - 1] = eps + b * x[(p - 1)] * (x[(p - 2)] - d)
for i in range(1, p - 2):
dxdt[i] = np.sin(x[(i - 1)]) - np.sin(x[(i + 1)])
return dxdt
def simulate_rossler(
p,
T,
sigma=0.5,
a=0,
eps=0.1,
b=4,
d=2,
delta_t=0.05,
sd=0.1,
burn_in=1000,
seed=None,
):
if seed is not None:
np.random.seed(seed)
def GG(x, t):
p = len(x)
return np.diag([sigma] * p)
# Use scipy to solve ODE.
x0 = np.random.normal(scale=0.01, size=p)
t = np.linspace(0, (T + burn_in) * delta_t, T + burn_in)
# X = odeint(rossler, x0, t, args=(a,eps,b,d,))
# X += np.random.normal(scale=sd, size=(T + burn_in, p))
X = sdeint.itoint(rossler, GG, x0, t)
# Set up Granger causality ground truth.
GC = np.zeros((p, p), dtype=int)
GC[0, 0] = 1
GC[0, 1] = 1
GC[p - 2, p - 3] = 1
GC[p - 1, p - 1] = 1
GC[p - 1, p - 2] = 1
for i in range(1, p - 2):
# GC[i, i] = 1
GC[i, (i + 1)] = 1
GC[i, (i - 1)] = 1
return 400 * X[burn_in:], GC
def tumor_vaccine(
x,
t,
c2,
t1,
a0=0.1946,
a1=0.3,
c1=100,
c3=300,
delta0=0.00001,
delta1=0.00001,
d=0.0007,
f=0.62,
r=0.01,
):
"""Partial derivatives for rossler ODE."""
dxdt = np.zeros(5)
c0 = 1 / 369
dxdt[0] = (
a0 * x[0] * (1 - c0 * x[0])
- delta0 * x[0] * x[2] / (1 + c1 * x[1])
- delta0 * x[0] * x[4]
)
dxdt[1] = a1 * (x[0] ** 2) / (c2 + x[0] ** 2) - d * x[1]
dxdt[2] = (
f * x[2] * x[0] / (1 + c3 * x[0] * x[1])
- r * x[2]
- delta0 * x[3] * x[2]
- delta1 * x[2]
)
dxdt[3] = r * x[2] - delta1 * x[3]
if math.isclose(t, t1, abs_tol=0.5):
dxdt[4] = 5000 - delta1 * x[4]
else:
dxdt[4] = -delta1 * x[4]
return dxdt
def simulate_tumor(T, c2=300, t1=3, delta_t=0.05, sd=0.1, burn_in=0, seed=None):
if seed is not None:
np.random.seed(seed)
# Use scipy to solve ODE.
x0 = np.zeros(5)
x0[0] = 3
x0[1] = 0
x0[2] = 100
x0[3] = 0
x0[4] = 0
t = np.linspace(0, (T + burn_in) * delta_t, T + burn_in)
X = odeint(tumor_vaccine, x0, t, args=(c2, t1,))
X += np.random.normal(scale=sd, size=(T + burn_in, 5))
# Set up Granger causality ground truth.
p = 5
GC = np.zeros((p, p), dtype=int)
GC[0, 0] = 1
GC[0, 1] = 1
GC[p - 2, p - 3] = 1
GC[p - 1, p - 1] = 1
GC[p - 1, p - 2] = 1
for i in range(1, p - 2):
# GC[i, i] = 1
GC[i, (i + 1)] = 1
GC[i, (i - 1)] = 1
return X[burn_in:], GC
def glycolytic(
x,
t,
k1=0.52,
K1=100,
K2=6,
K3=16,
K4=100,
K5=1.28,
K6=12,
K=1.8,
kappa=13,
phi=0.1,
q=4,
A=4,
N=1,
J0=2.5,
):
"""Partial derivatives for Glycolytic oscillator model.
source:
https://www.pnas.org/content/pnas/suppl/2016/03/23/1517384113.DCSupplemental/pnas.1517384113.sapp.pdf
    Args:
    - x (np.array): current concentrations of the seven model species
    - t (float): time (unused; the system is autonomous)
    - remaining keyword arguments: kinetic constants of the oscillator"""
dxdt = np.zeros(7)
dxdt[0] = J0 - (K1 * x[0] * x[5]) / (1 + (x[5] / k1) ** q)
dxdt[1] = (
(2 * K1 * x[0] * x[5]) / (1 + (x[5] / k1) ** q)
- K2 * x[1] * (N - x[4])
- K6 * x[1] * x[4]
)
dxdt[2] = K2 * x[1] * (N - x[4]) - K3 * x[2] * (A - x[5])
dxdt[3] = K3 * x[2] * (A - x[5]) - K4 * x[3] * x[4] - kappa * (x[3] - x[6])
dxdt[4] = K2 * x[1] * (N - x[4]) - K4 * x[3] * x[4] - K6 * x[1] * x[4]
dxdt[5] = (
(-2 * K1 * x[0] * x[5]) / (1 + (x[5] / k1) ** q)
+ 2 * K3 * x[2] * (A - x[5])
- K5 * x[5]
)
dxdt[6] = phi * kappa * (x[3] - x[6]) - K * x[6]
return dxdt
def simulate_glycolytic(
T, sigma=0.5, delta_t=0.001, sd=0.01, burn_in=0, seed=None, scale=True
):
if seed is not None:
np.random.seed(seed)
def GG(x, t):
p = len(x)
return np.diag([sigma] * p)
x0 = np.zeros(7)
x0[0] = np.random.uniform(0.15, 1.6)
x0[1] = np.random.uniform(0.19, 2.16)
x0[2] = np.random.uniform(0.04, 0.2)
x0[3] = np.random.uniform(0.1, 0.35)
x0[4] = np.random.uniform(0.08, 0.3)
x0[5] = np.random.uniform(0.14, 2.67)
x0[6] = np.random.uniform(0.05, 0.1)
# Use scipy to solve ODE.
t = np.linspace(0, (T + burn_in) * delta_t, T + burn_in)
# X = odeint(glycolytic, x0, t)
# X += np.random.normal(scale=sd, size=(T + burn_in, 7))
X = sdeint.itoint(glycolytic, GG, x0, t)
# Set up ground truth.
GC = np.zeros((7, 7), dtype=int)
GC[0, :] = np.array([1, 0, 0, 0, 0, 1, 0])
GC[1, :] = np.array([1, 1, 0, 0, 1, 1, 0])
GC[2, :] = np.array([0, 1, 1, 0, 1, 1, 0])
GC[3, :] = | np.array([0, 0, 1, 1, 1, 1, 1]) | numpy.array |
#!/usr/bin/python
"""
Module : porcc.py
Authors : <NAME>
Institution : VLIZ (Vlaams Instituut voor de Zee)
Last Accessed : 9/23/2020
"""
__author__ = "<NAME>"
__version__ = "0.1"
__credits__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
import configparser
import itertools
import pickle
from importlib import resources
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn import metrics, linear_model, model_selection
from pyporcc import click_converter, utils
pd.plotting.register_matplotlib_converters()
plt.style.use('ggplot')
class PorCCModel:
def __init__(self, train_hq_df, train_lq_df, test_df):
"""
Find the click model
Parameters
----------
train_hq_df : DataFrame
DataFrame with the rows used for training the HQ model
train_lq_df : DataFrame
DataFrame with the rows used for training the LQ model
test_df : DataFrame
DataFrame to test the models
"""
self.ind_vars = ['Q', 'duration', 'ratio', 'XC', 'CF', 'BW']
self.dep_vars = ['P']
self.train_hq_df = train_hq_df
self.train_lq_df = train_lq_df
self.test_df = test_df
# Model definition, initialized to None until calculated
self.hq_params = None
self.hq_mod = None
self.lq_params = None
self.lq_mod = None
def pick_df(self, name):
"""
Pick the right DataFrame from the model
Parameters
----------
name : string
String of the right dataframe to pick. It can be 'hq', 'lq' or 'test'
Returns
-------
DataFrame correspondent to the name
"""
if name == 'hq':
df = self.train_hq_df
elif name == 'lq':
df = self.train_lq_df
elif name == 'test':
df = self.test_df
else:
raise Exception('This is not a valid Data Frame name')
return df
def hq_lq_separation(self, df, train_size=0.2):
"""
Separate the df in hq_train, lq_train and test
Parameters
----------
df : DataFrame
Whole dataframe to be separated
train_size : float
From 0 to 1, percentage to use to train
Returns
-------
DataFrame for hq model, DataFrame for loq model and DataFrame for testing
"""
train, self.test_df = model_selection.train_test_split(df, train_size=train_size)
self.train_hq_df = train[train['P'] != 2]
self.train_lq_df = train[train['P'] != 1]
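        # Map the raw P codes onto binary targets for each sub-model:
        # noise (3) -> 0 in both training sets, and LQ clicks (2) -> 1
        # in the LQ set.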
        self.train_hq_df['P'] = self.train_hq_df['P'].replace(3, 0)
        self.train_lq_df['P'] = self.train_lq_df['P'].replace(3, 0)
        self.train_lq_df['P'] = self.train_lq_df['P'].replace(2, 1)
return self.train_hq_df, self.train_lq_df, self.test_df
def load_model_from_config(self, configfile_path):
"""
Load PorCC model coefficients
Parameters
----------
configfile_path : string or Path
.ini file where the coefficients of the LQ and the HQ models are specified
"""
        config = configparser.ConfigParser()
        config.read(configfile_path)
logitcoef_hq = np.array(config['MODEL']['logitCoefHQ'].split(',')).astype(float)
logitcoef_lq = np.array(config['MODEL']['logitCoefLQ'].split(',')).astype(float)
self.hq_params = np.array(config['MODEL']['hq_params'].split(','))
self.lq_params = np.array(config['MODEL']['lq_params'].split(','))
# Starts and fit to get the classes
logit_hq = linear_model.LogisticRegression()
reg_hq = logit_hq.fit(self.train_hq_df[self.hq_params], self.train_hq_df['P'])
logit_lq = linear_model.LogisticRegression()
reg_lq = logit_lq.fit(self.train_lq_df[self.lq_params], self.train_lq_df['P'])
# Cheat and force the coefficients
reg_hq.coef_ = np.array([logitcoef_hq[:-1]])
reg_hq.intercept_ = logitcoef_hq[-1]
self.hq_mod = reg_hq
reg_lq.coef_ = np.array([logitcoef_lq[:-1]])
reg_lq.intercept_ = logitcoef_lq[-1]
self.lq_mod = reg_lq
def find_best_model(self, name):
"""
Find the best model among the possible models
Parameters
----------
name : string
Set to 'HQ' or 'LQ' to find the according model
Returns
-------
columns, model. Columns are the names of the relevant parameters of the model. Model is an object instance
"""
# Get all the possible models
models = self.find_possible_models(name)
df = self.pick_df(name)
# Select the appropriate columns combination according to AIC
models = models[models[:, -1].argsort()]
comb, mod, aic = models[0]
# y = df['P']
x = df[comb]
if name == 'hq':
self.hq_mod = mod
self.hq_params = x.columns
else:
self.lq_mod = mod
self.lq_params = x.columns
print('The winning combination for %s is %s. AIC: %s' % (name, comb, aic))
# print(mod.summary())
return x.columns, mod
def find_possible_models(self, name):
"""
Create all the regression models
classification ('P') = 0:N, 1:LQ, 2:HQ
Parameters
----------
name : string
Set to 'HQ' or 'LQ' to find the according model
Returns
-------
List of all the possible models as instances (alreay fitted)
"""
models = []
df = self.pick_df(name)
# Go through all the possible combinations (from 1 to all the variables)
for i in np.arange(1, len(self.ind_vars) + 1):
var_combinations = itertools.combinations(self.ind_vars, i)
for comb in var_combinations:
# Regression model
y = df['P']
x = df[list(comb)]
logit = linear_model.LogisticRegression(max_iter=500, tol=1e-5, C=0.1)
reg = logit.fit(x, y)
# Calculate AIC
y_prob = reg.predict_proba(x)
aic = utils.aic_score(y, y_prob, len(comb))
# Append the model
models.append([list(comb), reg, aic])
return np.array(models)
def save(self, save_path):
"""
Save the current models in a file. It will saved as a pickle
Parameters
----------
save_path : string
Path where to save the models
"""
pickle.dump(self, open(save_path, 'wb'))
def calculate_clicks_params(self, df_name, fs, click_model_path, save_path=None):
"""
Add to the df the click parameters calculated by the Click Class
Parameters
----------
df_name : string
name of the dataframe to pick. It can be 'hq', 'lq' or 'test'
fs : int
Sampling frequency of the dataframe entries
click_model_path : string or Path
Path to the wav file containing the click model
save_path : string or Path
Path where to save the output (should be pickle extension)
"""
# Pick the right df
df = self.pick_df(df_name)
# Pass the sampling frequency as metadata
df.fs = fs
# Init a converter to calculate all the params
converter = click_converter.ClickConverter(click_model_path, self.ind_vars)
df_clicks = converter.clicks_df(df)
df_joint = df.join(df_clicks, lsuffix='_mat', rsuffix='')
if save_path is not None:
df_joint.to_pickle(save_path)
return df_joint
class PorCC:
def __init__(self, load_type, class_column='pyPorCC', **kwargs):
"""
Start the classifier
If load_type is set to 'manual', loads the models from the config file. Then config_file must be specified
If load_type is set to 'trained_model', loads the trained model.
Then hq_mod, lq_mod, hq_params, lq_params have to be specified
load_type : string
'manual' or 'trained_model'
"""
self.th1 = 0.9999 # threshold for HQ clicks
self.th2 = 0.55 # threshold for LQ clicks
self.lowcutfreq = 100e3 # Lowcut frequency
self.highcutfreq = 160e3 # Highcut frequency
self.class_column = class_column
self.load_type = load_type
if load_type == 'manual':
            if kwargs['config_file'] == 'default':
with resources.path('pyporcc.models', 'log_models.ini') as config_path:
self.manual_models(config_path)
else:
self.manual_models(kwargs['config_file'])
else:
for key, val in kwargs.items():
self.__dict__[key] = val
def manual_models(self, configfile_path):
"""
Load the coefficients of the models to calculate the probability manually
Parameters
----------
configfile_path : string or Path
Path to the config file with the coefficients for the models
"""
config = configparser.ConfigParser()
config.read(configfile_path)
hq_coef = np.array(config['MODEL']['logitCoefHQ'].split(',')).astype(float)
lq_coef = np.array(config['MODEL']['logitCoefLQ'].split(',')).astype(float)
self.hq_mod = ManualLogit(hq_coef)
self.lq_mod = ManualLogit(lq_coef)
self.hq_params = np.array(config['MODEL']['hq_params'].split(','))
self.lq_params = np.array(config['MODEL']['lq_params'].split(','))
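        # Editor's note: illustrative layout of the .ini file parsed above
        # (section and key names are taken from this method; the values are
        # placeholders, not real model coefficients):
        #   [MODEL]
        #   logitCoefHQ = 1.2,-0.4,0.03,-5.1
        #   logitCoefLQ = 0.8,-0.2,-3.7
        #   hq_params = Q,duration,const
        #   lq_params = Q,const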
def classify_click(self, click):
"""
Classify the click in HQ, LQ, N
Parameters
__________
click : Click object
Click to classify
"""
x = pd.DataFrame(data={'Q': click.Q, 'duration': click.duration, 'ratio': click.ratio, 'XC': click.xc,
'CF': click.cf, 'PF': click.pf, 'BW': click.bw})
porps = self.classify_row(x)
return porps
def classify_row(self, x):
"""
Classify one row according to the params [PF, CF, Q, XC, duration, ratio, BW]
Parameters
----------
x : pandas row or dictionary
Row to be classified
"""
# Add the independent variable
x.at['const'] = 1
if (x['CF'] > self.lowcutfreq) and (x['CF'] < self.highcutfreq) and (x['Q'] > 4):
# Evaluate the model on the given x
prob_hq = self.hq_mod.predict_proba(x[self.hq_params])[0][1]
# Assign clip to a category
if prob_hq >= self.th1:
# HQ click
porps = 1
else:
prob_lq = self.lq_mod.predict_proba(x[self.lq_params])[0][1]
if prob_lq > self.th2:
# LQ click
porps = 2
else:
# HF Noise
porps = 3
else:
porps = 3
return porps
def classify_matrix(self, df):
"""
Classify according to the params [PF, CF, Q, XC, duration, ratio, BW]
Parameters
----------
df : DataFrame
DataFrame to be classified. Parameters PF, CF, Q, XC, duration, ratio and BW must be specified
"""
# Add the independent variable for the regression
# Initialize the prediction column
df = df.assign(const=1)
df[self.class_column] = 0
# Evaluate the model on the given x
df = df.assign(prob_hq=self.hq_mod.predict_proba(df[self.hq_params])[:, 1],
prob_lq=self.lq_mod.predict_proba(df[self.lq_params])[:, 1])
# Decide
loc_idx = (df['CF'] > self.lowcutfreq) & (df['CF'] < self.highcutfreq) & (df['Q'] > 4)
# Add remove duration > 450
df.loc[~loc_idx, self.class_column] = 3 # N Clicks
df.loc[loc_idx & (df['prob_hq'] > self.th1), self.class_column] = 1 # HQ Clicks
df.loc[loc_idx & (df['prob_hq'] < self.th1) & (df['prob_lq'] > self.th2), self.class_column] = 2 # LQ Clicks
df.loc[loc_idx & (df['prob_hq'] < self.th1) & (df['prob_lq'] <= self.th2), self.class_column] = 3 # N Clicks
return df.drop(columns=['const'])
def predict(self, df):
"""
Classify and return the y value
Parameters
----------
df : DataFrame
"""
y_pred = self.classify_matrix(df)[self.class_column]
return y_pred
def predict_proba(self, df):
"""
Return the probability of being classified as HQ or LQ
Parameters
----------
df : DataFrame
DataFrame to predict. Needs to have the columns passed as hq and lq parameters
Returns
-------
DataFrame with prob_hq, prob_lq and total PorCC probability
"""
# Add the independent variable for the regression
df = df.assign(const=1)
# Initialize the prediction column
df = df.assign(prob=0)
# Evaluate the model on the given x
df = df.assign(prob_hq=self.hq_mod.predict_proba(df[self.hq_params])[:, 1])
df = df.assign(prob_lq=self.lq_mod.predict_proba(df[self.lq_params])[:, 1])
# Decide
loc_idx = (df['CF'] > self.lowcutfreq) & (df['CF'] < self.highcutfreq) & (df['Q'] > 4)
df.loc[~loc_idx, 'prob'] = 0
df.loc[loc_idx & (df['prob_hq'] > self.th1), 'prob'] = df.loc[loc_idx & (df['prob_hq'] > self.th1), 'prob_hq']
df.loc[loc_idx & (df['prob_hq'] < self.th1), 'prob'] = df.loc[loc_idx & (df['prob_hq'] < self.th1), 'prob_lq']
return df[['prob_hq', 'prob_lq', 'prob']]
def test_classification(self, test_df, col_name='ClassifiedAs'):
"""
Test the algorithm. With the same parameters, test the prediction output of the algorithm
Parameters
----------
test_df : DataFrame
DataFrame to test, with the necessary columns for classify
col_name : string
Column name of the correct classification
"""
predicted_df = self.classify_matrix(test_df)
# Compare self.class_column vs 'ClassifiedAs' (MATLAB)
error = np.sum(test_df[col_name] != predicted_df[self.class_column]) / len(test_df)
return error, predicted_df
def plot_roc_curves(self, df, dep_var='ManualAsign'):
"""
Plot the ROC curves for HQ, LQ and All
Parameters
---------
df : DataFrame
DataFrame with the classification
dep_var : string
Name of the column where the classification
"""
fig, ax = plt.subplots(1, 3)
# Filter the data frame so it only has HQ-Noise, LQ-Noise and All-Noise
hq_noise = df.loc[df[dep_var] != 2]
lq_noise = df.loc[df[dep_var] != 1]
self._plot_roc_curves(df=hq_noise, dep_var=dep_var, ax0=ax[0])
ax[0].set_title('HQ vs Noise')
self._plot_roc_curves(df=lq_noise, dep_var=dep_var, ax0=ax[1])
ax[1].set_title('LQ vs Noise')
self._plot_roc_curves(df=df, dep_var=dep_var, ax0=ax[2])
ax[2].set_title('All vs Noise')
plt.tight_layout()
plt.show()
plt.close()
def _plot_roc_curves(self, df, dep_var='ManualAsign', ax0=None):
"""`
Plot the ROC curves for each part of the algorithm
Parameters
----------
df : DataFrame
DataFrame with the information
dep_var : string
Name of the column of the dependent variable where the correct labels are
"""
y_test = self.convert2binary(df[dep_var])
if ax0 is None:
fig, ax = plt.subplots()
else:
ax = ax0
probs = self.predict_proba(df)
# Plot ROC
fpr_hq, tpr_hq, thresholds = metrics.roc_curve(y_test, probs['prob_hq'], drop_intermediate=True)
fpr_lq, tpr_lq, thresholds = metrics.roc_curve(y_test, probs['prob_lq'], drop_intermediate=True)
fpr_all, tpr_all, thresholds = metrics.roc_curve(y_test, probs['prob'], drop_intermediate=True)
ax.plot(fpr_hq, tpr_hq, label='PorCC HQ')
ax.plot(fpr_lq, tpr_lq, label='PorCC LQ')
ax.plot(fpr_all, tpr_all, label='PorCC All')
if ax0 is None:
plt.tight_layout()
plt.show()
plt.close()
return ax
@staticmethod
def convert2binary(y):
"""
Return the y sample as binary (no porpoise / porpoise)
Parameters
----------
y : np.array
Array with 3 classes to be converted to 2 classes (from Noise, LQ and HQ to Porpoise/No Porpoise)
"""
y = y.replace(3, 0)
y = y.replace(2, 1)
return y
class ManualLogit:
def __init__(self, coef, th=0.5):
"""
Init a logit probability prediction class
Parameters
----------
coef : list or np.array
Coefficients of the model
th : float
From 0 to 1 threshold to make the classification decision
"""
self.coef_ = coef
self.th_ = th
def predict(self, x):
"""
Predict the classification of X
Parameters
----------
x : np.array
Array with the X coefficients to be predict the class
"""
proba = self.predict_proba(x)[0][:, 1]
y_pred = np.zeros(proba.shape)
y_pred[ | np.where(proba >= self.th_) | numpy.where |
'''
DESCRIPTION
----------
Make scatter plot of planet mass vs orbital period using exoplanet archive and TOI 837.
'''
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
from matplotlib.ticker import StrMethodFormatter
import pandas as pd, numpy as np
import os
from copy import deepcopy
from astropy.table import Table
from astropy.io import ascii
from astropy.coordinates import SkyCoord
import astropy.units as u
from astroquery.nasa_exoplanet_archive import NasaExoplanetArchive
from cdips.utils import today_YYYYMMDD
from aesthetic.plot import savefig, format_ax, set_style
def arr(x):
return np.array(x)
def plot_mass_vs_period_scatter(active_targets=1, specialyoung=1, show_legend=1):
set_style()
#
# columns described at
# https://exoplanetarchive.ipac.caltech.edu/docs/API_exoplanet_columns.html
#
ea_tab = NasaExoplanetArchive.query_criteria(
table="exoplanets", select="*", cache=True
)
#
# get systems with finite ages (has a value, and +/- error bar)
#
has_age_value = ~ea_tab['st_age'].mask
has_age_errs = (~ea_tab['st_ageerr1'].mask) & (~ea_tab['st_ageerr2'].mask)
has_rp_value = ~ea_tab['pl_rade'].mask
has_rp_errs = (~ea_tab['pl_radeerr1'].mask) & (~ea_tab['pl_radeerr2'].mask)
has_mp_value = ~ea_tab['pl_massj'].mask
has_mp_errs = (~ea_tab['pl_massjerr1'].mask) & (~ea_tab['pl_massjerr2'].mask)
rp_gt_0 = (ea_tab['pl_rade'] > 0)
mp_gt_0 = (ea_tab['pl_massj'] > 0)
transits = (ea_tab['pl_tranflag']==1)
sel = (
has_age_value & has_age_errs & has_mp_value & mp_gt_0
# has_rp_value & has_rp_errs & transits & rp_gt_0
)
t = ea_tab[sel]
tyoung = t[(t['st_age'] < 0.1*u.Gyr) & (t['st_age'] > 0*u.Gyr)]
#
# read params
#
age = t['st_age']
age_perr = t['st_ageerr1']
age_merr = np.abs(t['st_ageerr2'])
age_errs = np.array([age_perr, age_merr]).reshape(2, len(age))
mp = t['pl_massj']
# rp /= 11.2089 # used jupiter radii
period = t['pl_orbper']
#
# plot age vs rp. (age is on y axis b/c it has the error bars, and I at
# least skimmed the footnotes of Hogg 2010)
#
fig,ax = plt.subplots(figsize=(4,3))
label = (
'Exoplanet Archive'
)
print(f'Mean age unc: {np.mean(age_errs):.2f} Gyr')
print(f'Median age unc: { | np.median(age_errs) | numpy.median |
import pandas as pd
import numpy as np
import re
from nltk import word_tokenize
import nltk
from others.logging_utils import init_logger
from itertools import chain
import geojson
import json
from geopy import distance
from tqdm import tqdm
import os
import gc
def free_space(del_list):
for name in del_list:
if not name.startswith('_'):
del globals()[name]
gc.collect()
def sd(col, max_loss_limit=0.001, avg_loss_limit=0.001, na_loss_limit=0, n_uniq_loss_limit=0, fillna=0):
"""
    max_loss_limit - don't allow any float to lose precision more than this value. Any values are ok for GBT algorithms as long as you don't rely on exact unique values.
See https://en.wikipedia.org/wiki/Half-precision_floating-point_format#Precision_limitations_on_decimal_values_in_[0,_1]
avg_loss_limit - same but calculates avg throughout the series.
na_loss_limit - not really useful.
n_uniq_loss_limit - very important parameter. If you have a float field with very high cardinality you can set this value to something like n_records * 0.01 in order to allow some field relaxing.
"""
is_float = str(col.dtypes)[:5] == 'float'
na_count = col.isna().sum()
n_uniq = col.nunique(dropna=False)
try_types = ['float16', 'float32']
if na_count <= na_loss_limit:
try_types = ['int8', 'int16', 'float16', 'int32', 'float32']
for type in try_types:
col_tmp = col
# float to int conversion => try to round to minimize casting error
if is_float and (str(type)[:3] == 'int'):
col_tmp = col_tmp.copy().fillna(fillna).round()
col_tmp = col_tmp.astype(type)
max_loss = (col_tmp - col).abs().max()
avg_loss = (col_tmp - col).abs().mean()
na_loss = np.abs(na_count - col_tmp.isna().sum())
n_uniq_loss = np.abs(n_uniq - col_tmp.nunique(dropna=False))
if max_loss <= max_loss_limit and avg_loss <= avg_loss_limit and na_loss <= na_loss_limit and n_uniq_loss <= n_uniq_loss_limit:
return col_tmp
# field can't be converted
return col
def reduce_mem_usage_sd(df, deep=True, verbose=False, obj_to_cat=False):
numerics = ['int16', 'uint16', 'int32', 'uint32', 'int64', 'uint64', 'float16', 'float32', 'float64']
start_mem = df.memory_usage(deep=deep).sum() / 1024 ** 2
for col in tqdm(df.columns):
col_type = df[col].dtypes
# collect stats
na_count = df[col].isna().sum()
n_uniq = df[col].nunique(dropna=False)
# numerics
if col_type in numerics:
df[col] = sd(df[col])
# strings
if (col_type == 'object') and obj_to_cat:
df[col] = df[col].astype('category')
if verbose:
print(f'Column {col}: {col_type} -> {df[col].dtypes}, na_count={na_count}, n_uniq={n_uniq}')
new_na_count = df[col].isna().sum()
if (na_count != new_na_count):
print(f'Warning: column {col}, {col_type} -> {df[col].dtypes} lost na values. Before: {na_count}, after: {new_na_count}')
new_n_uniq = df[col].nunique(dropna=False)
if (n_uniq != new_n_uniq):
print(f'Warning: column {col}, {col_type} -> {df[col].dtypes} lost unique values. Before: {n_uniq}, after: {new_n_uniq}')
end_mem = df.memory_usage(deep=deep).sum() / 1024 ** 2
percent = 100 * (start_mem - end_mem) / start_mem
print('Mem. usage decreased from {:5.2f} Mb to {:5.2f} Mb ({:.1f}% reduction)'.format(start_mem, end_mem, percent))
return df
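# Editor's sketch (illustrative): applying the lossy downcaster to a small
# frame with made-up columns. Guarded so it only runs when executed directly.
if __name__ == "__main__":
    _df = pd.DataFrame({"a": np.arange(1000, dtype="int64"),
                        "b": np.random.rand(1000).astype("float64")})
    _df = reduce_mem_usage_sd(_df, verbose=True)
    print(_df.dtypes)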
def etl_1(data, url_):
    #function which returns anno as a number, otherwise NaN
def Anno_cleaner(x):
try:
return(float(x))
except:
return(np.nan)
#check if price has da inside price and return --> "Asta" otherwise "no_asta"
def asta(x):
asta = 'no_asta'
try:
if 'da' in x:
asta = 'asta'
except:
return(asta)
return(asta)
#Clean price from.. (Da, Symbol, .)
def clean_price(text):
try:
text = re.sub("da", "", text)
text = re.sub("€", "", text)
text = re.sub(r'\.', '', text)
except:
return(text)
return(text)
#Function which clean sconto by taking out parenthesis, %, -
def clean_sconto(text):
try:
text = re.sub(r"\(", "", text)
text = re.sub(r"\)", "", text)
text = re.sub(r'%', '', text)
text = re.sub(r'-', '', text)
except:
return(text)
return(text)
#Function which clean metri by taking out m2
def clean_metri(text):
try:
text = re.sub(r'm2','', text)
except:
return(text)
return(text)
#function which fill NA with mancante
# def missing_filler(data, char, label = 'mancante'):
# for col in char:
# data[col] = data[col].fillna('mancante')
# return(data)
#Clean out from every special character in special_list
def clean_special(x):
special_list = [r'\:', r'\.', r'\-', r'\_', r'\;', r'\,', r'\'']
for symbol in special_list:
x = re.sub(symbol, ' ', x)
return(x)
#find position from description
def position_cleaner(x):
def cl1(x):
x = re.sub(r'\,', '', x)
x = re.sub(r' +', ' ', x)
return(x)
x = re.sub(r'(\,) +\d+', lambda s: cl1(s.group()), x)
return(x)
#clean string
def formatter(x):
x = x.strip()
x = re.sub(r'\s+', ' ', x)
return(x)
#Clean error from short name
def error_cleaner(x):
x = re.sub(r'v\.le', 'viale', x)
return(x)
#
def address_exctractor(x):
termini_ = ['via privata', 'via', 'viale', 'piazzetta', 'foro', 'cavalcavia',
'giardino', 'vicolo', 'passaggio', 'sito', 'parco', 'sottopasso',
'piazza', 'piazzale', 'largo', 'corso', 'alzaia', 'strada', 'ripa',
'galleria', 'foro', 'bastioni']
x = x.lower()
#find position
x = position_cleaner(x)
#clean error
x = error_cleaner(x)
#find address after termini_
address = ''
for lab_ in termini_:
#search for match
temp = re.search(r'\b%s\b' %lab_, x)
#find address by matching
if (temp is not None):
temp = re.search(r'%s (.*?)\,' %lab_, x)
try:
address_regex = temp.group(0) #if lab_ is not inside the name of the address continue else skip
address = clean_special(address_regex)
except:
pass
#clean ending string
address = formatter(address)
return(address)
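        # Editor's sketch of the intended behaviour (illustrative input only):
        #   address_exctractor("Trilocale in vendita, via Roma 12, Milano")
        #   matches the "via" keyword and returns "via roma 12".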
#take out number from address to get nome via
def nome_via(x):
return(formatter(re.sub(r'\d+', '', x)))
#take out text and keep number
def numero_via(x):
x = x.lower()
x = re.sub('via 8 ottobre 2001', '', x) #via 8 ottobre exception
digit = re.search(r'\d+', x)
try:
x = digit.group()
except:
return('')
return(re.sub(r'\s+', '', x))
# char = ['Stanze', 'Bagni', 'Piano', 'Garantito', 'stato', 'classe_energetica', 'piano']
data = data.reset_index(drop = True)
url_ = url_.reset_index(drop = True)
#Clean Anno
url_['Anno_Costruzione'] = url_['Anno_Costruzione'].apply(lambda x: Anno_cleaner(x))
url_['Anno_Costruzione'] = url_['Anno_Costruzione'].convert_dtypes()
data = pd.concat([data, url_], axis = 1)
#Clean Prezzo
data['asta'] = data['Prezzo'].apply(lambda s: asta(s))
data['Prezzo'] = data['Prezzo'].apply(lambda s: clean_price(s)).astype(float)
data['Prezzo_Vecchio'] = data['Prezzo_Vecchio'].apply(lambda s: clean_price(s)).astype(float)
data['Sconto'] = data['Sconto'].apply(lambda s: clean_sconto(s)).astype(float)
#Clean Metri
data['Metri'] = data['Metri'].apply(lambda s: clean_metri(s)).astype(float)
data['Prezzo_al_mq'] = data['Prezzo']/data['Metri']
#Clean Piano
data['Piano'] = data['Piano'].replace({'T': 'Terra', 'R': 'Piano Rialzato', 'S': 'Seminterrato', 'A': 'Ultimo'})
# data = missing_filler(data, char)
#extract Indirizzo, Nome Via and numero via
data['indirizzo'] = data['Posizione'].apply(lambda x: address_exctractor(x))
data['nome_via'] = data.indirizzo.apply(lambda s: nome_via(s))
data['numero_via'] = data.indirizzo.apply(lambda s: numero_via(s))
return(data)
def etl_2(args, data):
#Function which calculate intersection score betweem
def scorer(segment_1, segment_2, missing_pos, indirizzo, original, logger):
vec = []
#cycle over each missing position
for m_1 in missing_pos:
vec_2 = np.zeros(indirizzo.shape[0])
#calculate intersection between segment_1, segment_1 to normalize
intersection_top = segment_1[m_1] & segment_1[m_1]
#calculate score of intersection to normalize
top_ = score_intersection(intersection_top)
#iterate over each indirizzo to calculate score of intersection
for m_2 in range(indirizzo.shape[0]):
#calculate intersection set
intersection_try = segment_1[m_1] & segment_2[m_2]
#calculate score
vec_2[m_2] = score_intersection(intersection_try)
#find max
max_ = np.max(vec_2)
#count how many are equal to max score
len_max = np.sum(vec_2 == max_)
#if normalize score assign new indirizzo
if max_/top_ > args.treshold:
if len_max>1:
#in case of ties take indirizzo with nearest number address
number_ = number_intersection(segment_1[m_1], segment_2[vec_2 == max_].values)
#find which address is selected
pos = (np.where(vec_2 == max_)[0])[number_]
#add indirizzo
vec += [indirizzo[pos]]
#print correction with score
                    logger.info('Segment with error: {}; chosen street: {}; match: {}'.format(original[m_1], indirizzo[pos], max_/top_))
else:
#assign indirizzo with max score
vec += [indirizzo[ | np.argmax(vec_2) | numpy.argmax |
import struct
import numpy as np
import json
class BatchTable(object):
def __init__(self):
self.header = BatchTableHeader()
self.body = BatchTableBody()
def add_property(self, property_name, array):
"""
Parameters
----------
property_name : 'str'
name of the property
array : list
array of the properties
"""
self.header.add_property(property_name, array)
def add_binary_property(self, property_name, component_type, batch_type, array):
"""
Parameters
----------
property_name : 'str'
name of the property
component_type : str
"BYTE", "UNSIGNED_BYTE", "SHORT", "UNSIGNED_SHORT","INT","UNSIGNED_INT","FLOAT","DOUBLE"
batch_type : 'str'
'SCALAR', 'VEC2', 'VEC3', 'VEC4'
array : ndarray
array of the properties
"""
header = self.header
self.body.add_property(header, property_name, component_type, batch_type, array)
def add_batch_length(self, batch_length):
self.header.add_batch_length(batch_length)
# returns batch table as binary
def to_array(self, tile_type, ft_len):
header_arr = self.header.to_array(tile_type, ft_len)
bth_len = len(header_arr)
body_arr = self.body.to_array(self.header, tile_type, ft_len, bth_len)
if len(body_arr) == 0:
return header_arr
else:
bt_arr = np.concatenate((header_arr, body_arr))
return bt_arr
@staticmethod
def from_array(th, ft, array):
"""
Parameters
----------
th : TileHeader
array : numpy.array
Returns
-------
bt : BatchTable
"""
# build feature table
bt = BatchTable()
# build batch table header
bth_len = th.bt_json_byte_length
bth_arr = array[0:bth_len]
nbatch = None
if ft.header.batch_length !=None:
nbatch = ft.header.batch_length
elif ft.header.points_length != None:
nbatch = ft.header.points_length
elif ft.header.instances_length != None:
nbatch = ft.header.instances_length
bth = BatchTableHeader.from_array(bth_arr, nbatch)
bt.header = bth
# build batch table body
btb_len = th.bt_bin_byte_length
btb_arr = array[bth_len:bth_len + btb_len]
btb = BatchTableBody.from_array(bth, btb_arr)
bt.body = btb
return bt
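# Editor's sketch (illustrative, kept as comments because the helper classes
# used by BatchTable are defined further down this file): assembling a minimal
# batch table for a b3dm tile; property name and values are placeholders.
#   bt = BatchTable()
#   bt.add_batch_length(3)
#   bt.add_property("Height", [10.0, 12.5, 9.75])
#   arr = bt.to_array("b3dm", ft_len=0)   # uint8 array holding the JSON header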
class BatchTableHeader(object):
def __init__(self):
self.properties = {}
self.property_names = []
self.batch_length = None
def add_property(self, propertyName, array):
if type(array) == np.ndarray:
array = array.tolist()
self.properties[propertyName] = array
self.property_names.append(propertyName)
def add_batch_length(self, batch_length):
self.batch_length = batch_length
# returns batch table as binary
def to_array(self, tile_type, ft_len):
# convert dict to json string
bt_json = json.dumps(self.properties, separators=(',', ':'))
# header must be 4-byte aligned (refer to batch table documentation)
if tile_type == 'b3dm' or tile_type == 'pnts':
n = 28 + ft_len + len(bt_json)
elif tile_type == 'i3dm':
n = 32 +ft_len + len(bt_json)
if n%8 !=0:
bt_json += ' ' * (8 - n % 8)
# returns an array of binaries representing the batch table
        return np.frombuffer(bt_json.encode('utf-8'), dtype=np.uint8)
@staticmethod
def from_array(array, batch_length):
"""
Parameters
----------
array : numpy.array
"""
bth = BatchTableHeader()
bth.batch_length = batch_length
bt_json_str = ''.join([c.decode('UTF-8') for c in array.view('c')])
bt_json = json.loads(bt_json_str)
bth.properties = bt_json
bth.property_names = list(bt_json.keys())
return bth
class BatchTableBody(object):
def __init__(self):
self.property_arr = {}
def add_property(self, bth, property_name, component_type, batch_type, array):
"""
Parameters
----------
bth: BatchTableHeader()
batchtable header object
property_name : 'str'
name of the property
component_type : str
"BYTE", "UNSIGNED_BYTE", "SHORT", "UNSIGNED_SHORT","INT","UNSIGNED_INT","FLOAT","DOUBLE"
batch_type : 'str'
'SCALAR', 'VEC2', 'VEC3', 'VEC4'
array : ndarray
array of the properties
"""
if type(array) == np.ndarray:
array = array.tolist()
nbatch = bth.batch_length
narr = len(array)
if nbatch != narr:
raise Exception("number of array does not match batch length")
#figure out the offset
property_names = bth.property_names
prop_arr = self.property_arr
keys = list(prop_arr.keys())
offset = 0
for name in property_names:
if name in keys:
bcnt = len(prop_arr[name])
offset = offset + bcnt
bth.property_names.append(property_name)
bth.properties[property_name] = {'byteOffset' : offset, 'componentType' : component_type, 'type': batch_type}
if component_type == "BYTE":
com_type = np.byte
# prop_dt = create_dt(dt_names, np.byte)
elif component_type == "UNSIGNED_BYTE":
com_type = np.ubyte
elif component_type == "SHORT":
com_type = np.short
elif component_type == "UNSIGNED_SHORT":
com_type = np.ushort
elif component_type == "INT":
com_type = np.intc
elif component_type == "UNSIGNED_INT":
com_type = np.uintc
elif component_type == "FLOAT":
com_type = np.float32
elif component_type == "DOUBLE":
com_type = np.double
prop_arr = np.array(array, dtype=com_type).view('uint8')
self.property_arr[property_name] = prop_arr
# returns batch table as binary
def to_array(self, bth, tile_type, ft_len, bth_len):
prop_arr = self.property_arr
property_names = bth.property_names
keys = list(prop_arr.keys())
arr = | np.array([]) | numpy.array |
"""batemansolvers
Three solvers are enabled here to solve the Bateman equations:
(1) ODEINT solver
-----------------
Integrate a system of ordinary differential equations with RK45 adaptive time
mesh scheme
(2) EXPM solver
---------------
Compute the matrix exponential using Pade approximation
(3) CRAM solver
---------------
CHBV computes the direct action of the matrix exponential on
a vector: y = exp(H)*x. It uses the partial fraction expansion of
the uniform rational Chebyshev approximation of type (14,14).
About 14-digit accuracy is expected if the matrix H is symmetric
negative definite. The algorithm may behave poorly otherwise.
See also PADM, EXPOKIT.
<NAME> (<EMAIL>)
EXPOKIT: Software Package for Computing Matrix Exponentials.
ACM - Transactions On Mathematical Software, 24(1):130-156, 1998
"""
import numpy as np
from pyIsoDep.functions.checkerrors import _ispositive
from scipy.linalg import solve as linsolver
from scipy.linalg import expm
from scipy.integrate import odeint
# -----------------------------------------------------------------------------
# Coefficients and poles of the partial fraction expansion
# -----------------------------------------------------------------------------
# Coefficients for IPF Cram 14
C14_ALPHA = np.array([
+0.557503973136501826E+02 - 0.204295038779771857E+03j,
-0.938666838877006739E+02 + 0.912874896775456363E+02j,
+0.469965415550370835E+02 - 0.116167609985818103E+02j,
-0.961424200626061065E+01 - 0.264195613880262669E+01j,
+0.752722063978321642E+00 + 0.670367365566377770E+00j,
-0.188781253158648576E-01 - 0.343696176445802414E-01j,
+0.143086431411801849E-03 + 0.287221133228814096E-03j, ],
dtype=np.complex128)
C14_THETA = np.array([
-0.562314417475317895E+01 + 0.119406921611247440E+01j,
-0.508934679728216110E+01 + 0.358882439228376881E+01j,
-0.399337136365302569E+01 + 0.600483209099604664E+01j,
-0.226978543095856366E+01 + 0.846173881758693369E+01j,
+0.208756929753827868E+00 + 0.109912615662209418E+02j,
+0.370327340957595652E+01 + 0.136563731924991884E+02j,
+0.889777151877331107E+01 + 0.166309842834712071E+02j, ],
dtype=np.complex128)
C14_ALPHA0 = 0.183216998528140087E-11
class CramSolver:
"""CRAM depletion solver that uses incomplete partial factorization
A method that uses an incomplete partial factorization (IPF) for the
Chebyshev Rational Approximation Method (CRAM), as described in:
<NAME>, "`Higher-Order Chebyshev Rational Approximation Method and
Application to Burnup Equations
<https://doi.org/10.13182/NSE15-26>`_," Nucl. Sci. Eng., 182:3, 297-318.
Parameters
----------
alpha : numpy.ndarray
Complex residues of poles used in the factorization. Must be a
vector with even number of items.
theta : numpy.ndarray
Complex poles. Must have an equal size as ``alpha``.
alpha0 : float
Limit of the approximation at infinity
Attributes
----------
alpha : numpy.ndarray
Complex residues of poles :attr:`theta` in the incomplete partial
factorization. Denoted as :math:`\tilde{\alpha}`
theta : numpy.ndarray
Complex poles :math:`\theta` of the rational approximation
alpha0 : float
Limit of the approximation at infinity
"""
def __init__(self):
"""reset the number of partial factorization"""
self.alpha = -C14_ALPHA
self.theta = -C14_THETA
self.alpha0 = C14_ALPHA0
def solve(self, A, n0, dt):
"""Solve depletion equations using IPF CRAM
Parameters
----------
A : scipy.sparse.csr_matrix
            Sparse transmutation matrix ``A[j, i]`` describing rates at
which isotope ``i`` transmutes to isotope ``j``
n0 : numpy.ndarray
Initial compositions, typically given in number of atoms in some
material or an atom density
dt : float
Time [s] of the specific interval to be solved
Returns
-------
numpy.ndarray
Final compositions after ``dt``
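
        Examples
        --------
        A hedged sketch on a made-up two-nuclide chain A -> B (a small dense
        matrix is used purely for illustration; values are not from any data):

        >>> A = np.array([[-1.0e-3, 0.0], [1.0e-3, -2.0e-4]])
        >>> n0 = np.array([1.0e20, 0.0])
        >>> n1 = CramSolver().solve(A, n0, dt=3600.0)
        >>> n1.shape
        (2,)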
"""
H = A * dt
y = n0 * self.alpha0
ident = np.eye(A.shape[0])
for alpha, theta in zip(self.alpha, self.theta):
y += np.real(linsolver(H - theta*ident, alpha*n0))
y[y < 1E-25] = 0
return y
class expmSolver:
"""Built-in expm solver that relies on the pade approximation"""
def __init__(self):
"""reset values with a complete list of all the nuclides"""
pass
def solve(self, mtx, n0, dt):
"""Solve the exponential of a matrix"""
n1 = np.dot(expm(mtx * dt), n0)
return n1
class adaptiveOdeintSolver:
def __dNdt(self, n0, t, idx):
"""function produces time rate of change for each isotope"""
# Obtain the interpolated fission energy, xs, and transmutation mtx
# -----------------------------------------------------------------
fissE, sigf, transmutationmtx = self.dep._getInterpXS(t,\
self.xsinterp)
# flux is used directly
# -----------------------------------------------------------------
if not self.dep.flagPower:
# calculate power for this step
self.dep.power[idx] = (self.dep.flux[idx] * sigf * n0\
* fissE * self.dep.volume).sum()
# power is provided and needs to be converted to flux
# -----------------------------------------------------------------
else:
self.dep.flux[idx] = self.dep.power[idx] / (
sigf * n0 * fissE * self.dep.volume).sum()
# define the overall matrix to represent Bateman equations
# -----------------------------------------------------------------
mtxA = transmutationmtx*self.dep.flux[idx] + self.dep.decaymtx
# solve and obtain the concentrations after a single depletion
# -----------------------------------------------------------------
dNdt = np.dot(mtxA, n0)
return dNdt
def __init__(self, dep, xsinterp, rtol=1E-10):
"""function initalized apdative time mesh odeint solver
Parameters
----------
dep : object
depletion solver object.
xsinterp : bool
flag for cross section interpolation.
rtol : float, optional
relative convergence tolerance of isotopic concentration. The
default is 1E-10.
Returns
-------
None.
"""
_ispositive(rtol, "relative convergence tolerance")
self.dep = dep
self.rtol = rtol
self.xsinterp = xsinterp
def solve(self, rtol=1.0e-10):
"""solve change in concentration with adaptive time mesh scheme"""
for idx, dt in enumerate(self.dep.timesteps):
self.dep.Nt[:, idx+1] = odeint(self.__dNdt,\
tuple(self.dep.Nt[:, idx]), | np.array([0,dt]) | numpy.array |
import batoid
from test_helpers import timer
import numpy as np
@timer
def test_normalized():
for _ in range(1000):
x = np.random.uniform()
y = np.random.uniform()
z = np.random.uniform()
w = np.random.uniform()
np.testing.assert_allclose(
np.linalg.norm(batoid.utils.normalized([x])),
1.0,
rtol=0, atol=1e-10
)
np.testing.assert_allclose(
np.linalg.norm(batoid.utils.normalized([x, y])),
1.0,
rtol=0, atol=1e-10
)
np.testing.assert_allclose(
np.linalg.norm(batoid.utils.normalized([x, y, z])),
1.0,
rtol=0, atol=1e-10
)
np.testing.assert_allclose(
np.linalg.norm(batoid.utils.normalized([x, y, z, w])),
1.0,
rtol=0, atol=1e-10
)
@timer
def test_gnomicDirCos():
np.random.seed(5)
u = np.random.uniform(-0.1, 0.1, size=1000)
v = np.random.uniform(-0.1, 0.1, size=1000)
# Test round trip
u1, v1 = batoid.utils.dirCosToGnomic(*batoid.utils.gnomicToDirCos(u, v))
np.testing.assert_allclose(u, u1, rtol=1e-10, atol=1e-12)
np.testing.assert_allclose(v, v1, rtol=1e-10, atol=1e-12)
# Test round trip in the other direction
alpha = np.random.uniform(-0.1, 0.1, size=1000)
beta = np.random.uniform(-0.1, 0.1, size=1000)
gamma = np.sqrt(1 - alpha**2 - beta**2)
alpha1, beta1, gamma1 = batoid.utils.gnomicToDirCos(
*batoid.utils.dirCosToGnomic(alpha, beta, gamma)
)
np.testing.assert_allclose(alpha, alpha1, rtol=1e-10, atol=1e-12)
np.testing.assert_allclose(beta, beta1, rtol=1e-10, atol=1e-12)
np.testing.assert_allclose(gamma, gamma1, rtol=1e-10, atol=1e-12)
# For really tiny angles, u/v should be basically the same as alpha/beta
u = | np.random.uniform(-1e-6, 1e-6, size=1000) | numpy.random.uniform |
"""Our Proposed Robust STL based on
Sparse Model and Non-local Seasonal bilateral filtering
Input: original_data - T x 1
Output: decomposed_data - T x 3 (trend, season, irregular)
Author: Qingsong <<EMAIL>>
"""
from numpy.linalg import norm, cholesky
import scipy.sparse as sparse
import time
import numbers
# import logging
from numbers import Number
# import scipy.sparse as sp
# from scipy.sparse.linalg import lsqr
# from scipy.sparse import csc_matrix
from sklearn.preprocessing import StandardScaler
from cvxopt import matrix
import pandas as pd
import numpy as np
from scipy.linalg import circulant
from .fast_stl_utils import DetailA, PDHG, gADMMSolver
"""two functions to get trend and season:
Sparse and robust method to get trend
Bilateral season adjust to get season
Author: Qingsong <<EMAIL>>
"""
def gaussian_expfun(x, sigma):
minimal_sigma_n = 1e-6
sigma = minimal_sigma_n if sigma < minimal_sigma_n else sigma
return np.exp(-(x * x) / (2.0 * sigma * sigma))
def gaussian_kernel(sigma, radius):
"""Computes 1D Gaussian kernel weights
The length of weights is 2*radius + 1
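
    Example (the kernel is normalized to sum to 1):

    >>> w = gaussian_kernel(1.0, 1)
    >>> len(w)
    3
    >>> float(round(w.sum(), 6))
    1.0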
"""
if sigma <= 0:
raise ValueError('sigma must be larger than zero.')
tmp_x = np.arange(-radius, radius + 1)
phi_x = gaussian_expfun(tmp_x, sigma)
phi_x /= phi_x.sum()
return phi_x
def calcu_noise_sigma(ts_data):
"""Calculate the eatimated noise's Standard Deviation
This method here is based on the paper:
<NAME>, <NAME>, and <NAME>.
Residual variance and residual pattern in nonlinear regression.
"""
ts_data_len = len(ts_data)
if ts_data_len < 3:
raise Exception('ts_data_len should be at least 3')
y_ip1 = ts_data[2:]
y_i = ts_data[1:-1]
y_im1 = ts_data[:-2]
sigma_n2 = (2.0 / (3 * len(y_ip1))) * sum(
(0.5 * y_im1 - y_i + 0.5 * y_ip1)**2)
sigma_n = np.sqrt(sigma_n2)
# sigma_n = 1e-5 if sigma_n < 1e-5 else sigma_n
minimal_sigma_n = 1e-6
sigma_n = minimal_sigma_n if sigma_n < minimal_sigma_n else sigma_n
return sigma_n
def data_validity_check(ts_data):
# old version-->if isinstance(ts_data, int) or isinstance(ts_data, float):
if isinstance(ts_data, numbers.Number):
ts_data = np.array([ts_data])
if not (isinstance(ts_data, list) or isinstance(ts_data, np.ndarray)
or isinstance(ts_data, pd.DataFrame)):
        raise ValueError('For denoising, input data must be list, np.ndarray,'
                         ' or pd.DataFrame (for online, input can be a single'
                         ' point with int/float data type)!')
# check pandas
if isinstance(ts_data, pd.DataFrame):
if ts_data.shape[1] != 1:
raise ValueError('For denoising, if input data is pandas dataframe'
', it must be 1D data!')
# check array or list
if isinstance(ts_data, list):
ts_data = np.array(ts_data)
if isinstance(ts_data, np.ndarray):
if ts_data.ndim > 2:
            raise ValueError('For denoising, input data cannot be a 3D or higher'
                             ' dimensional array or list!')
if ts_data.ndim == 2:
if not (ts_data.shape[0] == 1 or ts_data.shape[1] == 1):
                raise ValueError('For denoising, if input data is a 2D array or'
                                 ' list, one of the dims must be 1!')
def bilateral_weighted_data(input_data,
sigma_i=4,
leftmost_dist_w=0.8,
sigma_d=None,
refer_value=None,
normalize_toggle=True):
# check leftmost_dist_w
if leftmost_dist_w <= 0 or leftmost_dist_w >= 1.0:
raise ValueError('leftmost_dist_w should 0 < leftmost_dist_w < 1')
    # Initialization
ts_data_ori = to_2dnp_array(input_data)
data_len = len(ts_data_ori)
# normalize
if normalize_toggle:
scaler = StandardScaler()
ts_data = scaler.fit_transform(ts_data_ori)
else:
ts_data = ts_data_ori.copy()
if refer_value is None:
refer_value = ts_data[-1]
# distance_weights
refer_idx = -1 # default, the last point for distance_refer_idx
leftmost_idx = data_len + refer_idx
dx = np.arange(-leftmost_idx, -refer_idx).reshape(-1, 1)
if sigma_d is None:
sigma_d = np.sqrt(-0.5 * leftmost_idx * leftmost_idx /
np.log(leftmost_dist_w))
distance_weights = gaussian_expfun(dx, sigma_d)
# intensity_weights
sigma_n = np.std(ts_data)
sig_I = sigma_i * sigma_n
dy = ts_data - refer_value
intensity_weights = gaussian_expfun(dy, sig_I)
# final bilateral_weights
bilateral_weights = distance_weights * intensity_weights
weighted_data = ts_data_ori * bilateral_weights
return weighted_data, distance_weights,\
intensity_weights, bilateral_weights
def to_2dnp_array(X):
"""Convert array-like data (list/numpy array/pandas) to 2D
numpy array.
"""
if isinstance(X, np.ndarray):
if X.ndim == 1:
return X.reshape((-1, 1))
if X.ndim == 2:
return X
if isinstance(X, Number):
X = [X]
X = np.array(X)
X = X.reshape([-1, np.prod(X.shape) // X.shape[0]])
return X
"""Two Bilateral filter classes for denoising time series.
Input: ts_data - T x 1
Output: filterd_ts_data - T x 1
"""
class BilateralFilter():
"""Basic Bilateral Filter for denoising time series
It is used to as a baseline for denoising time signals under different
Bilateral Filters.
The filtering width is around 2*(truncate*sigma) + 1
Parameters
----------
sigma_i : double, optional
        [default=2.0]
The standard deviation for intensity used in bilateral gaussian kernel
sigma_d : double, optional
[default=2.5]
The standard deviation for distance used in bilateral gaussian kernel
truncate: double, optional
        [default=8.0]
Used for the radius of the filter, which equals to truncate times
sigma_d
pad_mode: str, 'symmetric' or 'reflect'
[default='symmetric']
The scheme used for padding edge data points
Suppose we have data (a b c d ...... e f g h):
For the edge data, if they are extended using 'symmetric' about edge of
the last pixel, we have (c b a | a b c d ...... e f g h | h g f)
For the edge data, if they are extended using 'reflect' about the edge
of the last pixel, we have (d c b | a b c d ...... e f g h | g f e)
"""
def __init__(self,
sigma_i=2.0,
sigma_d=2.5,
truncate=8.0,
pad_mode='symmetric'):
self.sigma_i = sigma_i
self.sigma_d = sigma_d
self.truncate = truncate
self.pad_mode = pad_mode
def fit_transform(self, ts_data, override=True):
"""
        This method performs the denoising filtering process for a time series.
Parameters
----------
ts_data : array-like, shape = (n_timelength, 1) or (n_timelength,)
The input data, 1D data: can be numpy array, list,
or pandas dataframe.
override : boolean
To be used for online case, ignore for now.
Returns
-------
filterd_out : array-like, shape = (n_timelength, 1)
The output of denoised time series
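
        Examples
        --------
        A hedged sketch on synthetic data (the noisy sine below is made up
        purely for illustration):

        >>> t = np.linspace(0, 4 * np.pi, 200)
        >>> noisy = np.sin(t) + 0.05 * np.random.randn(200)
        >>> denoised = BilateralFilter().fit_transform(noisy)
        >>> denoised.shape
        (200, 1)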
"""
sigma_i = self.sigma_i
sigma_d = self.sigma_d
truncate = self.truncate
pad_mode = self.pad_mode
data_validity_check(ts_data)
ts_data = np.array(ts_data).flatten()
radius = int(truncate * sigma_d + 0.5)
dx = np.arange(-radius, radius + 1)
sig_D = sigma_d
exp_D = gaussian_expfun(dx, sig_D)
        # calculate the estimated noise variance
        sigma_n = calcu_noise_sigma(ts_data)
        sig_I = sigma_i * sigma_n  # note sigma_i is just a multiplicative parameter
if (pad_mode == 'symmetric') or (pad_mode == 'reflect'):
ts_data_pad = np.pad(ts_data, radius, pad_mode)
else:
raise RuntimeError(
                'The pad_mode is not supported; only symmetric or reflect are supported')
data_len = len(ts_data)
filterd_out = np.zeros(data_len)
for idx in range(data_len):
idx_pad = idx + radius
data_window = ts_data_pad[(idx_pad - radius):(idx_pad + radius +
1)]
dy = data_window - ts_data_pad[idx_pad]
exp_I = gaussian_expfun(dy, sig_I)
bilateral_weights = exp_I * exp_D
bilateral_weights /= bilateral_weights.sum()
filterd_out[idx] = sum(data_window * bilateral_weights)
filterd_out = filterd_out.reshape(-1, 1)
return filterd_out
class CausalBilateralFilter():
"""Causal Bilateral Filter for denoising time series
It is used in our real situations, and there is no pad_mode compared with
BilateralFilter, since we only use previous points to filter noise
of current point.
The filtering width is around 2*(truncate*sigma) + 1
Parameters
----------
sigma_i : double, optional
        [default=2.0]
The standard deviation for intensity used in bilateral gaussian kernel
sigma_d : double, optional
[default=2.5]
The standard deviation for distance used in bilateral gaussian kernel
truncate: double, optional
        [default=8.0]
Used for the radius of the filter, which equals to truncate
times sigma_d
"""
def __init__(self, sigma_i=2.0, sigma_d=2.5, truncate=8.0):
self.sigma_i = sigma_i
self.sigma_d = sigma_d
self.truncate = truncate
self.ts_data = None
self.filtered_ts_data = None
def fit_transform(self, ts_data, override=True):
"""
        This method performs the causal denoising filtering process for a time
        series, using only past samples for each point.
        Parameters
        ----------
        ts_data : array-like, shape = (n_timelength, 1) or (n_timelength,)
            The input data, only support 1D data currently.
        override : boolean
            To be used for online case, ignore for now.
        Returns
        -------
        filterd_out : array-like, shape = (n_timelength, 1)
            The output of denoised time series
"""
sigma_i = self.sigma_i
sigma_d = self.sigma_d
truncate = self.truncate
data_validity_check(ts_data)
ts_data = np.array(ts_data).flatten()
radius = int(truncate * sigma_d + 0.5)
dx = np.arange(-radius, 1)
sig_D = sigma_d
exp_D = gaussian_expfun(dx, sig_D)
        # calculate the estimated noise variance
        sigma_n = calcu_noise_sigma(ts_data)
        sig_I = sigma_i * sigma_n  # note sigma_i is just a multiplicative parameter
filterd_out = np.copy(ts_data)
data_len = len(ts_data)
for idx in range(radius, data_len):
data_window = ts_data[(idx - radius):(idx + 1)]
dy = data_window - ts_data[idx]
exp_I = gaussian_expfun(dy, sig_I)
bilateral_weights = exp_I * exp_D
bilateral_weights /= bilateral_weights.sum()
filterd_out[idx] = sum(data_window * bilateral_weights)
filterd_out = filterd_out.reshape(-1, 1)
self.radius = radius
self.exp_D = exp_D
self.ts_data = ts_data
self.filterd_out = filterd_out
return filterd_out
def fit_transform_online(self, new_ts_minibatch):
"""Update the model with newX to transform the data
Online version to do the fit transform with much faster speed.
Parameters
---------
new_ts_minibatch : array-like, shape = (n_minibatch, 1)
or (n_minibatch,)
Single time series of length mini_batch_timelength.
Returns
-------
X_t : array-like, shape = (n_timelength,)
A transformed time series of the same time length as the
original ts_data fed into the model. Will need to maintain the
state of the old data in order to return the transformed
time series with the correct timelength.
"""
if self.ts_data is None:
raise ValueError("fit_transform_online can not be called in the\
very first time, should followed fit_transform.")
radius = self.radius
exp_D = self.exp_D
ts_data = self.ts_data
filterd_out = self.filterd_out
sigma_i = self.sigma_i
# check new_ts_minibatch and change format to 1d np.array
data_validity_check(new_ts_minibatch)
new_ts_minibatch = np.array(new_ts_minibatch).flatten()
# update ts_data
mini_batch_len = len(new_ts_minibatch)
ts_data = np.concatenate([ts_data[mini_batch_len:], new_ts_minibatch])
        # calculate the estimated noise variance
        sigma_n = calcu_noise_sigma(ts_data)
        sig_I = sigma_i * sigma_n  # note sigma_i is just a multiplicative parameter
new_filterd_out = np.copy(new_ts_minibatch)
data_len = len(ts_data)
for idx in range((data_len - mini_batch_len), data_len):
data_window = ts_data[(idx - radius):(idx + 1)]
dy = data_window - ts_data[idx]
exp_I = gaussian_expfun(dy, sig_I)
bilateral_weights = exp_I * exp_D
bilateral_weights /= bilateral_weights.sum()
new_filterd_out[idx - (data_len - mini_batch_len)] = sum(
data_window * bilateral_weights)
# update filterd_out
new_filterd_out = new_filterd_out.reshape(-1, 1)
filterd_out = np.concatenate(
[filterd_out[mini_batch_len:], new_filterd_out])
filterd_out = filterd_out.reshape(-1, 1)
self.ts_data = ts_data
self.filterd_out = filterd_out
return filterd_out
def _get_prev_transform(self):
# debug purpose
return self.filterd_out
class BilateralSeasonTstat():
"""
This method generates the latent variables on seasonal data
using T statistics and frequency with the idea of Bilateral Filter.
Parameters
----------
num_period : int
The number of previous history periods data.
num_period can be 0,1,2,3...,
when num_period=0, it only uses local data
period_len : int
The length of one period in terms of data points.
When num_period=0, this para is not used
neighbour_wdw_size : int
The half length of the window used in historical period data
sigma_i : double, optional
        [default=1.0]
The standard deviation for intensity used in bilateral gaussian kernel
sigma_d : double, optional
        [default=1e10]
The standard deviation for distance used in bilateral gaussian kernel
adj_win_open : Boolean, [default=False]
A toggle to control if we will use adjacent data (local data, not
period data).
adj_win_size : int
The length of the window used in local adjacent data
adj_win_weight : float
the weight used to control the contribution from local adjacent data
lamb: float, [0.,1.], optional
[default=0]
The float number to control the ratio of local/global variance.
Default 0 means we only use the local variance from seasonal data.
Set the value to 0.5 means we are using the geometric mean of
local/global variance. Set to 1 to use only global variance.
div_var_toggle: boolean
[default=False]
The toggle that determines whether to divide (x - _mean) by the
harmonic mean of sigma_global and sigma when calculating the latent
representation. Default False means that it will not divide.
"""
def __init__(self,
num_period=2,
period_len=1440,
neighbour_wdw_size=10,
sigma_i=1.0,
sigma_d=1e10,
adj_win_open=False,
adj_win_size=10,
adj_win_weight=0.001,
lamb=0,
div_var_toggle=False,
fit_transform_len_spec=None):
self.period_len = period_len
self.num_period = num_period
self.sigma_i = sigma_i
self.sigma_d = sigma_d
self.neighbour_wdw_size = neighbour_wdw_size
self.adj_win_open = adj_win_open
self.adj_win_size = adj_win_size
self.adj_win_weight = adj_win_weight
self.lamb = lamb
self.div_var_toggle = div_var_toggle
self.ts_data = None
self.filtered_out = None
self.fit_transform_len_spec = fit_transform_len_spec
self._required_len = neighbour_wdw_size + period_len * num_period
def fit_transform(self, ts_data, override=True):
"""
        This method performs the denoising filtering process for a time series.
Parameters
----------
ts_data : array-like, shape = (n_timelength, 1) or (n_timelength,)
The input data, 1D data: can be numpy array, list,
or pandas dataframe.
override : boolean
To be used for online case, ignore for now.
Returns
-------
filtered_out : array-like, shape = (n_timelength, 1)
The output of denoised time series
"""
period_len = self.period_len
num_period = self.num_period
sigma_i = self.sigma_i
sigma_d = self.sigma_d
adj_win_open = self.adj_win_open
adj_win_weight = self.adj_win_weight
adj_win_size = self.adj_win_size
# truncate = self.truncate
neighbour_wdw_size = self.neighbour_wdw_size
lamb = self.lamb
div_var_toggle = self.div_var_toggle
fit_transform_len_spec = self.fit_transform_len_spec
data_validity_check(ts_data)
ts_data = np.array(ts_data).flatten()
# radius = int(truncate * sigma_d + 0.5)
adj_radius = adj_win_size
radius = neighbour_wdw_size
sig_D = sigma_d
dx = np.arange(-adj_radius, 0)
exp_D = gaussian_expfun(dx, sig_D)
dx_2s = np.arange(-radius, radius + 1)
exp_D_2s = gaussian_expfun(dx_2s, sig_D)
        # calculate the estimated noise variance
# sigma_n = calcu_noise_sigma(ts_data)
sigma_n = np.std(ts_data)
# minimal_sigma_n = 1e-2
# sigma_n = minimal_sigma_n if sigma_n < minimal_sigma_n else sigma_n
        sig_I = sigma_i * sigma_n  # note sigma_i is just a multiplicative parameter
# filtered_out = np.copy(ts_data)
filtered_out = np.zeros(ts_data.shape)
data_len = len(ts_data)
if data_len <= radius + period_len * num_period:
            raise Exception('data length must be greater than neighbour_wdw_size + period_len * num_period')
# specific fit_transform_len if necessary
if fit_transform_len_spec is None:
start_point = radius + period_len * 1
else:
start_point = data_len - fit_transform_len_spec
# do filtering
for idx in range(start_point, data_len):
            # this part uses local neighborhood data (diff)
data_window_combined = np.array([])
bilateral_weights_combined = np.array([])
if adj_win_open:
data_window = ts_data[(idx - adj_radius):idx]
dy = data_window - ts_data[idx]
exp_I = gaussian_expfun(dy, sig_I)
bilateral_weights = adj_win_weight * exp_I * exp_D
# concatenate data and weights
data_window_combined = np.concatenate(
(data_window_combined, data_window))
bilateral_weights_combined = np.concatenate(
(bilateral_weights_combined, bilateral_weights))
            # this part uses non-local neighborhood data
# from previous period signals
actual_num_period = 0
for idx_period in range(1, num_period + 1):
if idx - radius - period_len * idx_period >= 0:
actual_num_period += 1
data_window = ts_data[(idx - radius -
period_len * idx_period):(
idx + radius + 1 -
period_len * idx_period)]
dy = data_window - ts_data[idx]
exp_I = gaussian_expfun(dy, sig_I)
bilateral_weights = exp_I * exp_D_2s
# concatenate data and weights
data_window_combined = np.concatenate(
(data_window_combined, data_window))
bilateral_weights_combined = np.concatenate(
(bilateral_weights_combined, bilateral_weights))
weights_sum = bilateral_weights_combined.sum()
if weights_sum == 0:
bilateral_weights_combined = 1.0 / len(
bilateral_weights_combined)
else:
bilateral_weights_combined /= weights_sum
_mean = np.sum(data_window_combined * bilateral_weights_combined)
# If div_var_toggle is True, calculate the necessary statistic
# and divide (x - _mean) by the harmonic mean of 2 var
if div_var_toggle:
# adjusted local var
_local_diff_square = np.square(data_window_combined - _mean)
_local_var = np.sum(_local_diff_square *
bilateral_weights_combined)
_local_var = 1e-10 if _local_var < 1e-10 else _local_var
# adjusted global var
_data_global = ts_data[(idx - radius -
period_len * actual_num_period):idx]
_global_var = np.var(_data_global)
_global_var = 1e-10 if _global_var < 1e-10 else _global_var
# harmonic mean of sigma_local and sigma_global
_harmonic_std_inv = np.sqrt(lamb / _global_var +
(1 - lamb) / _local_var)
# multipy it by _harmonic_std_inv
filtered_out[idx] = (ts_data[idx] - _mean) * _harmonic_std_inv
# Otherwise, only use (x - _mean)
else:
filtered_out[idx] = ts_data[idx] - _mean
filtered_out = filtered_out.reshape(-1, 1)
self.radius = radius
self.adj_radius = adj_radius
self.exp_D = exp_D
self.exp_D_2s = exp_D_2s
self.ts_data = ts_data
self.filtered_out = filtered_out
return filtered_out
def fit_transform_online(self, new_ts_minibatch):
"""Update the model with newX to transform the data
Online version to do the fit transform with much faster speed.
Parameters
---------
new_ts_minibatch : array-like, shape = (n_minibatch, 1)
or (n_minibatch,)
Single time series of length mini_batch_timelength.
Returns
-------
X_t : array-like, shape = (n_timelength,)
A transformed time series of the same time length as the
original ts_data fed into the model. Will need to maintain the
state of the old data in order to return the transformed
time series with the correct timelength.
"""
if self.ts_data is None:
raise ValueError("fit_transform_online can not be called in the\
very first time, should followed fit_transform.")
adj_radius = self.adj_radius
radius = self.radius
exp_D = self.exp_D
exp_D_2s = self.exp_D_2s
ts_data = self.ts_data
filtered_out = self.filtered_out
sigma_i = self.sigma_i
period_len = self.period_len
num_period = self.num_period
adj_win_open = self.adj_win_open
adj_win_weight = self.adj_win_weight
lamb = self.lamb
div_var_toggle = self.div_var_toggle
# check new_ts_minibatch and change format to 1d np.array
data_validity_check(new_ts_minibatch)
new_ts_minibatch = np.array(new_ts_minibatch).flatten()
# update ts_data
mini_batch_len = len(new_ts_minibatch)
ts_data = np.concatenate([ts_data[mini_batch_len:], new_ts_minibatch])
        # calculate the estimated noise variance
# sigma_n = calcu_noise_sigma(ts_data)
sigma_n = np.std(ts_data)
# minimal_sigma_n = 1e-2
# sigma_n = minimal_sigma_n if sigma_n < minimal_sigma_n else sigma_n
        sig_I = sigma_i * sigma_n  # note sigma_i is just a multiplicative parameter
new_filtered_out = np.copy(new_ts_minibatch)
data_len = len(ts_data)
for idx in range((data_len - mini_batch_len), data_len):
            # # this part uses local neighborhood data
data_window_combined = np.array([])
bilateral_weights_combined = np.array([])
if adj_win_open:
data_window = ts_data[(idx - adj_radius):idx]
dy = data_window - ts_data[idx]
exp_I = gaussian_expfun(dy, sig_I)
bilateral_weights = adj_win_weight * exp_I * exp_D
# concatenate data and weights
data_window_combined = np.concatenate(
(data_window_combined, data_window))
bilateral_weights_combined = np.concatenate(
(bilateral_weights_combined, bilateral_weights))
            # this part uses non-local neighborhood data
# from previous period signals
actual_num_period = 0
for idx_period in range(1, num_period + 1):
if idx - radius - period_len * idx_period >= 0:
actual_num_period += 1
data_window = ts_data[(idx - radius -
period_len * idx_period):(
idx + radius + 1 -
period_len * idx_period)]
dy = data_window - ts_data[idx]
exp_I = gaussian_expfun(dy, sig_I)
bilateral_weights = exp_I * exp_D_2s
# concatenate data and weights
data_window_combined = np.concatenate(
(data_window_combined, data_window))
bilateral_weights_combined = np.concatenate(
(bilateral_weights_combined, bilateral_weights))
weights_sum = bilateral_weights_combined.sum()
if weights_sum == 0:
bilateral_weights_combined = 1.0 / len(
bilateral_weights_combined)
else:
bilateral_weights_combined /= weights_sum
_mean = np.sum(data_window_combined * bilateral_weights_combined)
# If div_var_toggle is True, calculate the necessary statistic
# and divide (x - _mean) by the harmonic mean of 2 var
if div_var_toggle:
# adjusted local var
_local_diff_square = np.square(data_window_combined - _mean)
_local_var = np.sum(_local_diff_square *
bilateral_weights_combined)
_local_var = 1e-10 if _local_var < 1e-10 else _local_var
# adjusted global var
_data_global = ts_data[(idx - radius -
period_len * actual_num_period):idx]
_global_var = | np.var(_data_global) | numpy.var |
# -*- coding: utf-8 -*-
# @Time : 2019/10/9 11:31
# @Author : Esbiya
# @Email : <EMAIL>
# @File : img_locate.py
# @Software: PyCharm
import os
import requests
from PIL import Image
import cv2
import numpy as np
def _pic_download(url, type):
"""
    Download an image and save it under the local ``images`` directory.
    :param url: image URL
    :param type: file-name stem for the saved image (e.g. 'captcha')
    :return: local path of the downloaded image
"""
save_path = os.path.abspath('...') + '\\' + 'images'
if not os.path.exists(save_path):
os.mkdir(save_path)
img_path = save_path + '\\' + '{}.jpg'.format(type)
img_data = requests.get(url).content
with open(img_path, 'wb') as f:
f.write(img_data)
return img_path
def merge_captcha(init_data):
"""
    Reassemble the shuffled captcha background image from its tiles.
    :param init_data: captcha initialization data
    :return: local path of the restored background image
"""
per_width = init_data['imageBlockPerWidth']
per_height = init_data['imageBlockPerHeight']
merge_array = init_data['imageBlockOffset']
captcha_url = 'https://qcaptcha.iqiyi.com' + init_data['imageBgUrl']
captcha_path = _pic_download(captcha_url, 'captcha')
image = Image.open(captcha_path)
new_image = Image.new('RGB', image.size)
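    # The background is delivered as 2 rows of shuffled tiles. For each tile,
    # (t1, t2) gives its offset (in tile units) inside the downloaded image;
    # the tile is copied from there and pasted back at its ordered slot
    # (p * per_width, s * per_height). Edge tiles can be narrower or shorter.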
for s in range(2):
for p in range(len(merge_array[0])):
d = merge_array[s]
h = len(d)
l = d[h - 1]
G = d[p]
v = G['t1'] * per_width
Z = G['t2'] * per_height
g = per_width
W = per_height
if G['t1'] > l['t1']:
v += 290 - per_width * h
if p == h - 1:
g = 290 - per_width * p
if s == 1:
W = 170 - per_height * s
imgcrop = image.crop((v, Z, v + g, Z + W))
new_image.paste(imgcrop, (p * per_width, s * per_height))
new_image.save(captcha_path)
# new_image.show()
return captcha_path
def _cut_slider(path):
"""
    Crop the slider image to the bounding box of its non-white pixels.
:return:
"""
image = Image.open(path)
x = []
y = []
for i in range(image.size[0]):
for j in range(image.size[1]):
pix = image.load()[i, j]
if pix != 255:
x.append(i)
y.append(j)
    z = (np.min(x), np.min(y), np.max(x), np.max(y))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 10 11:14:55 2017
Solving the nuclear wavepacket dynamics on 1D adiabatic potential energy surface.
@author: <NAME>
History:
2/12/18 : fix a bug with the FFT frequency
Possible improvements:
1. use pyFFTW to replace the Scipy
"""
import numpy as np
from matplotlib import pyplot as plt
# from matplotlib import animation
from scipy.fftpack import fft,ifft,fftshift
# from scipy.linalg import expm, sinm, cosm
import scipy
# import sys
# sys.path.append(r'C:\Users\Bing\Google Drive\lime')
# sys.path.append(r'/Users/bing/Google Drive/lime')
from lime.phys import dagger, rk4
class NAMD:
def __init__(self, x, nstates, mass, v, nac):
"""
Non-adiabatic molecular dynamics (NAMD) simulations for one nuclear dof
and many electronic states.
Args:
x: real array of size N
grid points
psi0: complex array [N, ns]
initial wavefunction
mass: float, nuclear mass
nstates: integer, number of states
v: ndarray [nx, nstates]
adiabatic potential energy surfaces
nac: ndarray (nx, nstates, nstates)
non-adiabatic couplings. Diagonal elements vanish by definition.
"""
self.x = x
# self.psi0 = psi0
self.mass = mass
self.V_x = v
self.v = v
self.nstates = nstates
self.nac = nac
def x_evolve(self, psi, vpsi):
"""
vpsi = exp(-i V dt)
"""
# for i in range(len(x)):
# tmp = psi_x[i, :]
# utmp = U[i,:,:]
# psi_x[i,:] = np.dot(U,V.dot(dagger(U))).dot(tmp)
psi = np.einsum('imn, in -> im', vpsi, psi)
return psi
def k_evolve(self, dt, k, psi_x):
"""
one time step for exp(-i * K * dt)
"""
mass = self.mass
#x = self.x
        for n in range(self.nstates):
psi_k = fft(psi_x[:,n])
psi_k *= np.exp(-0.5 * 1j / mass * (k * k) * dt)
psi_x[:,n] = ifft(psi_k)
return psi_x
def spo(self, dt, psi_x, Nsteps = 1):
"""
solve the time-dependent Schrodinger Equation with split-operator method.
Parameters
----------
dt : float
time interval over which to integrate
Nsteps : float, optional
the number of intervals to compute. The total change
in time at the end of this method will be dt * Nsteps.
default is N = 1
"""
if dt > 0.0:
f = open('density_matrix.dat', 'w')
else:
f = open('density_matrix_backward.dat', 'w')
x = self.x
V_x = self.V_x
nx = len(x)
nstates = self.nstates
dt2 = 0.5 * dt
vpsi = np.zeros((nx, nstates, nstates), dtype=complex)
vpsi2 = np.zeros((nx, nstates, nstates), dtype=complex)
for i in range(nx):
Vmat = np.reshape(V_x[i,:], (nstates, nstates))
w, u = scipy.linalg.eigh(Vmat)
#print(np.dot(U.conj().T, Vmat.dot(U)))
v = np.diagflat(np.exp(- 1j * w * dt))
v2 = np.diagflat(np.exp(- 1j * w * dt2))
vpsi[i,:,:] = u.dot(v.dot(dagger(u)))
vpsi2[i,:,:] = u.dot(v2.dot(dagger(u)))
dx = x[1] - x[0]
k = 2.0 * np.pi * scipy.fftpack.fftfreq(nx, dx)
print('Propagating the wavefunction ...')
t = 0.0
        psi_x = self.x_evolve(psi_x, vpsi2)  # evolve V half step
for i in range(Nsteps - 1):
t += dt
psi_x = self.k_evolve(dt, k, psi_x)
psi_x = self.x_evolve(psi_x, vpsi)
rho = density_matrix(psi_x, dx)
# store the density matrix
f.write('{} {} {} {} {} \n'.format(t, *rho))
# psi_x = self.k_evolve(dt, psi_x)
# psi_x = self.x_evolve(dt2, psi_x, vpsi2)
f.close()
return psi_x
def evolve(self, psi0, dt=0.001, Nt=1, t0=0., nout=1, coordinates='linear'):
"""
Propagate the wavepacket dynamics
Parameters
----------
        psi0 : ndarray of shape (nx, nstates)
            initial wavefunction on the grid.
        dt : float, optional
            time step. The default is 0.001.
        Nt : int, optional
            total number of time steps. The default is 1.
        t0 : float, optional
            initial time. The default is 0.
        nout : int, optional
            number of steps between outputs. The default is 1.
        coordinates : str, optional
            'linear' or 'curvilinear'. The default is 'linear'.
        Raises
        ------
        NotImplementedError
            if ``coordinates == 'curvilinear'``.
        Returns
        -------
        psi : ndarray of shape (nx, nstates)
            wavefunction after propagation.
"""
psi = psi0
t = t0
x = self.x
nx = len(x)
dx = x[1] - x[0]
vmat = self.v
nac = self.nac
# momentum k-space
k = 2.0 * np.pi * scipy.fftpack.fftfreq(nx, dx)
if coordinates == 'linear':
print('The nuclear coordinate is linear.')
elif coordinates == 'curvilinear':
raise NotImplementedError('Kinetic energy operator for curvilinear\
coordinates has not been implemented.')
fig, ax = plt.subplots()
for j in range(Nt//nout):
for i in range(nout):
t += dt
psi = rk4(psi, hpsi, dt, x, k, vmat, nac)
#output_tmp = density_matrix(psi)
#f.write('{} {} {} {} {} \n'.format(t, *rho))
#purity[i] = output_tmp
# ax.plot(x, np.abs(psi[:,0]) + 0.1 * j)
ax.plot(x, np.abs(psi[:,1]))
return psi
def density_matrix(psi_x,dx):
"""
    compute the reduced electronic density-matrix elements from the wavefunction
"""
rho00 = np.sum(np.abs(psi_x[:,0])**2)*dx
rho01 = np.vdot(psi_x[:,1], psi_x[:,0])*dx
rho11 = 1. - rho00
return rho00, rho01, rho01.conj(), rho11
def hpsi(psi, x, k, vmat, nac, coordinates='linear', use_nac2=False):
"""
evaluate H \psi
input:
v: 1d array, adiabatic surfaces
d: nonadiabatic couplings, matrix
use_nac2: bool
indicator whether to include the second-order nonadiabatic couplings
output:
hpsi: H operators on psi
"""
# v |psi>
# for i in range(len(x)):
# for j in range(len(y)):
# v_tmp = np.diagflat(vmat[:][i,j])
# array_tmp = np.array([psi[0][i, j], psi[1][i, j]])
# vpsi = vmat.dot(array_tmp)
# if nstates != len(vmat):
# sys.exit('Error: number of electronic states does not match
# the length of PPES matrix!')
# APESs act on the wavefunction
vpsi = np.einsum('in, in -> in', vmat, psi)
#vpsi = [vmat[i] * psi[i] for i in range(nstates)]
# T |psi> = - \grad^2/2m * psi(x) = k**2/2m * psi(k)
# D\grad |psi> = D(x) * F^{-1} F
psi_k = np.zeros((nx, nstates), dtype=complex)
dpsi = np.zeros((nx, nstates), dtype=complex)
tpsi = np.zeros((nx, nstates), dtype=complex)
kpsi = | np.zeros((nx, nstates), dtype=complex) | numpy.zeros |
import sys
import tqdm
import numpy as np
from pathlib import Path
import random
import torch
from scipy.optimize import minimize
import matplotlib.pyplot as plt
file_list = sys.argv[1]
SENSOR_H = 480
SENSOR_W = 640
IMAGE_H = 224
IMAGE_W = 224
VISUALIZE = True
LENGTH = 50000
START_IDX = 0
OBJECTIVE = 'gradient'
def load_event(event_path):
# Returns time-shifted numpy array event from event_path
event = np.load(event_path)['event_data']
event = np.vstack([event['x'], event['y'], event['t'], event['p'].astype(np.uint8)]).T
event = event.astype(np.float)
# Account for non-zero minimum time
if event[:, 2].min() != 0:
event[:, 2] -= event[:, 2].min()
# Account for int-type timestamp
# event[:, 2] /= 1000000
# Account for zero polarity
if event[:, 3].min() >= -0.5:
event[:, 3][event[:, 3] <= 0.5] = -1
event[:, 0] *= (IMAGE_W / SENSOR_W)
event[:, 1] *= (IMAGE_H / SENSOR_H)
return event
def display_event(event):
event_image = np.zeros([IMAGE_H, IMAGE_W])
coords = event[:, :2].astype(np.int32)
event_image[(coords[:, 1], coords[:, 0])] = 1.0
plt.imshow(event_image)
plt.show()
def warp_event(event_path):
event = load_event(event_path)
speed = np.zeros(2)
display_event(event)
def tgt_func(x):
tgt_event = np.array(event[START_IDX:START_IDX + LENGTH])
tgt_event[:, 0] = tgt_event[:, 0] + x[0] * (tgt_event[START_IDX, 2] - tgt_event[:, 2])
tgt_event[:, 1] = tgt_event[:, 1] + x[1] * (tgt_event[START_IDX, 2] - tgt_event[:, 2])
coords = tgt_event[:, :2].astype(np.int32)
coords[:, 0] = np.clip(coords[:, 0], 0, IMAGE_W - 1)
coords[:, 1] = np.clip(coords[:, 1], 0, IMAGE_H - 1)
event_image = np.zeros([IMAGE_H, IMAGE_W])
event_image[(coords[:, 1], coords[:, 0])] = 1.0
plt.imshow(event_image)
plt.show()
obj_value = 0.0
if OBJECTIVE == 'proj_cnt':
obj_value = np.average(event_image)
elif OBJECTIVE == 'gradient':
gy, gx = np.gradient(event_image)
gnorm = | np.sqrt(gx**2 + gy**2) | numpy.sqrt |
import numpy as np
from scipy import signal
from . import kernel_functions
from . import utils
def cusplet(
arr,
widths,
kernel_args=None,
kernel_func=kernel_functions.power_cusp,
method='fft',
reflection=0,
width_weights=None,
):
"""Implements the discrete cusplet transform.
Args:
arr(list): array of shape (n,) or (n,1).
This array should not contain inf-like values.
The transform will still be computed but infs propagate.
Nan-like values will be linearly interpolated, which is okay for subsequent
time-based analysis but will introduce ringing in frequency-based analyses.
widths(iterable): iterable of integers that specify the window widths (L above).
Assumed to be in increasing order.
If widths is not in increasing order the results will be garbage.
kernel_args(list or tuple, optional): arguments for the kernel function.
kernel_func(callable): A kernel factory function.
See kernel_functions.py for the required interface and available options.
method(str, optional): one of 'direct' or 'fft' (Default value = 'fft')
reflection(int, optional): Element of the reflection group applied to the kernel function.
Default is 0, corresponding to the identity element.
width_weights(list or None, optional): Relative importance of the different window widths.
Returns:
: tuple -- (numpy array of shape (L, n) -- the cusplet transform, k -- the calculated kernel function)
"""
if kernel_args is None:
kernel_args = []
elif type(kernel_args) is float:
kernel_args = [kernel_args]
if width_weights is None:
width_weights = np.ones_like(widths)
else:
width_weights = np.array(width_weights)
arr = utils.fill_na(np.array(arr), mode='interpolate')
cc = np.zeros((len(widths), len(arr)))
for i, width in enumerate(widths):
kernel = kernel_func(width, *kernel_args)
kernel = utils.apply_reflection_action(kernel, reflection)
cc[i] = signal.correlate(arr, kernel, mode='same', method=method)
cc = width_weights[..., np.newaxis] * cc
return cc, kernel
def cusplet_parameter_sweep(
arr,
widths,
kernel_weights=None,
kernel_args=None,
kernel_func=kernel_functions.power_cusp,
reflection=0,
width_weights=None,
):
"""Sweeps over values of parameters (kernel arguments) in the discrete cusplet transform.
Args:
arr(list): numpy array of shape (n,) or (n,1), time series
kernel_func(callable): kernel function. Must take an integer L > 0 as an argument and any number of additional, nonkeyword arguments, and returns a numpy array of shape (L,) that implements the kernel. The returned array should sum to zero; use the zero_norm function for this.
widths(iterable): iterable of integers that specify the window widths (L above). Assumed to be in increasing order; if widths is not in increasing order the results will be garbage.
kernel_args(list or tuple of lists or tuples): iterable of iterables of arguments for the kernel function. Each top-level iterable is treated as a single parameter vector.
reflection(int, optional): integer n evaluates to n %4, element of the reflection group that left-multiplies the kernel function. Default is 0 (identity element).
width_weights (list or None, optional):
kernel_weights(list or None, optional):
Returns:
: numpy.ndarray -- numpy array of shape (L, n, len(k_args)), the cusplet transform
"""
kernel_args = np.array(kernel_args)
if kernel_weights is None:
kernel_weights = np.ones(kernel_args.shape[0])
cc = np.zeros((len(widths), len(arr), len(kernel_args)))
for i, k_arg in enumerate(kernel_args):
cres, _ = cusplet(
arr,
widths,
kernel_args=k_arg,
kernel_func=kernel_func,
reflection=reflection,
width_weights=width_weights,
)
cc[:, :, i] = cres * kernel_weights[i]
return cc
def classify_cusps(cc, b=1, geval=False):
"""Classifies points as belonging to cusps or not.
Args:
cc(numpy.ndarray): numpy array of shape (L, n), the cusplet transform of a time series
b(int or float, optional): multiplier of the standard deviation. (Default value = 1)
geval(float >= 0, optional): If geval is an int or float, classify_cusps will return (in addition to the cusps and cusp intensity function) an array of points where the cusp intensity function is greater than geval. (Default value = False)
Returns:
: tuple --- (numpy.ndarray of indices of the cusps; numpy.ndarray representing the cusp intensity function) or, if geval is not False, (extrema; the cusp intensity function; array of points where the cusp intensity function is greater than geval)
"""
sum_cc = utils.zero_norm(np.nansum(cc, axis=0))
mu_cc = np.nanmean(sum_cc)
std_cc = np.nanstd(sum_cc)
extrema = np.array(signal.argrelextrema(sum_cc, np.greater))[0]
extrema = [x for x in extrema if sum_cc[x] > mu_cc + b * std_cc]
if geval is False:
return extrema, sum_cc
else:
gez = np.where(sum_cc > geval)
return extrema, sum_cc, gez
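# Hedged usage sketch (comment only; `series` is a placeholder for a 1-D time series
# and the kernel shape argument is illustrative -- the real kernel factory may differ):
#
#   cc, kernel = cusplet(series, widths=range(10, 200, 10), kernel_args=[3.0])
#   extrema, cusp_intensity, gez = classify_cusps(cc, b=0.75, geval=0.5)
#   windows, points = make_components(gez, cusp_points=extrema, scan_back=25)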
def _make_components(indicator, cusp_points=None):
"""Get individual windows from array of indicator indices.
Takes cusp indicator function and returns windows of contiguous cusp-like behavior.
If an array of hypothesized deterministic peaks of cusp-like behavior is passed,
thins these points so that there is at most one point per window.
Args:
indicator(list): array of the points where the cusp intensity function exceeds some threshold
cusp_points(list or numpy.ndarray, optional): optional, array of points that denote the hypothesized deterministic peaks of cusps (Default value = None)
Returns:
list -- the contiguous cusp windows; or, if cusp_points is not None, tuple -- (the contiguous cusp windows, the thinned cusp points)
"""
windows = []
indicator = np.array(indicator)
if len(indicator.shape) > 1:
indicator = indicator[0]
j = 0
for i, x in enumerate(indicator):
if i == len(indicator) - 1:
window = indicator[j: i]
if len(window) >= 2:
windows.append(window)
break
elif indicator[i + 1] == x + 1:
continue # still part of the same block
else: # block has ended
window = indicator[j: i]
if len(window) >= 2:
windows.append(window)
j = i + 1
if cusp_points is None:
return windows
pt_holder = [[] for _ in range(len(windows))]
for pt in cusp_points:
for i, window in enumerate(windows):
if (pt >= window[0]) and (pt <= window[-1]):
pt_holder[i].append(pt)
break
windows_ = []
estimated_cusp_points = []
for holder, window in zip(pt_holder, windows):
if holder:
windows_.append(window)
estimated_cusp_points.append(int(np.median(holder)))
estimated_cusp_points = np.array(estimated_cusp_points, dtype=int)
return windows_, estimated_cusp_points
def make_components(indicator, cusp_points=None, scan_back=0):
"""Get individual windows from array of indicator indices.
Takes cusp indicator function and returns windows of contiguous cusp-like behavior.
If an array of hypothesized deterministic peaks of cusp-like behavior is passed,
thins these points so that there is at most one point per window.
The scan_back parameter connects contiguous windows if they are less than or equal to
scan_back indices from each other.
Args:
indicator(list): array of the points where the cusp intensity function exceeds some threshold
cusp_points(list or numpy.ndarray, optional): optional, array of points that denote the hypothesized deterministic peaks of cusps (Default value = None)
scan_back(int >= 0, optional): number of indices to look back. If cusp windows are within scan_back indices of each other, they will be connected into one contiguous window. (Default value = 0)
Returns:
list -- the contiguous cusp windows; or, if cusp_points is not None, tuple -- (the contiguous cusp windows, the thinned cusp points)
"""
windows = _make_components(indicator, cusp_points=cusp_points)
if cusp_points is not None:
windows, estimated_cusp_points = windows
if (len(windows) > 1) and (scan_back > 0):
windows_ = []
for i in range(len(windows)):
if len(windows_) == 0:
windows_.append(list(windows[i]))
else:
if windows[i][0] <= windows_[-1][-1] + scan_back:
fill_between = list(range(windows_[-1][-1] + 1,
windows[i][0]))
windows_[-1].extend(fill_between)
windows_[-1].extend(list(windows[i]))
else:
windows_.append(list(windows[i]))
else:
windows_ = windows
if cusp_points is None:
return windows_
return windows_, estimated_cusp_points
def setup_corr_mat(k, N):
"""Sets up linear operator corresponding to cross correlation.
The cross-correlation operation can be just viewed as a linear operation from R^K to R^K as
Ax = C. The operator A is a banded matrix that represents the rolling add operation that
defines cross-correlation. To execute correlation of the kernel with data
one computes np.dot(A, data).
Args:
k(numpy.ndarray): the cross-correlation kernel
N(positive int): shape of data array with which k will be cross-correlated.
Returns:
numpy.ndarray -- NxN array, the cross-correlation operator
"""
def _sliding_windows(a, N):
"""Generates band numpy array *quickly*
Taken from https://stackoverflow.com/questions/52463972/generating-banded-matrices-using-numpy.
Args:
a:
N:
Returns:
"""
a = | np.asarray(a) | numpy.asarray |
"""
Data transformation (:mod:`ts_train.data`)
==========================================
.. currentmodule:: ts_train.data
.. autosummary::
clean_time_series
interpolate_series
series2matrix
create_sequences
window_generation_for_tsfresh
transfrom_all_data
make_features
reduce_mem_usage
deduplicate_column_names
.. autofunction:: clean_time_series
.. autofunction:: interpolate_series
.. autofunction:: series2matrix
.. autofunction:: create_sequences
.. autofunction:: window_generation_for_tsfresh
.. autofunction:: transfrom_all_data
.. autofunction:: make_features
.. autofunction:: reduce_mem_usage
.. autofunction:: deduplicate_column_names
"""
from typing import Union, List
import numpy as np
import pandas as pd
from scipy.interpolate import CubicSpline, interp1d
__docformat__ = 'restructuredtext'
__all__ = ['series2matrix']
def clean_time_series(inSeries: pd.Series) -> pd.Series:
"""
    Remove duplicates based on the timestamp index and
    perform linear interpolation for missing timestamps
:Parameters:
inSeries: pd.Series
The time series to be clean from duplicates and fill missing
by interpolation
:Returns:
return: pd.Series
Returns clean series
"""
inSeries.index = pd.to_datetime(inSeries.index)
mask_duplicated = inSeries.index.duplicated()
print("Duplicated data points found:", sum(mask_duplicated))
inSeries = inSeries[~mask_duplicated]
new_idx = pd.date_range(inSeries.index.min(), inSeries.index.max(), freq="s")
outSeries = inSeries.reindex(new_idx)
print("Missing data points found:", sum(outSeries.isna()))
outSeries = outSeries.interpolate()
return outSeries
def interpolate_series(time_series: pd.Series, n_points: int, method: str="spline") -> pd.Series:
"""
Up-sample & Interpolate the pattern to `n_points` number of values.
:Parameters:
time_series: pd.Series
Time series data to model & interpolated to n_points.
n_points: int
Number of points to interpolate for.
:Returns:
return: pd.Series
Series with index `n_points` and the interpolated values.
"""
if method=="spline":
spline = CubicSpline(time_series.index, time_series.values)
else:
spline = interp1d(time_series.index, time_series.values, kind="nearest")
interpolation_points = np.linspace(0, time_series.index.max(), n_points).round(5)
return pd.Series(
data=spline(interpolation_points),
index=interpolation_points
)
def series2matrix(in_series: Union[pd.Series, np.ndarray, list], w: int=50, padding: str="valid") -> pd.DataFrame:
"""
Generate matrix with rolling window from 1D iterator.
:Parameters:
in_series: pd.Series, np.array or list
1D iterator
w: int
rolling window size
padding: str
["valid", "same"]
:Returns:
return: pd.DataFrame
DataFrame of rows as moving windows
:Examples:
>>> import numpy as np
>>> import pandas as pd
>>> ls = [np.random.random() for i in range(10_000)]
>>> sr = pd.Series(ls) # sr = np.array(ls) # sr = ls
>>> data_df = series2matrix(sr, w=2526)
>>> assert data.shape == (7475, 2526)
"""
in_series = in_series.copy()
in_series.name = None
df = pd.DataFrame(in_series, columns=["t"])
for i in range(1, w):
df['t+'+str(i)] = df['t'].shift(-i)
if padding == "same":
return df.fillna(0)
return df.dropna()
def window_generation_for_tsfresh(series, w=21, id_shift=None):
"""
Window generation for tsfresh
w = odd windowsize
window is right aligned inclusive of current position (row)
:Example:
>>> df = pd.DataFrame()
>>> df["t"] = np.arange(1, 200)
>>> sample_windows_df = window_generation_for_tsfresh(df["t"], w=5)
>>> print(sample_windows_df.shape)
>>> sample_windows_df[:6]
sample_id order t
2 0 0.0
1 1.0
2 2.0
3 0 1.0
1 2.0
2 3.0
"""
org_col = f"t+{(w//2)}"
df = pd.DataFrame({org_col: series})
if not id_shift:
id_shift = -(w // 2) - 1
# left-shift
for i in range(1, (w // 2) + 1):
df["t+" + str((w // 2) - i)] = df[org_col].shift(i)
# right-shift
for i in range(1, (w // 2) + 1):
df["t+" + str(i + w // 2)] = df[org_col].shift(-i)
df = df.fillna(0)
df["sample_id"] = df.index.to_series().shift(periods=id_shift)
df = df.dropna()
df["sample_id"] = df["sample_id"].astype("i")
df = pd.wide_to_long(df, stubnames="t", i=["sample_id"], j="order", sep="+")
return df.sort_index(0)
def create_sequences(values: np.array, time_steps: int=32, skip_steps: int=1, ignore_last: int=0):
"""
Generated training sequences for use in the model.
:Examples:
>>> x_train = create_sequences(train_feature.values, time_steps=TIME_STEPS, ignore_last=0)
>>> print("Training input shape: ", x_train.shape)
"""
output = []
for i in range(0, len(values) - time_steps, skip_steps):
output.append(values[i : (i + time_steps - ignore_last)])
return np.stack(output)
def transfrom_all_data(transformer, train: pd.DataFrame, test: pd.DataFrame, feature_list=None):
"""
Apply transformer to train and test features
:Example:
>>> logTrans = FunctionTransformer(np.log1p)
>>> train_trans, test_trans = transfrom_all_data(transformer, train, test, feature_list)
"""
train_trans = transformer.fit_transform(train[feature_list])
if len(test):
test_trans = transformer.transform(test[feature_list])
else:
test_trans = None
if type(train_trans) != np.ndarray:
train_trans = np.array(train_trans)
if len(test):
test_trans = np.array(test_trans)
return train_trans, test_trans
def make_features(transformer, train: pd.DataFrame, test: pd.DataFrame, feature_list: List[str], name: str,
normalize: bool=False, scaler=None):
"""
Add newly generated transformed features to train and test dataframe
:Example:
>>> scaler = StandardScaler()
>>> logTrans = FunctionTransformer(np.log1p)
>>> train_X, val_X = make_features(qTrans, train_X, val_X, feature_list=range(10), name="qTrans", normalize=False, scaler=scaler)
"""
train, test = train.copy(), test.copy()
train_trans, test_trans = transfrom_all_data(transformer, train, test, feature_list)
if normalize and scaler is not None:
train_trans = scaler.fit_transform(train_trans).astype(np.float32)
test_trans = scaler.transform(test_trans).astype(np.float32)
for i in range(train_trans.shape[1]):
train['{0}_{1}'.format(name, i)] = train_trans[:, i]
if len(test):
test['{0}_{1}'.format(name, i)] = test_trans[:, i]
return train, test
def deduplicate_column_names(df):
"""
Deduplicate column names by adding suffix f"_{i}"
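
    :Example:

    >>> df = pd.DataFrame([[1, 2, 3]], columns=["a", "a", "b"])
    >>> deduplicate_column_names(df).columns.tolist()
    ['a', 'a_1', 'b']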
"""
df.columns = [
x[1]
if x[1] not in df.columns[: x[0]]
else f"{x[1]}_{list(df.columns[:x[0]]).count(x[1])}"
for x in enumerate(df.columns)
]
return df
def reduce_mem_usage(df: pd.DataFrame, verbose: bool = True) -> pd.DataFrame:
"""
Reduce memory usage by converting data types of numerical columns
:Example:
>>> df = reduce_mem_usage(df)
Mem. usage decreased to 124.30 MB (6.0 % reduction)
"""
numerics = ["int16", "int32", "int64", "float16", "float32", "float64"]
start_mem = df.memory_usage().sum() / 1024 ** 2
for col in df.columns:
col_type = df[col].dtypes
if col_type in numerics:
c_min = df[col].min()
c_max = df[col].max()
if str(col_type)[:3] == "int":
                if c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
                    df[col] = df[col].astype(np.int32)
import os, sys
from logs import logDecorator as lD
import json
from scipy.interpolate import interp1d
from scipy.integrate import odeint
import numpy as np
import tensorflow as tf
import time
from tensorflow.python.client import timeline
config = json.load(open('../config/config.json'))
logBase = config['logging']['logBase'] + '.lib.multipleODE.multipleODE_tf'
class multipleODE:
    '''Multi-patient ODE system with neural-network terms: per-patient inputs
    (Atimesj, Btimesj, stress) are interpolated over tspan and the computation
    graph is built with TensorFlow.
'''
@lD.log(logBase + '.__init__')
def __init__(logger, self, Npat, Nnt, Nl, tspan, Atimesj, Btimesj, fj, rj, mj,
stress_t, stress_v, layers, activations, gpu_device='0'):
        '''Store the problem sizes and per-patient parameters, build interpolators
        for the stress and A/B signals, and construct the TensorFlow ops on the
        requested GPU device(s).
Parameters
----------
logger : {[type]}
[description]
self : {[type]}
[description]
'''
try:
self.Npat = Npat # --> 1 number
self.Nnt = Nnt # --> 1 number
self.Nl = Nl # --> 1 number
self.NperUser = Nnt + Nl # --> 1 number
self.tspan = tspan # --> 1D array
self.fj = fj # --> Npat arrays
self.rj = rj # --> Npat arrays
self.mj = mj # --> Npat arrays
os.environ["CUDA_VISIBLE_DEVICES"] = gpu_device
self.device = ['/device:GPU:{}'.format(g) for g in gpu_device.split(',')]
self.stressInterp = [interp1d(t_vec, s_vec) for t_vec, s_vec in zip(stress_t, stress_v)]
self.AjInterp = [interp1d(tspan, a_vec) for a_vec in Atimesj]
self.BjInterp = [interp1d(tspan, b_vec) for b_vec in Btimesj]
activation_map = { 'tanh' : tf.nn.tanh,
'sigmoid' : tf.nn.sigmoid,
'relu' : tf.nn.relu,
'linear' : tf.identity }
activations = [ activation_map[a] for a in activations ]
start = time.time()
for d in self.device:
with tf.device(d):
self.tf_opsFlow(layers=layers, activations=activations)
timespent = time.time() - start
print('graphTime', timespent)
# self.options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
# self.run_metadata = tf.RunMetadata()
except Exception as e:
logger.error('Unable to initialize multipleODE \n{}'.format(str(e)))
@lD.log(logBase + '.tf_opsFlow')
def tf_opsFlow(logger, self, layers, activations):
try:
with tf.variable_scope('weights'):
self.fj_tf = [ tf.Variable(fj_vec, dtype=tf.float32, name='fj_{}'.format(index))
for index, fj_vec in enumerate(self.fj)]
self.rj_tf = [ tf.Variable(rj_vec, dtype=tf.float32, name='rj_{}'.format(index))
for index, rj_vec in enumerate(self.rj)]
self.mj_tf = [ tf.Variable(mj_vec, dtype=tf.float32, name='mj_{}'.format(index))
for index, mj_vec in enumerate(self.mj)]
self.NNwts_tf = []
self.NNb_tf = []
self.NNact_tf = []
self.Taus_tf = []
prev_l = self.Nnt + 1
for i, l in enumerate(layers):
wts = tf.Variable( | np.random.random(size=(l, prev_l)) | numpy.random.random |
#!/usr/bin/evn python
import numpy as np
import scipy.linalg
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from os.path import join, isfile
from DataCube import DataCube
import pickle
folder = './data'
data_loc = 'measurements.pkl'
shape = [
(('ramp_dist', np.float64), { 'min': 5, 'max': 16, 'inc': 1 }),
(('drive', int), { 'min': 40, 'max': 101, 'inc': 3 }),
(('voltage', int), { 'min': 7.7, 'max': 8.11, 'inc': 0.01 }),
]
data = None
if isfile(join(folder, data_loc)):
print('loading data...')
with open(join(folder, data_loc), 'rb') as file:
data = pickle.load(file)
print('data is loaded.')
voi = int((shape[2][1]['max'] - shape[2][1]['min']) / shape[2][1]['inc'])
dri = int((shape[1][1]['max'] - shape[1][1]['min']-1) / shape[1][1]['inc'])
dii = int((shape[0][1]['max'] - shape[0][1]['min']-1) / shape[0][1]['inc'])
drives = np.zeros((dii, dri))
jumpdist = np.zeros((dii, dri))
x = []
y = []
z = []
for vo in range(voi):
for dr in range(dri):
for di in range(dii):
jd = data.data[di][dr][vo]
if jd > 1:
x.append((di*shape[0][1]['inc']) + shape[0][1]['min'])
y.append(jd)
z.append((dr*shape[1][1]['inc']) + shape[1][1]['min'])
# some 3-dim points
data = np.c_[x, y, z]
# regular grid covering the domain of the data
mn = np.min(data, axis=0)
mx = np.max(data, axis=0)
X,Y = np.meshgrid(np.linspace(mn[0], mx[0], 20), np.linspace(mn[1], mx[1], 20))
XX = X.flatten()
YY = Y.flatten()
order = 2 # 1: linear, 2: quadratic
if order == 1:
# best-fit linear plane
A = np.c_[data[:,0], data[:,1], np.ones(data.shape[0])]
C,_,_,_ = scipy.linalg.lstsq(A, data[:,2]) # coefficients
# evaluate it on grid
Z = C[0]*X + C[1]*Y + C[2]
# or expressed using matrix/vector product
#Z = np.dot(np.c_[XX, YY, np.ones(XX.shape)], C).reshape(X.shape)
elif order == 2:
# best-fit quadratic curve
    A = np.c_[np.ones(data.shape[0]), data[:,:2], np.prod(data[:,:2], axis=1), data[:,:2]**2]
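    # Hedged continuation: solve for the six quadratic coefficients and evaluate the
    # fitted surface on the grid, mirroring the linear branch above (the column order
    # of A is assumed to be [1, x, y, x*y, x^2, y^2]):
    #   C, _, _, _ = scipy.linalg.lstsq(A, data[:, 2])
    #   Z = np.dot(np.c_[np.ones(XX.shape), XX, YY, XX*YY, XX**2, YY**2], C).reshape(X.shape)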
# coding: utf-8
# In[4]:
from math import sqrt
from numpy import concatenate
from matplotlib import pyplot
import pandas as pd
from datetime import datetime
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import mean_squared_error
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
import plotly.offline as py
import plotly.graph_objs as go
import numpy as np
import seaborn as sns
py.init_notebook_mode(connected=True)
get_ipython().magic('matplotlib inline')
# In[5]:
import matplotlib
import pylab
import matplotlib.pyplot as plt
import matplotlib.dates as md
import datetime
import seaborn as sns
from scipy.stats import pearsonr
from matplotlib import cm as cm
import calendar
import warnings
import itertools
from statsmodels.tsa.stattools import adfuller
import statsmodels.formula.api as smf
import statsmodels.api as sm
import statsmodels.tsa.api as smt
from sklearn.metrics import mean_squared_error
import pandas as pd
import seaborn as sb
import itertools
import numpy as np
from statsmodels.tsa.seasonal import seasonal_decompose
from statsmodels.tsa.arima_model import ARIMA
from numpy import loadtxt
import os
import json
import numpy as np
import pandas as pd
from sklearn.externals import joblib
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.ensemble import RandomForestClassifier
from sklearn.pipeline import make_pipeline
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('whitegrid')
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.externals import joblib
import warnings
warnings.filterwarnings("ignore")
y = 2016
new_data = pd.DataFrame()
sample_times = []
for y in range(2014,2016,1):
print (y)
for m in range(1,13,1):
no_of_days = calendar.monthrange(2014,m)[1]
for d in range (1,no_of_days+1,1):
## for d in range (1,2,1):
# data = pd.read_csv("C:\\Users\\ahilan\\Dropbox\\Research\\Solar Forecast\\Solar Asia 2018\\Data\\Year %d\\D120318_%d%02d%02d_0000.csv"%(y,y,m, d));
data = pd.read_csv("F:\edit\Data\data\Predicting\\D120318_%d%02d%02d_0000.csv"%(y,m,d));
if (pd.to_datetime(data['Date/time'][2]) -pd.to_datetime(data['Date/time'][1])).seconds ==600:
new_data_temp = data[['Date/time','Anemometer;wind_speed;Avg','Wind Vane;wind_direction;Avg','Hygro/Thermo;humidity;Avg', 'Hygro/Thermo;temperature;Avg','Barometer;air_pressure;Avg','Pyranometer-Diffused;solar_irradiance;Avg', 'Pyranometer-Global;solar_irradiance;Avg', 'Silicon;voltage;Avg']][0:144].copy()
new_data = new_data.append(new_data_temp)
for i in range(len(new_data_temp)):
sample_times.append(datetime.datetime(y, m, d, 6, 00, 0)+ i*datetime.timedelta(minutes=10))
elif (pd.to_datetime(data['Date/time'][2]) -pd.to_datetime(data['Date/time'][1])).seconds ==60:
new_data_temp = data[['Date/time','Anemometer;wind_speed;Avg','Wind Vane;wind_direction;Avg','Hygro/Thermo;humidity;Avg', 'Hygro/Thermo;temperature;Avg','Barometer;air_pressure;Avg','Pyranometer-Diffused;solar_irradiance;Avg', 'Pyranometer-Global;solar_irradiance;Avg', 'Silicon;voltage;Avg']][0:1440].copy()
new_data = new_data.append(new_data_temp)
for i in range(len(new_data_temp)):
sample_times.append(datetime.datetime(y, m, d, 6, 00, 0)+ i*datetime.timedelta(minutes=1))
new_data.columns=['time','wind_speed','wind_dir','humidity','temperature','pressure','dhi','ghi','voltage']
sample_times_series = pd.Series(sample_times)
new_data['time'] = sample_times_series.values
new_data = new_data.reset_index().set_index('time').resample('10min').mean()
# In[8]:
new_data.drop('index', axis=1, inplace=True)
# In[9]:
new_data.drop('ghi', axis=1, inplace=True)
# In[10]:
new_data.drop('voltage', axis=1, inplace=True)
# In[14]:
btc_trace = go.Scatter(x=new_data.index, y=new_data['dhi'], name= 'Price')
py.iplot([btc_trace])
# In[15]:
new_data['dhi'].replace(0, np.nan, inplace=True)
new_data['dhi'].fillna(method='ffill', inplace=True)
# In[16]:
btc_trace = go.Scatter(x=new_data.index, y=new_data['dhi'], name= 'dhi')
py.iplot([btc_trace])
# In[23]:
from sklearn.preprocessing import MinMaxScaler
values = new_data['dhi'].values
np.random.seed(1)
values = values.reshape(-1,1)
# In[26]:
scaler = MinMaxScaler(feature_range=(0, 1))
# In[28]:
y=new_data
y = y.fillna(y.bfill())
values =y.values
# In[29]:
scaled = scaler.fit_transform(values)
# In[30]:
train_size = int(len(scaled) * 0.75)
test_size = len(scaled) - train_size
train, test = scaled[0:train_size,:], scaled[train_size:len(scaled),:]
print(len(train), len(test))
# In[31]:
def create_dataset(dataset, look_back=1):
dataX, dataY = [], []
for i in range(len(dataset) - look_back):
a = dataset[i:(i + look_back), 0]
dataX.append(a)
dataY.append(dataset[i + look_back, 0])
print(len(dataY))
return np.array(dataX), np.array(dataY)
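# Added illustrative note (not in the original notebook): with look_back=1 and a
# single-column dataset [[10], [20], [30], [40]], create_dataset returns
# dataX = [[10], [20], [30]] and dataY = [20, 30, 40], i.e. each input is one
# past value and the target is the value that follows it.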
# In[32]:
look_back = 1
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
# In[33]:
trainX = np.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))
testX = np.reshape(testX, (testX.shape[0], 1, testX.shape[1]))
# In[34]:
model = Sequential()
model.add(LSTM(100, input_shape=(trainX.shape[1], trainX.shape[2])))
model.add(Dense(1))
model.compile(loss='mae', optimizer='adam')
history = model.fit(trainX, trainY, epochs=300, batch_size=100, validation_data=(testX, testY), verbose=0, shuffle=False)
# In[35]:
pyplot.plot(history.history['loss'], label='train')
pyplot.plot(history.history['val_loss'], label='test')
pyplot.legend()
pyplot.show()
# In[36]:
yhat = model.predict(testX)
pyplot.plot(yhat, label='predict')
pyplot.plot(testY, label='true')
pyplot.legend()
pyplot.show()
# In[62]:
yhat = model.predict(testX)
# In[66]:
# Let`s import all packages that we may need:
import sys
import numpy as np # linear algebra
from scipy.stats import randint
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv), data manipulation as in SQL
import matplotlib.pyplot as plt # this is used for the plot the graph
import seaborn as sns # used for plot interactive graph.
from sklearn.model_selection import train_test_split # to split the data into two parts
from sklearn.model_selection import KFold # use for cross validation
from sklearn.preprocessing import StandardScaler # for normalization
from sklearn.preprocessing import MinMaxScaler
from sklearn.pipeline import Pipeline # pipeline making
from sklearn.model_selection import cross_val_score
from sklearn.feature_selection import SelectFromModel
from sklearn import metrics # for the check the error and accuracy of the model
from sklearn.metrics import mean_squared_error,r2_score
## for Deep-learing:
import keras
from keras.layers import Dense
from keras.models import Sequential
from keras.utils import to_categorical
from keras.optimizers import SGD
from keras.callbacks import EarlyStopping
from keras.utils import np_utils
import itertools
from keras.layers import LSTM
from keras.layers.convolutional import Conv1D
from keras.layers.convolutional import MaxPooling1D
from keras.layers import Dropout
# In[67]:
new_data.columns
# In[71]:
## finding all columns that have nan:
droping_list_all=[]
for j in range(0,6):
if not new_data.iloc[:, j].notnull().all():
droping_list_all.append(j)
#print(df.iloc[:,j].unique())
droping_list_all
# In[73]:
for j in range(0,6):
new_data.iloc[:,j]=new_data.iloc[:,j].fillna(new_data.iloc[:,j].mean())
# In[74]:
new_data.isnull().sum()
# In[75]:
new_data.dhi.resample('D').sum().plot(title='DHI resampled over day for sum')
#df.Global_active_power.resample('D').mean().plot(title='Global_active_power resampled over day', color='red')
plt.tight_layout()
plt.show()
new_data.dhi.resample('D').mean().plot(title='DHI resampled over day for mean', color='red')
plt.tight_layout()
plt.show()
# In[77]:
# Below I compare the mean of different features resampled over day.
# specify columns to plot
cols = [0, 1, 2, 3, 4, 5]
i = 1
groups=cols
values = new_data.resample('D').mean().values
# plot each column
plt.figure(figsize=(15, 10))
for group in groups:
plt.subplot(len(cols), 1, i)
plt.plot(values[:, group])
plt.title(new_data.columns[group], y=0.75, loc='right')
i += 1
plt.show()
# In[78]:
new_data.dhi.resample('W').mean().plot(color='y', legend=True)
plt.show()
# In[79]:
new_data.wind_speed.resample('W').mean().plot(color='r', legend=True)
# In[80]:
new_data.humidity.resample('W').mean().plot(color='b', legend=True)
# In[81]:
new_data.temperature.resample('W').mean().plot(color='g', legend=True)
# In[82]:
new_data.pressure.resample('W').mean().plot(color='y', legend=True)
# In[84]:
new_data.wind_dir.resample('W').mean().plot(color='r', legend=True)
# In[85]:
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
n_vars = 1 if type(data) is list else data.shape[1]
dff = pd.DataFrame(data)
cols, names = list(), list()
# input sequence (t-n, ... t-1)
for i in range(n_in, 0, -1):
cols.append(dff.shift(i))
names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)]
# forecast sequence (t, t+1, ... t+n)
for i in range(0, n_out):
cols.append(dff.shift(-i))
if i == 0:
names += [('var%d(t)' % (j+1)) for j in range(n_vars)]
else:
names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)]
# put it all together
agg = pd.concat(cols, axis=1)
agg.columns = names
# drop rows with NaN values
if dropnan:
agg.dropna(inplace=True)
return agg
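# Added illustrative note (not part of the original notebook): for a two-variable
# series [[1, 10], [2, 20], [3, 30]], series_to_supervised(data, 1, 1) builds the
# columns var1(t-1), var2(t-1), var1(t), var2(t); the first row is dropped because
# its lagged values are NaN, leaving the rows [1, 10, 2, 20] and [2, 20, 3, 30].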
# In[86]:
## resampling of data over hour
df_resample = new_data.resample('h').mean()
df_resample.shape
# In[88]:
## * Note: I scale all features in range of [0,1].
## If you would like to train based on the resampled data (over hour), then use the block below
values = df_resample.values
## full data without resampling
#values = df.values
# integer encode direction
# ensure all data is float
#values = values.astype('float32')
# normalize features
scaler = MinMaxScaler(feature_range=(0, 1))
scaled = scaler.fit_transform(values)
# frame as supervised learning
reframed = series_to_supervised(scaled, 1, 1)
# drop columns we don't want to predict
print(reframed.head())
# In[89]:
# split into train and test sets
values = reframed.values
n_train_time = 365*24
train = values[:n_train_time, :]
test = values[n_train_time:, :]
##test = values[n_train_time:n_test_time, :]
# split into input and outputs
train_X, train_y = train[:, :-1], train[:, -1]
test_X, test_y = test[:, :-1], test[:, -1]
# reshape input to be 3D [samples, timesteps, features]
train_X = train_X.reshape((train_X.shape[0], 1, train_X.shape[1]))
test_X = test_X.reshape((test_X.shape[0], 1, test_X.shape[1]))
print(train_X.shape, train_y.shape, test_X.shape, test_y.shape)
# We reshaped the input into the 3D format as expected by LSTMs, namely [samples, timesteps, features].
# In[92]:
model = Sequential()
model.add(LSTM(100, input_shape=(train_X.shape[1], train_X.shape[2])))
model.add(Dropout(0.2))
# model.add(LSTM(70))
# model.add(Dropout(0.3))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
# fit network
history = model.fit(train_X, train_y, epochs=20, batch_size=70, validation_data=(test_X, test_y), verbose=2, shuffle=False)
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper right')
plt.show()
# In[122]:
yhat = model.predict(test_X)
# In[128]:
test_X = test_X.reshape((test_X.shape[0], test_X.shape[2]))
# In[132]:
inv_yhat = concatenate((yhat, test_X[:, 1:]), axis=1)
inv_yhat = np.concatenate((yhat, test_X[:, -6:]), axis=1)
# In[130]:
test_y = test_y.reshape((len(test_y), 1))
inv_y = concatenate((test_y, test_X[:, 1:]), axis=1)
# In[131]:
rmse = np.sqrt(mean_squared_error(inv_y, inv_yhat))
print('Test RMSE: %.3f' % rmse)
# In[140]:
inv_yhat = np.concatenate((yhat, test_X[:, -5:]), axis=1)
# In[141]:
inv_yhat = scaler.inverse_transform(inv_yhat)
# In[142]:
inv_yhat = inv_yhat[:,0]
# In[143]:
test_y = test_y.reshape((len(test_y), 1))
# In[144]:
inv_y = | np.concatenate((test_y, test_X[:, -5:]), axis=1) | numpy.concatenate |
import numpy as np
import tensorflow as tf
import time
import os
import matplotlib.pyplot as plt
import matplotlib as mpt
import colorsys as cls
import statistics as stat
from sklearn.model_selection import train_test_split
import csv
import pickle
from mmd import rbf_mmd2, median_pairwise_distance, mix_rbf_mmd2_and_ratio
import Synth_data as sd
class RGAN:
def generator(self, z, c=None):
with tf.variable_scope("generator") as scope:
# each step of the generator takes a random seed + the conditional embedding
# repeated_encoding = tf.tile(c, [1, tf.shape(z)[1]])
# repeated_encoding = tf.reshape(repeated_encoding, [tf.shape(z)[0], tf.shape(z)[1],
# cond_dim])
# generator_input = tf.concat([repeated_encoding, z], 2)
cell = tf.contrib.rnn.LSTMCell(num_units=hidden_units_g, state_is_tuple=True)
rnn_outputs, rnn_states = tf.nn.dynamic_rnn(
cell=cell,
dtype=tf.float32,
sequence_length=[seq_length] * batch_size,
inputs=z)
rnn_outputs_2d = tf.reshape(rnn_outputs, [-1, hidden_units_g])
logits_2d = tf.matmul(rnn_outputs_2d, W_out_G) + b_out_G
output_2d = tf.nn.tanh(logits_2d)
output_3d = tf.reshape(output_2d, [-1, seq_length, num_generated_features])
return output_3d
def discriminator(self, x, c=None, reuse=False):
with tf.variable_scope("discriminator") as scope:
# correct?
if reuse:
scope.reuse_variables()
# each step of the generator takes one time step of the signal to evaluate +
# its conditional embedding
# repeated_encoding = tf.tile(c, [1, tf.shape(x)[1]])
# repeated_encoding = tf.reshape(repeated_encoding, [tf.shape(x)[0], tf.shape(x)[1],
# cond_dim])
# decoder_input = tf.concat([repeated_encoding, x], 2)
cell = tf.contrib.rnn.LSTMCell(num_units=self.hidden_units_d, state_is_tuple=True,
reuse=tf.get_variable_scope().reuse)
rnn_outputs, rnn_states = tf.nn.dynamic_rnn(
cell=cell,
dtype=tf.float32,
inputs=x)
rnn_outputs_flat = tf.reshape(rnn_outputs, [-1, self.hidden_units_g])
logits = tf.matmul(rnn_outputs_flat, W_out_D) + b_out_D
# logits = tf.einsum('ijk,km', rnn_outputs, W_out_D) + b_out_D
output = tf.nn.sigmoid(logits)
return output, logits
# Latent Space Sampler
def sample_Z(self, batch_size, seq_length, latent_dim, use_time=False, use_noisy_time=False):
sample = np.float32( | np.random.normal(size=[batch_size, seq_length, latent_dim]) | numpy.random.normal |
from hyperopt import STATUS_OK
from hyperopt import hp
from timeit import default_timer as timer
import numpy as np
import lightgbm as lgb
from hyperopt import tpe
from hyperopt import Trials
from hyperopt import fmin
from sklearn.metrics import average_precision_score
from hyperopt.pyll.stochastic import sample
from sklearn.metrics import roc_auc_score, f1_score, precision_recall_curve, auc
def focal_isoform_binary_object(pred, dtrain, alpha=0.5, beta=0.0, gamma=2.0):
    # alpha controls the weight of positives:
    #   either alpha < 1 for less weight and alpha > 1 for more,
    #   or, when alpha is kept in (0, 1): 0-0.5 gives less weight, 0.5-1 gives more
    # beta controls the shift of the loss function:
    #   beta > 0 shifts it to the left (less weight on well-trained samples)
    # gamma controls the steepness of the loss function:
    #   gamma > 0
label = dtrain.get_label()
x = beta + (2.0 * label - 1) * gamma * pred
p = 1. / (1. + np.exp(-x))
# grad = (1 + (alpha - 1) * label) * (2 * label - 1) * (p - 1)
grad = (1 - label + (label * 2 - 1) * alpha) * (2 * label - 1) * (p - 1)
# hess = (1 + (alpha - 1) * label) * gamma * (1 - p) * p
hess = (1 - label + (label * 2 - 1) * alpha) * gamma * (1 - p) * p
return grad, hess
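# Added sanity-check note: with the defaults alpha=0.5, beta=0.0, gamma=2.0 and
# pred=0, p evaluates to 0.5 for every sample, so grad is -0.25 for positives and
# +0.25 for negatives while hess is 0.25 for both, i.e. the two classes start out
# symmetrically weighted before alpha/beta/gamma are tuned.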
def lgb_auprc_score_sklearn(y_hat, data):
y_true = data.get_label()
precision, recall, _ = precision_recall_curve(y_true, y_hat)
return 'auprc', auc(recall, precision), True
def lgb_auprc_score(y_hat, data):
y_true = data.get_label()
# TODO try not to round yhat
# y_hat = np.round(y_hat) # scikits f1 doesn't like probabilities
return 'auprc', average_precision_score(y_true, y_hat), True
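# Added note: LightGBM custom eval functions (feval) return a
# (name, value, is_higher_better) tuple; the trailing True tells early stopping
# that a larger AUPRC is better.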
params = {
'boosting_type': 'gbdt',
# 'ignore_column': list(range(500)),
# 'boosting_type': 'dart',
# 'drop_rate': 0.3,
# 'max_drop': 50,
# 'skip_drop': 0.5,
# 'drop_seed': 6,
# 'pos_bagging_fraction': 0.001,
# 'neg_bagging_fraction': 0.001,
# 'bagging_freq': 10000,
# 'bagging_seed': 6,
'objective': 'binary',
# 'objective': focal_binary_object,
# 'metric': ['binary_error', 'binary_logloss', "auc"],
'metric': ["auc"],
# 'first_metric_only': True,
# 'is_unbalance': True,
# "scale_pos_weight": 100,
'metric_freq': 10,
'num_leaves': 31,
'min_data_in_leaf': 20,
# 'max_bin': 255,
'num_threads': 32,
'learning_rate': 0.1,
'feature_fraction': 1,
'boost_from_average': False,
'verbose': 1
}
evals_result = {}
gbm = lgb.train(params=params,
train_set=dtrain_subset,
num_boost_round=1000,
fobj=lambda x, y: focal_isoform_binary_object(x, y, alpha=0.5, beta=-1.5, gamma=1.01),
# fobj=lambda x,y:logistic_obj(x,y,imbalance_alpha=1.0),
valid_sets=[dtrain_subset,dtrain_subset_2],
valid_names=['test'],
feval=lgb_auprc_score,
# early_stopping_rounds=1,
evals_result=evals_result,
keep_training_booster=False,
learning_rates=lambda x: 0.2 * (0.98 ** x),
callbacks=[early_stopping(1, first_metric_only=False, verbose=True)]
# init_model=gbm
# init_model=init_gbm
)
N_FOLDS = 6
# dtrain = lgb.Dataset('train/GABPA/binary_files/lightGBM.all.MCF-7.chrom_set_test.bin').construct()
dtrain = lgb.Dataset('train/JUND/binary_files/lightGBM.all.HepG2.chrom_set_test.bin').construct()
subset_index = np.random.choice(np.arange(dtrain.num_data()), int(dtrain.num_data() / 10), replace=False)
dtrain_subset = dtrain.subset(subset_index).construct()
# subset_index = np.random.choice(np.arange(dtrain.num_data()), int(dtrain.num_data() / 10), replace=False)
# dtrain_subset_2 = dtrain.subset(subset_index).construct()
subset_index = np.random.choice(np.arange(dtrain_subset.num_data()), int(dtrain_subset.num_data() / 10), replace=False)
dtrain_subset_2 = dtrain_subset.subset(subset_index).construct()
from scripts.early_stopping_avg import early_stopping
cv_results = lgb.cv(params, dtrain_subset, num_boost_round=10000,
nfold=6,
fobj=lambda x, y: focal_isoform_binary_object(x, y, alpha=0.5, beta=1, gamma=1.01),
feval=lgb_auprc_score,
# early_stopping_rounds=20,
seed=6, verbose_eval=1,
callbacks=[early_stopping(1, first_metric_only=False, verbose=True)])
start = timer()
# cv_results = lgb.cv(params, dtrain_subset, num_boost_round=100,
# nfold=2,
# fobj=lambda x, y: focal_isoform_binary_object(x, y, alpha=0.5, beta=1, gamma=1.01),
# feval=lgb_auprc_score,
# # early_stopping_rounds=20,
# seed=6, verbose_eval=1,
# # callbacks=[early_stopping(1, first_metric_only=False, verbose=True)]
# )
evals_result = {}
gbm = lgb.train(params=params,
# train_set=dtrain_subset,
train_set=dtrain,
num_boost_round=100,
fobj=lambda x, y: focal_isoform_binary_object(x, y, alpha=0.5, beta=-1.5, gamma=1.01),
# fobj=lambda x,y:logistic_obj(x,y,imbalance_alpha=1.0),
# valid_sets=[dtrain_subset, dtrain_subset_2],
# valid_names=['train', 'test'],
# valid_sets=[dtrain_subset],
valid_sets=[dtrain],
valid_names=['train'],
feval=lgb_auprc_score,
early_stopping_rounds=1,
evals_result=evals_result,
keep_training_booster=False,
learning_rates=lambda x: 0.2 * (0.98 ** x),
# callbacks=[lgb.early_stopping(1, first_metric_only=False, verbose=True)]
# init_model=gbm
# init_model=init_gbm
)
run_time = timer() - start
print(run_time)
# run_time values observed in earlier runs (seconds):
# 112.17675656080246
# 57.596633109264076
# 37.916644868440926
# 16.787454077973962
# def hyperopt_objective(argsDict, dtrain=dtrain, n_folds=6):
def hyperopt_objective(argsDict, dtrain=dtrain_subset, n_folds=3):
"""Objective function for Gradient Boosting Machine Hyperparameter Optimization"""
# Keep track of evals
global ITERATION
ITERATION += 1
# Make sure parameters that need to be integers are integers
for parameter_name in ['num_leaves',
'min_data_in_leaf',
# 'max_depth'
]:
argsDict[parameter_name] = int(argsDict[parameter_name])
start = timer()
params = {
'boosting_type': 'gbdt',
# 'ignore_column': list(range(500)),
# 'boosting_type': 'dart',
# 'drop_rate': 0.3,
# 'max_drop': 50,
# 'skip_drop': 0.5,
# 'drop_seed': 6,
# 'pos_bagging_fraction': 0.001,
# 'neg_bagging_fraction': 0.001,
# 'bagging_freq': 10000,
# 'bagging_seed': 6,
'objective': 'binary',
# 'objective': focal_binary_object,
# 'metric': ['binary_error', 'binary_logloss', "auc"],
# 'metric': ["auc"],
# 'first_metric_only': True,
# 'is_unbalance': True,
# "scale_pos_weight": 100,
# 'feature_fraction_bynode': True,
'metric_freq': 10,
'num_leaves': argsDict['num_leaves'],
'min_data_in_leaf': argsDict['min_data_in_leaf'],
# 'min_data_in_leaf': 20,
# 'max_depth': argsDict['max_depth'],
# 'min_sum_hessian_in_leaf': argsDict['min_sum_hessian_in_leaf'],
# 'bagging_fraction': argsDict['bagging_fraction'],
# 'feature_fraction': argsDict['feature_fraction'],
# 'lambda_l1': argsDict['lambda_l1'],
# 'lambda_l2': argsDict['lambda_l2'],
# 'max_bin': 255,
'num_threads': 32,
# 'learning_rate': argsDict['learning_rate'],
'learning_rate': 0.1,
'bagging_freq': 1,
'boost_from_average': False,
'verbose': -1
}
# Perform n_folds cross validation
# cv_results = lgb.cv(params, dtrain, num_boost_round=10000, nfold=n_folds,
# early_stopping_rounds=20, metrics='auc', seed=50)
cv_results = lgb.cv(params, dtrain, num_boost_round=300, nfold=n_folds,
fobj=lambda x, y: focal_isoform_binary_object(x, y,
# alpha=float(
# np.clip(argsDict['alpha'], 0.001, 0.999)),
alpha=1. / (1. + np.exp(
-argsDict['alpha_isoform'])),
beta=argsDict['beta'],
gamma=argsDict['gamma']),
feval=lgb_auprc_score,
early_stopping_rounds=20, seed=6,
# verbose_eval=10
)
run_time = timer() - start
# Extract the best score
best_score = np.max(cv_results['auprc-mean'])
# Loss must be minimized
loss = 1 - best_score
# Boosting rounds that returned the highest cv score
n_estimators = int(np.argmax(cv_results['auprc-mean']) + 1)
print('auprc:{} ITERATION:{} n_estimators:{} run_time:{}'.format(best_score, ITERATION, n_estimators, run_time),
end="\n")
# Dictionary with information for evaluation
return {'loss': loss,
'params': argsDict,
'iteration': ITERATION,
'estimators': n_estimators,
'train_time': run_time,
'status': STATUS_OK}
# return loss
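# Hedged sketch (not in the original snippet): the objective above is meant to be
# driven by hyperopt once `space` (defined below) is available; a minimal call
# could look like this (max_evals is an arbitrary example value).
# ITERATION = 0
# trials = Trials()
# best = fmin(fn=hyperopt_objective, space=space, algo=tpe.suggest,
#             max_evals=50, trials=trials)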
# Define the search space
space = {
# 'class_weight': hp.choice('class_weight', [None, 'balanced']),
'num_leaves': hp.qloguniform('num_leaves', np.log(15), | np.log(1023) | numpy.log |
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 21 18:02:49 2017
@author: JonLee
"""
#==============================================================================
# Eyelink data plotter
#==============================================================================
# Add (to gui):
# Option to select plotting of (in gaze map):
# start saccades
# end saccades
# Saccade traces
#
# fixation start
# fixation end
# fixation trace
#
# All trial data
#
# Color:
# Color option for lines and saccades (later)
#
# Bug fix:
# The scaling when using different x/y values in imshow(gauss)
# Import modules
import matplotlib.pyplot as plt
import numpy as np
import astropy.convolution as krn
from matplotlib import cm
import matplotlib.patches as patches
import traceback
def uniqueRows(x):
y = np.ascontiguousarray(x).view(np.dtype((np.void, x.dtype.itemsize * x.shape[1])))
_, idx, counts = np.unique(y, return_index=True, return_counts = True)
uniques = x[idx]
return uniques, idx, counts
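# Added illustrative note: uniqueRows(np.array([[1, 2], [1, 2], [3, 4]])) returns
# the unique rows ([[1, 2], [3, 4]]), the index of the first occurrence of each
# unique row, and how often each one appears (here [2, 1]).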
def plotTrial(timeStamp, xPos, yPos, euclidDist, **par):
try:
# Get constants
pltType = par.pop('pltType','gaze') # options: 'gaze', 'heat'
pltStyle = par.pop('pltStyle', 'Scatter') # scatter or line
pltBg = par.pop('pltBg', False)
bgImLoc = par.pop('bgImage' , False)
bgAspect = par.pop('bgAspect', 'equal') # 'auto','equal'
trial = par.pop('trial', 48)
dataScaling = par.pop('dataScaling', 5)
kernel = par.pop('kernel', 'Gaussian2DKernel')
kernelPar = par.pop('kernelPar', 25)
kernelCM = par.pop('kernelCM', 'hot')
kernelCMInverse = par.pop('kernelCMInverse', False)
kernelThreshold = par.pop('kernelThreshold', 0.3)
kernelAlpha = par.pop('kernelAlpha', 0.50)
xMax = par.pop('xMax', 1680)
xMin = par.pop('xMin', 0)
yMax = par.pop('yMax', 1050)
yMin = par.pop('yMin', 0)
included = par.pop('included', 'True')
highlight = par.pop('highlight', 'None')
addLabel = str(par.pop('addLabel', False))
addInfo = str(par.pop('addInfo', False))
xLabel = par.pop('xLabel', 'Pixel position')
ylabel = par.pop('yLabel', 'Pixel position')
speedLabel = par.pop('speedLabel', 'Speed')
figAx = par.pop('figAx', False)
if highlight == 'Saccade':
sHighL = par.pop('ssacc')
durHighL = par.pop('saccDur')
elif highlight == 'Fixation':
sHighL = par.pop('sFix')
durHighL = par.pop('fixDur')
elif highlight == 'None':
sHighL = par.pop('sFix', [])
durHighL = par.pop('fixDur', [])
#==========================================================================
# Plotting
#==========================================================================
        # recalculate time to zero for each trial
trialStart = timeStamp[0]
normTime = timeStamp - trialStart
if len(normTime) == len(xPos):
xTime = normTime
else:
xTime = np.arange(len(xPos))
# lets plot x position over time
ax1 = figAx[1]
#ax1.set_title('Xgaze(time)')
ax1.set_xlabel('Time (ms)')
ax1.set_ylabel(xLabel)
ax1.set_ylim([xMin,xMax])
if pltStyle == 'Line':
ax1.plot(xTime, xPos)
elif pltStyle == 'Scatter':
ax1.scatter(xTime, xPos,marker = 'p', s = 1)
ax1.set_xlim([xTime[0], xTime[-1]])
if highlight != 'None':
# Add rectangles for Saccades
for i in range(0,len(sHighL)):
ax1.add_patch(patches.Rectangle((sHighL[i] - trialStart, ax1.get_ylim()[0]),
durHighL[i],
abs(ax1.get_ylim()[1] - ax1.get_ylim()[0]),
fill=True, alpha = 0.3))
# lets plot y position over time
if len(normTime) == len(yPos):
yTime = normTime
else:
yTime = np.arange(len(yPos))
ax2 = figAx[2]
#ax2.set_title('Ygaze(time)')
ax2.set_xlabel('Time (ms)')
ax2.set_ylabel(ylabel)
ax2.set_ylim([yMin,yMax])
if pltStyle == 'Line':
ax2.plot(yTime, yPos)
elif pltStyle == 'Scatter':
ax2.scatter(yTime, yPos, marker = 'p', s = 1)
ax2.set_xlim([yTime[0], yTime[-1]])
if highlight != 'None':
# Add rectangles for Saccades
for i in range(0,len(sHighL)):
ax2.add_patch(patches.Rectangle((sHighL[i] - trialStart, ax2.get_ylim()[0]),
durHighL[i],
abs(ax2.get_ylim()[1] - ax2.get_ylim()[0]),
fill=True, alpha = 0.3))
# Lets plot speed over time (distance between points)
if len(normTime) == len(euclidDist):
speedTime = normTime
else:
speedTime = np.arange(len(euclidDist))
ax3 = figAx[3]
#ax3.set_title('Speed(time)')
ax3.set_xlabel('Time (ms)')
ax3.set_ylabel(speedLabel)
if pltStyle == 'Line':
ax3.plot(speedTime, euclidDist)
elif pltStyle == 'Scatter':
ax3.scatter(speedTime, euclidDist, marker = 'p', s = 1)
ax3.set_xlim([speedTime[0], speedTime[-1]])
ax3.set_ylim([np.min(euclidDist)-20,np.max(euclidDist)+20])
if highlight != 'None':
# Add rectangles for Saccades
for i in range(0,len(sHighL)):
ax3.add_patch(patches.Rectangle((sHighL[i] - trialStart, ax3.get_ylim()[0]),
durHighL[i],
abs(ax3.get_ylim()[1] - ax3.get_ylim()[0]),
fill=True, alpha = 0.3))
# Lets get make a timeseries to plot over time.
timeCol = np.linspace(1,0,len(xPos))
# Lets plot the gaze position during trial
ax4 = figAx[4]
#ax4.set_title('Gaze position')
ax4.set_xlabel('X position (px)')
ax4.set_ylabel('Y position (px)')
ax4.axis([xMin, xMax, yMin, yMax])
if pltType == 'gaze':
if pltBg == True:
bgIm = plt.imread(bgImLoc)
ax4.imshow(np.flipud(bgIm), aspect=bgAspect, extent = [xMin, xMax, yMin, yMax])
if pltStyle == 'Line':
ax4.plot(xPos, yPos)
elif pltStyle == 'Scatter':
ax4.scatter(xPos, yPos, c = timeCol, edgecolors = 'face', marker = 'p', s = 5, cmap = 'hot')
else:
if pltStyle == 'Line':
ax4.plot(xPos, yPos)
elif pltStyle == 'Scatter':
ax4.scatter(xPos, yPos,c = timeCol, edgecolors = 'face', marker = 'p', s = 5, cmap='hot')
ax4.set(aspect = bgAspect)
elif pltType == 'heat' :
#======================================================================
# Make gaussian image
#======================================================================
if pltBg == True:
bgIm = plt.imread(bgImLoc)
ax4.imshow( | np.flipud(bgIm) | numpy.flipud |
import tensorflow as tf
import sys
import numpy as np
import os
import logging
import parameters as pa
def load_label(csv_file):
"""
Label file is a csv file using number to mark gesture for each frame
example content: 0,0,0,2,2,2,2,2,0,0,0,0,0
:param csv_file:
:return: list of int
"""
with open(csv_file, 'r') as label_file:
labels = label_file.read()
labels = labels.split(",")
new_l = []
for l in labels:
new_l.append(int(l))
# Labels is a list of int, representing gesture for each frame
return new_l
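# Added example: a csv file containing "0,0,2,2,0" is returned as [0, 0, 2, 2, 0].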
def load_label_cls_ges(cls_file, ges_file, delay=0):
"""
Label file is a csv file using number to mark gesture for each frame
example content: 0,0,0,2,2,2,2,2,0,0,0,0,0
:param csv_file:
:return: list of int
"""
with open(cls_file, 'r') as cls:
cls_labels = cls.read()
with open(ges_file, 'r') as ges:
ges_labels = ges.read()
cls_labels = cls_labels.split(",")
print(len(cls_labels))
if delay!=0:
cls_pad = ['0'] * delay
cls_labels = cls_labels[:-1*delay]
cls_pad.extend(cls_labels)
else:
cls_pad = cls_labels
ges_labels = ges_labels.split(",")
print(len(ges_labels))
labels = []
for a, (cls, ges) in enumerate(zip(cls_pad, ges_labels)):
if cls == '0' or ges == '0':
labels.append(0)
else:
labels.append((int(cls)- 1) * 8 + int(ges))
# Labels is a list of int, representing gesture for each frame
return labels
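# Added illustrative note: frames where either label is '0' are mapped to 0;
# otherwise the combined id is (cls - 1) * 8 + ges, so cls=1, ges=3 -> 3 and
# cls=2, ges=1 -> 9 (8 gesture classes per cls value are assumed by the formula).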
def cooc_map_clip(npy_cooc, start):
clip_length = pa.time_step
cooc = np.load(npy_cooc)
cooc_cut = np.expand_dims(cooc[start:start + clip_length], -1)
return cooc_cut
def feat_clip(npy, start):
clip_length = pa.time_step
feat = np.load(npy)
feat_cut = feat[start:start + clip_length]
return feat_cut
def jc_label_clip(npy_joints, label_file, start):
"""
:param npy_joints: # feature sequence
:param label_file: # csv labels
:param clip_length: # clip frame length
:return:
"""
clip_length = pa.time_step
joints = np.load(npy_joints)
joints_cut = joints[start:start + clip_length]
labels = load_label(label_file)
labels = np.array(labels, dtype=np.int64)
assert len(labels) == joints.shape[0]
labels_cut = labels[start:start + clip_length]
labels_cut = labels_delay(labels_cut, pa.label_delay_frames)
return joints_cut, labels_cut
def label_clip(label_file, start, delay=pa.label_delay_frames):
"""
:param label_file: # csv labels
:param clip_length: # clip frame length
:return:
"""
clip_length = pa.time_step
labels = load_label(label_file)
labels = np.array(labels, dtype=np.int64)
labels_cut = labels[start:start + clip_length]
if delay != 0:
labels_cut = labels_delay(labels_cut, delay)
return labels_cut
def labels_delay(labels, delay_frames):
z = np.zeros((delay_frames), dtype=np.int64)
l = len(labels) # Original label len
labels = np.concatenate((z, labels)) # len: delay + origin
labels = labels[:l]
return labels
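# Added example: labels_delay(np.array([1, 2, 3, 4]), 2) returns [0, 0, 1, 2];
# the labels are pushed delay_frames frames later while keeping the original length.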
def extract_data_list(data_list, clip_length, label_list, interval = 15):
"""
:param data_list: the list file of train/val/test set, path defined in parameters.py
:return: zip of [video_no., frame_no.]
"""
video_frame_list = []
with open(data_list, 'r') as f:
videos = f.readlines()
for b, video_path in enumerate(videos):
video_base = os.path.basename(video_path).split(".")[0]
labels = load_label(os.path.join(pa.label_abs_folder, video_base+'.csv'))
frame_idx = np.arange(0, len(labels)-clip_length+1, interval)
video_idx = b * np.ones(len(labels)-clip_length+1, dtype=np.int32)
if label_list == ['ref_cls']:
ref_labels = load_label(os.path.join(pa.label_ref_folder, video_base+'.csv'))
frame_idx = [frame_idx[i] for i in range(len(frame_idx)) if ref_labels[i] > 0]
video_idx = [video_idx[i] for i in range(len(video_idx)) if ref_labels[i] > 0]
zipped_idx = zip(video_idx, frame_idx)
video_frame_list.extend(list(zipped_idx))
return video_frame_list
def extract_cooc_map(data_idx, batch_idx, batch_size, use_folder='train'):
if use_folder == 'train':
csv_list = pa.train_list
elif use_folder == 'val':
csv_list = pa.val_list
elif use_folder == 'test':
csv_list = pa.test_list
with open(csv_list, "r") as f:
csv_files = f.readlines()
btcooc = []
idxes = data_idx[batch_idx*batch_size:(batch_idx+1)*batch_size]
for b in range(batch_size):
idx = idxes[b]
label_path = csv_files[idx[0]][:-1]
base_name = label_path[-7:-4]
cooc_path = os.path.join(pa.rnn_saved_cooc_folder, base_name+'.npy')
cooc_cut = cooc_map_clip(cooc_path, idx[1])
btcooc.append(cooc_cut)
return np.asarray(btcooc)
def extract_jcla(data_idx, batch_idx, batch_size, use_folder='train'):
if use_folder == 'train':
csv_list = pa.train_list
elif use_folder == 'val':
csv_list = pa.val_list
elif use_folder == 'test':
csv_list = pa.test_list
with open(csv_list, "r") as f:
csv_files = f.readlines()
btjcla = []
idxes = data_idx[batch_idx*batch_size:(batch_idx+1)*batch_size]
for b in range(batch_size):
idx = idxes[b]
label_path = csv_files[idx[0]][:-1]
base_name = label_path[-7:-4]
jcla_path = os.path.join(pa.rnn_saved_jcla_folder, base_name+'.npy')
jcla_cut = feat_clip(jcla_path, idx[1])
btjcla.append(jcla_cut)
return np.asarray(btjcla)
def extract_jc_labels(data_idx, batch_idx, batch_size, labels, use_folder='train'):
if use_folder == 'train':
csv_list = pa.train_list
elif use_folder == 'val':
csv_list = pa.val_list
elif use_folder == 'test':
csv_list = pa.test_list
with open(csv_list, "r") as f:
csv_files = f.readlines()
btjc = [] # batch time joint coordinate
btl = []
idxes = data_idx[batch_idx*batch_size:(batch_idx+1)*batch_size]
for b in range(batch_size):
idx = idxes[b]
label_path = csv_files[idx[0]][:-1]
base_name = label_path[-7:-4]
joints_path = os.path.join(pa.rnn_saved_joints_folder, base_name + '.npy')
if labels == ['ges']:
label_path = os.path.join(pa.label_ges_folder, base_name+'.csv')
elif labels == ['abs_cls']:
label_path = os.path.join(pa.label_abs_folder, base_name + '.csv')
elif labels == ['cmd_cls']:
label_path = os.path.join(pa.label_cmd_folder, base_name + '.csv')
elif labels == ['com_cls']:
label_path = os.path.join(pa.label_com_folder, base_name + '.csv')
joints_cut, labels_cut = jc_label_clip(joints_path, label_path, idx[1])
btl.append(labels_cut)
btjc.append(joints_cut)
return np.asarray(btjc), np.asarray(btl)
def extract_labels(data_idx, batch_idx, batch_size, labels, use_folder='train'):
if use_folder == 'train':
csv_list = pa.train_list
elif use_folder == 'val':
csv_list = pa.val_list
elif use_folder == 'test':
csv_list = pa.test_list
with open(csv_list, "r") as f:
csv_files = f.readlines()
btl = []
idxes = data_idx[batch_idx*batch_size:(batch_idx+1)*batch_size]
for b in range(batch_size):
idx = idxes[b]
label_path = csv_files[idx[0]][:-1]
base_name = label_path[-7:-4]
if 'ges' in labels:
ges_path = os.path.join(pa.label_ges_folder, base_name+'.csv')
elif 'abs_cls' in labels:
ges_path = os.path.join(pa.label_abs_folder, base_name+'.csv')
elif 'cmd_cls' in labels:
ges_path = os.path.join(pa.label_cmd_folder, base_name+'.csv')
elif 'com_ges' in labels:
ges_path = os.path.join(pa.label_com_folder, base_name + '.csv')
labels_cut = label_clip(ges_path, idx[1])
btl.append(labels_cut)
return | np.asarray(btl) | numpy.asarray |
# coding: utf-8
#
# Project: X-ray image reader
# https://github.com/silx-kit/fabio
#
#
# Copyright (C) European Synchrotron Radiation Facility, Grenoble, France
#
# Principal author: <NAME> (<EMAIL>)
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
"""Authors: <NAME> & <NAME>
Center for Fundamental Research: Metal Structures in Four Dimensions
Risoe National Laboratory
Frederiksborgvej 399
DK-4000 Roskilde
email:<EMAIL>
<NAME>, ESRF, Grenoble, France
Sigmund Neher, GWDG, Göttingen, Germany
"""
# get ready for python3
from __future__ import absolute_import, print_function, with_statement, division
__authors__ = ["<NAME>" , "<NAME>", "<NAME>",
"<NAME>", "<NAME>" ]
__date__ = "05/09/2016"
__status__ = "production"
__copyright__ = "2007-2009 Risoe National Laboratory; 2015-2016 ESRF, 2016 GWDG"
__licence__ = "MIT"
import numpy
import logging
import os
from math import ceil
logger = logging.getLogger("bruker100image")
try:
from PIL import Image
except ImportError:
logger.warning("PIL is not installed ... trying to do without")
Image = None
from .brukerimage import BrukerImage
from .readbytestream import readbytestream
from .fabioutils import pad, StringTypes
class Bruker100Image(BrukerImage):
bpp_to_numpy = {1: numpy.uint8,
2: numpy.uint16,
4: numpy.int32}
version = 100
def __init__(self, data=None, header=None):
BrukerImage.__init__(self, data, header)
self.nover_one = self.nover_two = 0
def _readheader(self, infile):
"""
The bruker format uses 80 char lines in key : value format
In the first 512*5 bytes of the header there should be a
HDRBLKS key, whose value denotes how many 512 byte blocks
are in the total header. The header is always n*5*512 bytes,
        otherwise it won't contain whole key: value pairs
"""
line = 80
blocksize = 512
nhdrblks = 5 # by default we always read 5 blocks of 512
self.__headerstring__ = infile.read(blocksize * nhdrblks).decode("ASCII")
self.header = self.check_header()
for i in range(0, nhdrblks * blocksize, line):
if self.__headerstring__[i: i + line].find(":") > 0:
key, val = self.__headerstring__[i: i + line].split(":", 1)
key = key.strip() # remove the whitespace (why?)
val = val.strip()
if key in self.header:
# append lines if key already there
self.header[key] = self.header[key] + os.linesep + val
else:
self.header[key] = val
# we must have read this in the first 5*512 bytes.
nhdrblks = int(self.header['HDRBLKS'])
self.header['HDRBLKS'] = nhdrblks
# Now read in the rest of the header blocks, appending
self.__headerstring__ += infile.read(blocksize * (nhdrblks - 5)).decode("ASCII")
for i in range(5 * blocksize, nhdrblks * blocksize, line):
if self.__headerstring__[i: i + line].find(":") > 0: # as for first 512 bytes of header
key, val = self.__headerstring__[i: i + line].split(":", 1)
key = key.strip()
val = val.strip()
if key in self.header:
self.header[key] = self.header[key] + os.linesep + val
else:
self.header[key] = val
# set the image dimensions
self.dim1 = int(self.header['NROWS'].split()[0])
self.dim2 = int(self.header['NCOLS'].split()[0])
self.version = int(self.header.get('VERSION', "100"))
def toPIL16(self, filename=None):
if not Image:
raise RuntimeError("PIL is not installed !!! ")
if filename:
self.read(filename)
PILimage = Image.frombuffer("F", (self.dim1, self.dim2), self.data, "raw", "F;16", 0, -1)
return PILimage
def read(self, fname, frame=None):
        '''data is stored in three blocks: data (uint8), overflow (uint32), underflow (int32). The blocks are
        zero padded to a multiple of 16 bytes '''
with self._open(fname, "rb") as infile:
try:
self._readheader(infile)
except:
raise
rows = self.dim1
cols = self.dim2
npixelb = int(self.header['NPIXELB'][0])
# you had to read the Bruker docs to know this!
# We are now at the start of the image - assuming bruker._readheader worked
# Get image block size from NPIXELB.
# The total size is nbytes * nrows * ncolumns.
self.data = readbytestream(infile, infile.tell(), rows, cols, npixelb,
datatype="int", signed='n', swap='n')
# now process the overflows
for k, nover in enumerate(self.header['NOVERFL'].split()):
if k == 0:
# read the set of "underflow pixels" - these will be completely disregarded for now
continue
nov = int(nover)
if nov <= 0:
continue
bpp = 1 << k # (2 ** k)
datatype = self.bpp_to_numpy[bpp]
# upgrade data type
self.data = self.data.astype(datatype)
# pad nov*bpp to a multiple of 16 bytes
nbytes = (nov * bpp + 15) & ~(15)
# Multiple of 16 just above
data_str = infile.read(nbytes)
# ar without zeros
ar = numpy.fromstring(data_str[:nov * bpp], datatype)
# insert the the overflow pixels in the image array:
lim = (1 << (8 * k)) - 1
# generate an array comprising of the indices into data.ravel()
# where its value equals lim.
flat = self.data.ravel()
mask = | numpy.where(flat >= lim) | numpy.where |
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
import cv2
from skimage import metrics
import os
fake_dirs = ['results/psnr/fake_B_{}'.format(i) for i in range(5, 205, 5)]
images = [img.split('.')[0] for img in os.listdir(fake_dirs[0]) ]
res = []
for image in images:
if not 'checkpoint' in image:
res.append(image.split('_fake')[0])
gts = [ np.asarray(Image.open('datasets/sample/trainB/'+image+'.jpg').resize((256, 256))) for image in res]
images = [[ np.asarray(Image.open(fake+'/'+image+'_fake_B.png')) for fake in fake_dirs ] for image in res]
psnr = []
ssim = []
hist_r = []
hist_g = []
hist_b = []
for i, gt in enumerate(gts):
actual_psnr = []
actual_ssim = []
actual_hist_r = []
actual_hist_g = []
actual_hist_b = []
for j in range(len(images[i])):
psnr_value = metrics.peak_signal_noise_ratio(gt, images[i][j])
ssim_value = metrics.structural_similarity(gt, images[i][j], multichannel=True)
color = ('b','g','r')
gt_hst = {}
img_hst = {}
for channel,col in enumerate(color):
gt_hst[col] = cv2.calcHist([gt],[channel],None,[256],[0,256])
img_hst[col] = cv2.calcHist([images[i][j]],[channel],None,[256],[0,256])
hist_value_b = cv2.compareHist(gt_hst['b'], img_hst['b'], cv2.HISTCMP_BHATTACHARYYA )
hist_value_g = cv2.compareHist(gt_hst['g'], img_hst['g'], cv2.HISTCMP_BHATTACHARYYA )
hist_value_r = cv2.compareHist(gt_hst['r'], img_hst['r'], cv2.HISTCMP_BHATTACHARYYA )
actual_psnr.append(psnr_value)
actual_ssim.append(ssim_value)
actual_hist_r.append(hist_value_r)
actual_hist_g.append(hist_value_g)
actual_hist_b.append(hist_value_b)
psnr.append(np.array(actual_psnr))
ssim.append(np.array(actual_ssim))
hist_r.append(np.array(actual_hist_r))
hist_g.append(np.array(actual_hist_g))
hist_b.append(np.array(actual_hist_b))
psnr = np.array(psnr)
ssim = np.array(ssim)
hist_r = np.array(hist_r)
hist_g = | np.array(hist_g) | numpy.array |
import unittest
import numpy as np
from openmdao.utils.assert_utils import assert_near_equal
from wisdem.optimization_drivers.dakota_driver import DakotaOptimizer
try:
import dakota
except ImportError:
dakota = None
@unittest.skipIf(dakota is None, "only run if Dakota is installed.")
class TestDakotaOptimization(unittest.TestCase):
def test_2D_opt_max_iterations(self):
bounds = {"x": np.array([[0.0, 1.0], [0.0, 1.0]])}
desvars = {"x": np.array([0.0, 0.25])}
outputs = ["y"]
template_dir = "template_dir/"
model_string = "from weis.multifidelity.models.testbed_components import simple_2D_high_model as model"
output_scalers = [1.0]
options = {"method": "coliny_cobyla", "max_function_evaluations": 3}
opt = DakotaOptimizer(template_dir)
results = opt.optimize(desvars, outputs, bounds, model_string, output_scalers, options)
assert_near_equal(np.min(np.array(results["y"])), -9.5)
def test_2D_opt_EGO(self):
bounds = {"x": np.array([[0.0, 1.0], [0.0, 1.0]])}
desvars = {"x": | np.array([0.0, 0.25]) | numpy.array |
import numpy as np
import datetime as dt
from os import listdir, path
def gather_mats(
split_mat, avg_5_mat, avg_25_mat, avg_50_mat, dates_mat, min_year
):
"""
Collects chosen columns from split and avg matrices and adds dates_mat
indicator data for each row (each day).
:param split_mat: original company data matrix
:param avg_5_mat: matrix with EMA of length 5 of closing prices
:param avg_25_mat: matrix with EMA of length 25 of closing prices
:param avg_50_mat: matrix with EMA of length 50 of closing prices
:param dates_mat: matrix of profit indicators for each date
:return: matrix of gathered data
"""
# Gather matrix columns indices.
gather_split_i = 0
gather_avg_5_i = 1
gather_avg_25_i = 2
gather_avg_50_i = 3
gather_volume_i = 4
gather_dates_indicator_i = 5
# Indices of date fragment columns in split matrix.
dates_indices = [1, 2, 3]
# Indices of elements in dates matrix.
all_i = 0
profit_i = 1
# Index of close price column and volume column.
close_i = 5
volume_i = 6
    # Number of gathered values: original close price, the 3 averages, volume
    # and the dates profit indicator will be collected.
gathered_row_len = 6
# Create gathered mat with row count of avg_50_mat as it is the shortest
# of all input matrices.
gathered_mat = np.zeros([avg_50_mat.shape[0], gathered_row_len])
for i in range(avg_50_mat.shape[0]):
# Gather split, avg_5, avg_25, avg_50 and volume columns.
gathered_mat[-(i + 1), gather_split_i] = split_mat[-(i + 1), close_i]
gathered_mat[-(i + 1), gather_avg_5_i] = avg_5_mat[-(i + 1), close_i]
gathered_mat[-(i + 1), gather_avg_25_i] = avg_25_mat[-(i + 1), close_i]
gathered_mat[-(i + 1), gather_avg_50_i] = avg_50_mat[-(i + 1), close_i]
gathered_mat[-(i + 1), gather_volume_i] = split_mat[-(i + 1), volume_i]
# Construct the date of current row and access dates matrix indicator.
date = dt.date(*(split_mat[-(i + 1), dates_indices].astype(np.int32)))
all_count = dates_mat[
date.year - min_year, date.month - 1,
date.day - 1, all_i
]
profit_count = dates_mat[
date.year - min_year, date.month - 1,
date.day - 1, profit_i
]
# Set indicator column element of current row to calculated indicator.
gathered_mat[-(i + 1), gather_dates_indicator_i] = profit_count / \
all_count
return gathered_mat
def label_mat(mat):
"""
Assign labels to each row of gathered matrix.
:param mat: previously gathered matrix
:return: labels for gathered matrix rows
"""
# Index and range of average used for labeling.
gather_avg_25_i = 2
avg_range = 25
# Labels for rising and falling price.
rising_i = 1
falling_i = 0
num_classes = 2
labels = | np.zeros([mat.shape[0] - avg_range + 1, num_classes]) | numpy.zeros |
import joblib
import numpy as np
import pandas as pd
import streamlit as st
APP_FILE = "app.py"
MODEL_JOBLIB_FILE = "model.joblib"
def main():
"""This function runs/ orchestrates the Machine Learning App Registry"""
st.markdown(
"""
# Machine Learning App
The main objective of this app is to build a customer segmentation based on credit card
payment behavior during the last six months in order to define marketing strategies.
You can find the source code for this project in the following [Github repository](https://github.com/andreshugueth/credit_card_clustering).
"""
)
html_temp = """
<div style="text-align: right"> <strong> Author: </strong> <a href=https://www.linkedin.com/in/carlosbarros7/ target="_blank"><NAME></a> </div>
"""
st.markdown(html_temp, unsafe_allow_html=True)
st.markdown('## Dataset')
if st.checkbox('Show sample data'):
st.write(show_data)
customer_predictor()
def customer_predictor():
"""## Customer predictor
A user may have to input data about the customer's finances to predict which cluster he belongs to.
"""
st.markdown("## Customer segmentation model based on credit behavior")
balance = st.number_input("Balance")
purchases = st.number_input("Purchases")
cash_advance = st.number_input("Cash Advance")
credit_limit = st.number_input("Credit Limit")
payments = st.number_input("Payments")
prediction = 0
if st.button("Predict"):
model = joblib.load(MODEL_JOBLIB_FILE)
features = [balance, purchases, cash_advance, credit_limit, payments]
final_features = [ | np.array(features) | numpy.array |
import numpy as np
import cochlea
import scipy.signal
#import matplotlib.pyplot as plt
def peripheralSpikes(sound, par, fs = -1):
if fs == -1:
fs = par['periphFs']
anfTrains = cochlea.run_zilany2014(sound, fs,
anf_num = [60, 25, 15],
cf = par['cochChanns'],
species = 'human', seed = 0);
return(anfTrains)
def peripheral(sound, par, fs = -1):
if fs == -1:
fs = par['periphFs']
ANRts = cochlea.run_zilany2014_rate(sound, fs,
anf_types = ('lsr', 'msr', 'hsr'),
cf = par['cochChanns'],
species = 'human',
cohc = 1,
cihc = 1)
ANRts = .6 * ANRts['hsr'] + .25 * ANRts['msr'] + .15 * ANRts['lsr']
if par['subCortFs'] == fs:
p = ANRts.get_values() / par['subCortFs']
else:
resampleN = len(sound) * par['subCortFs'] / fs
p = scipy.signal.resample(ANRts, num = resampleN) / par['subCortFs']
p[p < 0] = 0
p[p > 1] = 1
return(p)
def subcortical(prob, lagSpace, par):
# Processing constants
dt = 1./par['subCortFs']
timeSpace = np.arange(start = dt, stop = (len(prob) + 1) * dt, step = dt)
if par['SACFTau'] <= 0:
tauFactor = 2 # [Wiegriebe2000]
taus = tauFactor * lagSpace
taus = np.maximum(taus, 0.0025) # [Wiegriebe2000]
else:
taus = par['SACFTau'] * np.ones(lagSpace.shape)
# Initalising variables
a = np.zeros((len(lagSpace), par['cochChanns'][2]))
B0 = np.zeros((len(lagSpace), par['cochChanns'][2]))
B = np.zeros(len(lagSpace))
z0 = np.zeros(par['cochChanns'][2])
z = np.zeros(len(prob))
k = 0.5 * np.ones(len(prob))
C = np.zeros((len(prob), len(lagSpace)))
for ti in range(1, len(prob)):
# SACF
for li in range(len(lagSpace)):
if (timeSpace[ti - 1] - lagSpace[li] - par['solvOnset']) > dt:
tiL = int(max(round(ti - par['subCortFs'] * lagSpace[li]), 1))
a[li] = prob[ti] * prob[tiL] * par['subCortFs']
B0 = B0 * np.exp(-dt / np.tile(taus, (1, par['cochChanns'][2])))
B0 = B0 * dt / np.tile(taus, (1, par['cochChanns'][2])) + a
B0[B0 < 0] = 0
B = B + (dt / par['subCortTau']) * (B0.sum(1) - B)
if par['regularise'] == 1:
# Normalisation factor (multiplicative)
a0 = (prob[ti]**2) * par['subCortFs']
z0 = z0 * np.exp(-dt / taus.min()) * (dt / taus.min()) + a0
z0[z0 < 0] = 0
z[ti] = z[ti-1] + (dt/par['subCortTau']) * (z0.sum(0) - z[ti-1])
# Normalisation factor (additive)
sInd = np.argmin((timeSpace[ti - 1] - 1.25 * lagSpace)**2)
if sInd > (len(lagSpace)):
k[ti] = 0.5
else:
k[ti] = B[sInd:].mean() / (z[ti] + 0.01)
if z[ti] > 5:
C[ti] = B / (z[ti] + 0.01)
else:
C[ti] = (B - 1 / (z[ti] + 0.1)) / (z[ti] + 0.01)
else:
C[ti] = B
z[ti] = 0
k[ti] = 0
# Recomputing additive normalisation factor k and multiplicative gain A0
if (par['regularise'] == 1):
if (par['SACFGround'] < 0):
if (len(prob) * dt < 0.075):
print('Warning: dur(stim) < 75ms; Using default baseline 0.5')
k0 = 0.5
else:
k0 = np.mean(k[int(0.05/dt) : int(min(0.10/dt, len(prob)))])
k[0:int(np.ceil(0.075 / dt))] = k0
for ti in range(1, len(prob)):
k[ti] = k[ti-1] + (dt/par['subCortTau']) * (k[ti] - k[ti-1])
else:
k[:] = par['SACFGround']
kMat = np.transpose(np.tile(k, [len(lagSpace), 1]))
A0 = par['mu0'] / | np.maximum(0.1, 1 - kMat) | numpy.maximum |
from aiida.orm import Code, DataFactory, WorkflowFactory
from aiida.orm.workflow import Workflow
from aiida.orm.calculation.inline import make_inline
#from aiida.workflows.wf_phonon import WorkflowPhonon
from aiida.orm import load_node, load_workflow
import numpy as np
WorkflowPhonon = WorkflowFactory('wf_phonon')
StructureData = DataFactory('structure')
ParameterData = DataFactory('parameter')
ArrayData = DataFactory('array')
def thermal_expansion(volumes, electronic_energies, gruneisen, stresses=None, t_max=1000, t_step=10):
fit_ve = np.polyfit(volumes, electronic_energies, 2)
test_volumes = np.arange(volumes[0] * 0.8, volumes[0] * 1.2, volumes[0] * 0.01)
electronic_energies = np.array([np.polyval(fit_ve, i) for i in test_volumes])
gruneisen.set_thermal_properties(test_volumes, t_min=0, t_max=t_max, t_step=t_step)
tp = gruneisen.get_thermal_properties()
gruneisen.get_phonon()
normalize = gruneisen.get_phonon().unitcell.get_number_of_atoms() / gruneisen.get_phonon().primitive.get_number_of_atoms()
free_energy_array = []
cv_array = []
entropy_array = []
total_free_energy_array = []
for energy, tpi in zip(electronic_energies, tp.get_thermal_properties()):
temperatures, free_energy, entropy, cv = tpi.get_thermal_properties()
free_energy_array.append(free_energy)
entropy_array.append(entropy)
cv_array.append(cv)
total_free_energy_array.append(free_energy / normalize + energy)
total_free_energy_array = | np.array(total_free_energy_array) | numpy.array |
from ceo.tools import ascupy
from ceo.pyramid import Pyramid
import numpy as np
import cupy as cp
from scipy.ndimage import center_of_mass
class PyramidWFS(Pyramid):
def __init__(self, N_SIDE_LENSLET, N_PX_LENSLET, modulation=0.0, N_GS=1, throughput=1.0, separation=None):
Pyramid.__init__(self)
self._ccd_frame = ascupy(self.camera.frame)
self._SUBAP_NORM = 'MEAN_FLUX_PER_SUBAP'
self.camera.photoelectron_gain = throughput
def calibrate(self, src, calib_modulation=10.0, calib_modulation_sampling=64, percent_extra_subaps=0.0, thr=0.0):
"""
Perform the following calibration tasks:
1) Acquire a CCD frame using high modulation (default: 10 lambda/D);
2) Estimate center of the four sub-pupil images;
3) Calibrate an initial pupil registration assuming a circular pupil.
4) Refines pupil registration by selecting only sub-apertures with flux above threshold thr.
5) Stores pupil registration in self._indpup
6) Computes and stores the reference slope null vector for a flat WF
Parameters
----------
src : Source
The Source object used for Pyramid sensing
calib_modulation: modulation radius applied during calibration (default 10 lambda/D).
percent_extra_subaps: percent of extra subapertures across the pupil for initial pupil registration (default: 0).
thr : Threshold for pupil registration refinement: select only SAs with flux percentage above thr.
"""
#-> Insert a sub-pupil image into a CCD frame
def insert_subpupil(this_frame, this_pup):
fr = np.zeros((nx,ny))
sz = this_frame.shape
fr[yra[this_pup][0]:yra[this_pup][1]+1,xra[this_pup][0]:xra[this_pup][1]+1] = this_frame
return fr
#-> Acquire CCD frame applying high modulation:
self.reset()
cl_modulation = self.modulation # keep track of selected modulation radius
cl_modulation_sampling = self.modulation_sampling
self.modulation = calib_modulation
self.modulation_sampling = calib_modulation_sampling
self.propagate(src)
ccd_frame = self._ccd_frame.get()
self.modulation = cl_modulation
self.modulation_sampling = cl_modulation_sampling
#-> Find center of four sup-pupil images:
nx, ny = ccd_frame.shape
x = np.linspace(0, nx-1, nx)
y = np.linspace(0, ny-1, ny)
xx, yy = np.meshgrid(x, y)
mqt1 = np.logical_and(xx< (nx/2), yy< (ny/2)) # First quadrant (lower left)
mqt2 = | np.logical_and(xx>=(nx/2), yy< (ny/2)) | numpy.logical_and |
import os, sys
sys.path.append(os.getcwd())
import numpy as np
import tensorflow as tf
import time
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from tensorflow import layers
from keras.datasets import mnist
from keras.datasets import fashion_mnist
num_epochs = 200
BATCH_SIZE = 64
TRAINING_RATIO = 5 # The training ratio is the number of discriminator updates per generator update. The paper uses 5.
GRADIENT_PENALTY_WEIGHT = 10 # As per the paper
OUTPUT_DIM = 784
disc_iters = 5
num_labels = 10
latent_dim = 128
DIM = 64
channel_first = False
def generate_images(images, epoch):
# output gen: (-1,1) --> (-127.5, 127.5) --> (0, 255)
# shape 10x784
# plt.figure()
plt.figure(figsize=(100, 10))
test_image_stack = np.squeeze((np.array(images, dtype=np.float32) * 127.5) + 127.5)
for i in range(10):
new_image = test_image_stack[i].reshape(28, 28)
plt.subplot(1, 10, i + 1)
plt.axis("off")
plt.imshow(new_image)
plt.axis("off")
plt.axis("off")
plt.savefig("epoch_" + str(epoch) + ".png")
def generator(n_samples, noise_with_labels, reuse=None):
"""
:param n_samples: number of samples
:param noise_with_labels: latent noise + labels
:return: generated images
"""
    with tf.variable_scope('Generator', reuse=reuse): # Needed for later, in order to get variables of the generator
# ----- Layer1, Dense, Batch, Leaky ----- #
alpha = 0.01
output = layers.dense(inputs=noise_with_labels, units=4 * 4 * 4 * 64)
output = layers.batch_normalization(output)
output = tf.maximum(alpha * output, output)
if channel_first:
            # size: 256 x 4 x 4
output = tf.reshape(output, (-1, 4 * 64, 4, 4))
bn_axis = 1 # [0, 2, 3] # first
else:
            # size: 4 x 4 x 256
output = tf.reshape(output, (-1, 4, 4, 4 * 64))
bn_axis = -1 # [0, 1, 2] # last
# ----- Layer2, deConv, Batch, Leaky ----- #
output = layers.conv2d_transpose(output, filters=4 * DIM, kernel_size=(5, 5), strides=2, padding='same')
output = layers.batch_normalization(output, axis=bn_axis)
output = tf.maximum(alpha * output, output)
if channel_first:
output = output[:, :, :7, :7]
else:
output = output[:, :7, :7, :]
# ----- Layer3, deConv, Batch, Leaky ----- #
output = layers.conv2d_transpose(output, filters=2 * DIM, kernel_size=(5, 5), strides=2, padding='same')
output = layers.batch_normalization(output, axis=bn_axis)
output = tf.maximum(alpha * output, output)
# ----- Layer4, deConv, Batch, Leaky ----- #
output = layers.conv2d_transpose(output, filters=DIM, kernel_size=(5, 5), strides=2, padding='same')
output = layers.batch_normalization(output, axis=bn_axis)
output = tf.maximum(alpha * output, output)
# ----- Layer5, deConv, Batch, Leaky ----- #
output = layers.conv2d_transpose(output, filters=1, kernel_size=(5, 5), strides=1, padding='same')
output = tf.nn.tanh(output)
output = tf.reshape(output, [-1, OUTPUT_DIM])
print('Generator output size:')
print(output)
return output
def discriminator(images, reuse=None):
"""
:param images: images that are input of the discriminator
:return: likeliness of the image
"""
with tf.variable_scope('Discriminator', reuse=reuse): # Needed for later, in order to get variables of generator
if channel_first:
output = tf.reshape(images, [-1, 1, 28, 28])
else:
output = tf.reshape(images, [-1, 28, 28, 1])
# ----- Layer1, Conv, Leaky ----- #
alpha = 0.01
output = layers.conv2d(output, filters=DIM, kernel_size=(5, 5), strides=2, padding='same')
output = tf.maximum(alpha * output, output)
# ----- Layer2, Conv, Leaky ----- #
output = layers.conv2d(output, filters=2 * DIM, kernel_size=(5, 5), strides=2, padding='same')
output = tf.maximum(alpha * output, output)
# ----- Layer3, Conv, Leaky ----- #
output = layers.conv2d(output, filters=4 * DIM, kernel_size=(5, 5), strides=2, padding='same')
output = tf.maximum(alpha * output, output)
output = tf.reshape(output, [-1, 4 * 4 * 4 * DIM])
# ----- Layer4, Dense, Linear ----- #
output = layers.dense(output, units=11)
print('Discriminator output size:')
print(output)
scores_out = tf.identity(output[:, :1], name='scores_out')
labels_out = tf.identity(output[:, 1:], name='labels_out')
return scores_out, labels_out
def get_trainable_variables():
"""
:return: trainable variables (d_vars, g_vars)
"""
tvars = tf.trainable_variables()
d_vars = [var for var in tvars if 'Discriminator' in var.name]
g_vars = [var for var in tvars if 'Generator' in var.name]
return d_vars, g_vars
# --------------------------------- Placeholders ------------------------------- #
# GENERATOR
# ----- Noise + Labels(G) ----- #
input_generator = tf.placeholder(tf.float32, shape=[BATCH_SIZE, latent_dim + num_labels])
test_input = tf.placeholder(tf.float32, shape=[10, latent_dim + num_labels])
# DISCRIMINATOR
# ------ Real Samples(D) ------ #
real_samples = tf.placeholder(tf.float32, shape=[BATCH_SIZE, OUTPUT_DIM])
# -------- Labels(D) ---------- #
labels = tf.placeholder(tf.float32, shape=[BATCH_SIZE, num_labels])
# ----------------------------------- Outputs ----------------------------------- #
fake_samples = generator(BATCH_SIZE, input_generator)
test_samples = generator(10, test_input, reuse=True)
disc_real_score, disc_real_labels = discriminator(real_samples)
disc_fake_score, disc_fake_labels = discriminator(fake_samples, reuse=True)
# Trainable variables
d_vars, g_vars = get_trainable_variables()
# ---------------------------------- Losses ------------------------------------ #
# - - - - - Gen Loss - - - - - #
# wasserstein gen
gen_wasserstein_loss = -tf.reduce_mean(disc_fake_score) # WASSERSTEIN
# labels gen
labels_penalty_fakes = tf.nn.softmax_cross_entropy_with_logits(labels=labels, # (deprecated)
logits=disc_fake_labels)
generator_loss = gen_wasserstein_loss + labels_penalty_fakes
# - - - - - Disc Loss - - - - - #
# wasserstein disc
disc_wasserstein_loss = tf.reduce_mean(disc_fake_score) - tf.reduce_mean(disc_real_score)
# labels disc
labels_penalty_fakes = tf.nn.softmax_cross_entropy_with_logits(labels=labels, # (deprecated)
logits=disc_fake_labels)
labels_penalty_real = tf.nn.softmax_cross_entropy_with_logits(labels=labels, # (deprecated)
logits=disc_real_labels)
# gradient penalty disc
alpha = tf.random_uniform(shape=[BATCH_SIZE, 1], minval=0., maxval=1.)
differences = fake_samples - real_samples
interpolates = real_samples + alpha * differences
gradients = tf.gradients(discriminator(interpolates, reuse=True)[0], [interpolates])[0]
slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), reduction_indices=[1]))
gradient_penalty = tf.reduce_mean((slopes - 1.) ** 2)
# sum losses
fake_labels_weight = 0.1
discriminator_loss = disc_wasserstein_loss + fake_labels_weight * labels_penalty_fakes + labels_penalty_real + gradient_penalty
# ---------------------------------- Optimizers ----------------------------------- #
generator_optimizer = tf.train.AdamOptimizer(learning_rate=1e-4,
beta1=0.5,
beta2=0.9).minimize(generator_loss, var_list=g_vars)
discriminator_optimizer = tf.train.AdamOptimizer(learning_rate=1e-4,
beta1=0.5,
beta2=0.9).minimize(discriminator_loss, var_list=d_vars)
# -------------------------------- Load Dataset ---------------------------------- #
# (X_train, y_train), (X_test, y_test) = mnist.load_data()
(X_train, y_train), (X_test, y_test) = fashion_mnist.load_data()
X_train = np.reshape(X_train, newshape=[-1, OUTPUT_DIM])
X_test = np.reshape(X_test, newshape=[-1, OUTPUT_DIM])
X_train = np.concatenate((X_train, X_test), axis=0)
X_train = (X_train - 127.5) / 127.5
y_train = np.concatenate((y_train, y_test), axis=0)
y_hot = np.zeros((y_train.shape[0], 10))
b = np.arange(y_train.shape[0])
y_hot[b, y_train] = 1
y_train = y_hot
# ------------------------------------ Train ------------------------------------- #
with tf.Session() as session:
session.run(tf.global_variables_initializer())
indices = np.arange(X_train.shape[0])
# big batch size
macro_batches_size = BATCH_SIZE * disc_iters
# num of batches
num_macro_batches = int((X_train.shape[0]) // macro_batches_size)
discriminator_history = []
generator_history = []
# EPOCHS FOR
for iteration in range(num_epochs):
start_time = time.time()
print("epoch: ", iteration)
np.random.shuffle(indices)
X_train = X_train[indices]
y_train = y_train[indices]
# MACRO BATCHES FOR
for i in range(num_macro_batches): # macro batches
# print("macro_batch: ", i)
discriminator_macro_batches = X_train[i * macro_batches_size:(i + 1) * macro_batches_size]
labels_macro_batches = y_train[i * macro_batches_size:(i + 1) * macro_batches_size]
noise_macro_batches = np.random.rand(macro_batches_size, latent_dim)
disc_cost_sum = 0
if i % (num_macro_batches // 10) == 0:
# print progress
print(100 * i // num_macro_batches, '%')
# (MICRO) BATCHES FOR
for j in range(disc_iters): # batches
# print("micro batches: ", j)
# DISCRIMINATOR TRAINING
img_samples = discriminator_macro_batches[j * BATCH_SIZE:(j + 1) * BATCH_SIZE]
img_labels = labels_macro_batches[j * BATCH_SIZE:(j + 1) * BATCH_SIZE]
noise = noise_macro_batches[j * BATCH_SIZE:(j + 1) * BATCH_SIZE]
discriminator_labels_with_noise = np.concatenate((img_labels, noise), axis=1)
disc_cost, _ = session.run([discriminator_loss,
discriminator_optimizer],
feed_dict={input_generator: discriminator_labels_with_noise,
real_samples: img_samples,
labels: img_labels})
disc_cost_sum += disc_cost
# END FOR MICRO BATCHES
discriminator_history.append(np.mean(disc_cost_sum))
# GENERATOR TRAINING
generator_noise = | np.random.rand(BATCH_SIZE, latent_dim) | numpy.random.rand |
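# Illustrative NumPy sketch of the random interpolation used by the gradient penalty
# above: each interpolated sample lies on the line between a real and a fake sample,
# with one mixing coefficient per sample. Shapes mirror the BATCH_SIZE x OUTPUT_DIM
# tensors of this script; standalone example only.
import numpy as np

batch_size, output_dim = 64, 784
real = np.random.randn(batch_size, output_dim)
fake = np.random.randn(batch_size, output_dim)
alpha = np.random.uniform(size=(batch_size, 1))   # one coefficient per sample
interpolates = real + alpha * (fake - real)       # same shape as real/fake
assert interpolates.shape == (batch_size, output_dim)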
import gym
import numpy as np
import matplotlib.pyplot as plt
from windy_gridworld import WindyGridworldEnv
import time, os
def create_state_action_dictionary(env):
Q = {}
for key in range(env.nS):
Q[key] = {a: 0.0 for a in range(env.nA)}
return Q
def epsilon_greedy_action(env, epsilon, s, Q):
if np.random.random() < epsilon:
return env.action_space.sample()
else:
greedy_action = np.argmax(list(Q[s].values()))
return greedy_action
def n_step_sarsa(env, n_episodes, n, epsilon, alpha, gamma):
Q = create_state_action_dictionary(env)
visits_per_state = np.zeros(env.nS)
for _ in range(n_episodes):
s_0 = env.reset()
T = np.inf
t = 0
a_t = epsilon_greedy_action(env, epsilon, s_0, Q)
states = [s_0]
actions = [a_t]
rewards = [0]
while True:
#env._render()
#time.sleep(0.05)
#os.system('cls')
if t < T:
next_state, r, done, info = env.step(actions[t])
visits_per_state[next_state] += 1
rewards.append(r)
states.append(next_state)
if done:
T = t + 1
else:
next_action = epsilon_greedy_action(env, epsilon, next_state, Q)
actions.append(next_action)
tau = t - n + 1
if tau >= 0:
G = 0
for i in range(tau+1, min(tau+n+1,T+1)):
G += np.power(gamma, i-tau-1) * rewards[i]
if tau + n < T:
G += | np.power(gamma, n) | numpy.power |
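# Illustrative standalone sketch of the n-step return accumulated in the loop above:
# G = r_{tau+1} + gamma*r_{tau+2} + ... + gamma^(n-1)*r_{tau+n} + gamma^n * Q(s_{tau+n}, a_{tau+n}),
# where the bootstrap term is only added while tau + n is still inside the episode.
# Example values only, not part of the original script.
import numpy as np

def n_step_return(rewards, tau, n, T, gamma, bootstrap_q=0.0):
    G = sum(np.power(gamma, i - tau - 1) * rewards[i]
            for i in range(tau + 1, min(tau + n, T) + 1))
    if tau + n < T:
        G += np.power(gamma, n) * bootstrap_q
    return G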
import sys
import os
sys.path.insert(0, os.path.abspath('.'))
import unittest
import numpy as np
import numpy.testing as nt
import scipy.constants as consts
from sgp4.io import twoline2rv
import sgp4
import sorts
from sorts.propagator import SGP4
from sorts import frames
from astropy.time import Time, TimeDelta
class TestSGP4(unittest.TestCase):
def setUp(self):
self.epoch0 = Time(2457126.2729, format='jd', scale='utc')
self.params = dict(
C_D = 2.3,
m = 8000,
A = 1.0,
)
self.state0 = np.array([7000e3, 0.0, 0.0, 0.0, 0.0, 7e3])
self.settings = dict(in_frame='TEME', out_frame='TEME')
def test_init(self):
prop = SGP4(settings=self.settings)
def test_SGP4_propagate(self):
prop = SGP4(settings=self.settings)
t = np.arange(0,24*360, dtype=np.float)*10.0
ecefs = prop.propagate(t, self.state0, self.epoch0, **self.params)
assert ecefs.shape == (6, t.size)
assert isinstance(ecefs, np.ndarray)
ecef = prop.propagate(0, self.state0, self.epoch0, **self.params)
assert ecef.shape == (6, )
assert isinstance(ecef, np.ndarray)
nt.assert_almost_equal(ecefs[:,0], ecef)
def test_SGP4_propagate_B(self):
B = 0.5*self.params['C_D']*self.params['A']/self.params['m']
prop = SGP4(settings=self.settings)
t = | np.arange(0,24*360, dtype=np.float) | numpy.arange |
import numpy as np
import tensorflow as tf
from collections import deque, namedtuple
from typing import Tuple
import random
Transition = namedtuple('Transition',
('actions', 'rewards', 'gradients', 'data', 'targets'))
logs_path = '/tmp/tensorflow_logs/example/'
class ReplayMemory(object):
"""
Simple convenience class to store relevant training traces and efficiently sample from them.
:param int capacity: Size of the replay memory
"""
def __init__(self, capacity: int):
self.capacity = capacity
self.memory = deque([], maxlen=self.capacity)
self.position = 0
def push(self, transition: Transition):
"""
Adds a new observation to the memory
:param transition: Transition object
:return: none
"""
self.memory.append(transition)
def sample(self, batchsize: int):
"""
Samples 'batchsize'd number of samples from the memory
:param batchsize:
:return: sample of Transition objects
"""
return random.sample(self.memory, batchsize)
def __len__(self):
return len(self.memory)
class Agent(object):
"""
Agent that learns the update rule for a logistic regression.
:param sess: Tensorflow session.
"""
def __init__(self, sess: tf.Session):
self.memory = ReplayMemory(5000)
self.batch_size = 30
self.sess = sess
self.mode = tf.estimator.ModeKeys.TRAIN
self._init_graph()
self.optimizer = tf.train.GradientDescentOptimizer(learning_rate=1e-4)
init_op = tf.global_variables_initializer()
self.sess.run(init_op)
self.summary_writer = tf.summary.FileWriter(logs_path, graph=tf.get_default_graph())
def _init_graph(self):
self.data = tf.placeholder(name='data',
dtype=tf.float32,
shape=[100, None])
self.targets = tf.placeholder(name='target',
dtype=tf.float32,
shape=[100, None])
self.actions = tf.placeholder(name='action',
shape=[None, 3 * 25],
dtype=tf.float32)
self.reward = tf.placeholder(name='reward',
shape=[None, 25],
dtype=tf.float32)
self.grads = tf.placeholder(name='gradients',
shape=[None, 3 * 25],
dtype=tf.float32)
self.last_action = tf.placeholder(name='last_action',
shape=[None, 3],
dtype=tf.float32)
# We encode the bias by adding a third column to the data filled with 1's
self.weights = tf.Variable(initial_value=[[1e-5, 1e-4, 0]],
name="logit_weights",
dtype=tf.float32,
expected_shape=[1, 3],
trainable=True)
with tf.name_scope("policy"):
# Subgraph to define the policy network. We construct the input from
# atomic observation objects
self.input_layer = tf.concat([self.actions, self.reward, self.grads],
axis=1,
name="state_concatenation")
self.dense = tf.layers.dense(inputs=self.input_layer,
units=50,
activation=tf.nn.softmax,
name='dense_1')
self.dropout = tf.layers.dropout(inputs=self.dense,
rate=0.4,
training=(self.mode == tf.estimator.ModeKeys.TRAIN),
name='dropout')
self.policy = tf.layers.dense(inputs=self.dropout,
units=3,
name='output_layer')
with tf.name_scope('update_weights'):
# We update the weights variables using the policy output and the weights from the
# previous transaction
self.weights = self.last_action - self.policy
with tf.name_scope("meta_loss"):
            # The meta-loss is constructed by resampling the input data of a logistic
            # regression and measuring how well the current weights fit it:
self.logits = tf.log(tf.nn.sigmoid(tf.matmul(self.data, self.weights, transpose_b=True)))
self.loss = -1. * tf.reduce_mean(tf.matmul(self.targets, self.logits, transpose_a=True))
def _train_minibatch(self):
"""
Samples from the ReplayMemory and trains the policy on the sampled observations
:return: None
"""
batch: Tuple[Transition] = self.memory.sample(self.batch_size)
for obs in batch:
action = obs.actions
reward = obs.rewards
grad = obs.gradients
data = obs.data
targets = obs.targets
self.sess.run(self.optimizer.minimize(self.loss),
feed_dict={
self.actions: np.array(action).flatten().reshape(-1, 75),
self.reward: np.array(reward).flatten().reshape(-1, 25),
self.grads: np.array(grad).flatten().reshape(-1, 75),
self.last_action: np.array(action[-1]).flatten().reshape(-1, 3),
self.data: data,
self.targets: np.array(targets).reshape(100, -1)
})
def _run_single_round(self, x0: list):
"""
Runs a single optimization round on a fixed dataset to create new memories to train on.
:param x0: Initial value for the weights.
:return: None
"""
# initialize round with new data
mean0 = [.1, .1]
cov0 = [[1, .01], [.01, 1]]
mean1 = [-.1, -.1]
cov1 = [[1, .02], [.02, 1]]
data, targets = create_data_for_metaloss(mean0, mean1, cov0, cov1)
# augment the data with a constant np.ones field to incorporate bias term
data = np.concatenate([data, np.ones(data.shape[0]).reshape(data.shape[0], 1)], axis=1)
# Initialize finite state space with a maximum FIFO queue
action = deque([], maxlen=25)
reward = deque([], maxlen=25)
grad = deque([], maxlen=25)
for _ in range(25):
action.append([0, 0, 0]) # 2 weights + 1 bias
reward.append(0)
grad.append([0, 0, 0]) # updates to the actions, a.k.a. logit weights
action.append(x0)
rew = 0
reward.append(rew)
grad.append(len(x0) * [0.0])
# Run a single event by doing 100 iterations of the update rule.
for idx in range(101):
rew, grad_update, weight = self.sess.run(
[self.loss, self.policy, self.weights],
feed_dict={
self.actions: np.array(action).flatten().reshape(-1, 75),
self.reward: np.array(reward).flatten().reshape(-1, 25),
self.grads: | np.array(grad) | numpy.array |
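# Minimal standalone sketch of the Transition/ReplayMemory pattern used above:
# observations are stored as namedtuples in a bounded deque and sampled uniformly
# for training. Field names mirror the class above; the values are placeholders.
import random
from collections import deque, namedtuple

Obs = namedtuple('Obs', ('actions', 'rewards', 'gradients', 'data', 'targets'))
buffer = deque([], maxlen=5000)
for _ in range(100):
    buffer.append(Obs(actions=[0.0] * 3, rewards=0.0, gradients=[0.0] * 3,
                      data=None, targets=None))
minibatch = random.sample(buffer, 30)   # 30 uniformly drawn observations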
import numpy as np
import cv2
#cap = cv2.VideoCapture('http://192.168.0.104:8080/?action=stream')
imagen = cv2.imread('opencv_logo.png')
hsv = cv2.cvtColor(imagen, cv2.COLOR_BGR2HSV)
#cap = cv2.VideoCapture(0)
cap = cv2.VideoCapture('lineVideo.mp4')
#Detected color ranges:
#Greens:
verde_bajos = np.array([49,50,50])
verde_altos = np.array([107, 255, 255])
#Blues:
azul_bajos = np.array([100,65,75], dtype=np.uint8)
azul_altos = np.array([130, 255, 255], dtype=np.uint8)
#Reds (OpenCV 8-bit hue runs 0-179, so the upper red band sits just below 180):
rojo_bajos1 = np.array([0,65,75], dtype=np.uint8)
rojo_altos1 = np.array([12, 255, 255], dtype=np.uint8)
rojo_bajos2 = np.array([170,65,75], dtype=np.uint8)
rojo_altos2 = np.array([179, 255, 255], dtype=np.uint8)
#Create the masks
mascara_verde = cv2.inRange(hsv, verde_bajos, verde_altos)
mascara_rojo1 = cv2.inRange(hsv, rojo_bajos1, rojo_altos1)
mascara_rojo2 = cv2.inRange(hsv, rojo_bajos2, rojo_altos2)
mascara_azul = cv2.inRange(hsv, azul_bajos, azul_altos)
#Combine all the masks
mask = cv2.add(mascara_rojo1, mascara_rojo2)
mask = cv2.add(mask, mascara_verde)
mask = cv2.add(mask, mascara_azul)
fourcc = cv2.VideoWriter_fourcc(*'MP4V')
out = cv2.VideoWriter('lineVideoFiltered.mp4', fourcc, 20.0, (640,480))
ret, imagen = cap.read()
kernel = np.ones((5,5),np.uint8)
while(True):
# Capture frame-by-frame
ret, frame = cap.read()
# Our operations on the frame come here
#gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
#frame = cv2.addWeighted(frame,0.5,imagen,0.5,0)
###################################################
# LINE FOLLOWER
#img = cv2.imread(img_file, cv2.IMREAD_COLOR)
'''img = cv2.blur(img, (5, 5))
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
h, s, v = cv2.split(hsv)
thresh0 = cv2.adaptiveThreshold(s, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 11, 2)
thresh1 = cv2.adaptiveThreshold(v, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 11, 2)
thresh2 = cv2.adaptiveThreshold(v, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 11, 2)
thresh = cv2.bitwise_or(thresh0, thresh1)
'''
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
# define range of white color in HSV
# change it according to your need !
#lower_white = np.array([0,0,0], dtype=np.uint8)
#upper_white = np.array([0,0,255], dtype=np.uint8)
sensitivity = 15
lower_white = | np.array([0,0,255-sensitivity]) | numpy.array |
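# Illustrative sketch: OpenCV stores 8-bit hue in the range 0-179, so "red" is usually
# covered by two bands that wrap around the 0/180 boundary and are OR-ed together.
# The threshold values here are typical choices, not taken from the original script.
import cv2
import numpy as np

def red_mask(bgr_frame):
    hsv = cv2.cvtColor(bgr_frame, cv2.COLOR_BGR2HSV)
    low = cv2.inRange(hsv, np.array([0, 65, 75], np.uint8), np.array([12, 255, 255], np.uint8))
    high = cv2.inRange(hsv, np.array([170, 65, 75], np.uint8), np.array([179, 255, 255], np.uint8))
    return cv2.bitwise_or(low, high)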
# Third-party
import astropy.units as u
import numpy as np
from scipy.signal import argrelmin
# Project
from . import PhaseSpacePosition, Orbit
__all__ = ['fast_lyapunov_max', 'lyapunov_max', 'surface_of_section']
def fast_lyapunov_max(w0, hamiltonian, dt, n_steps, d0=1e-5,
n_steps_per_pullback=10, noffset_orbits=2, t1=0.,
atol=1E-10, rtol=1E-10, nmax=0, return_orbit=True):
"""
Compute the maximum Lyapunov exponent using a C-implemented estimator
that uses the DOPRI853 integrator.
Parameters
----------
w0 : `~gala.dynamics.PhaseSpacePosition`, array_like
Initial conditions.
hamiltonian : `~gala.potential.Hamiltonian`
dt : numeric
Timestep.
n_steps : int
Number of steps to run for.
d0 : numeric (optional)
The initial separation.
n_steps_per_pullback : int (optional)
Number of steps to run before re-normalizing the offset vectors.
noffset_orbits : int (optional)
Number of offset orbits to run.
t1 : numeric (optional)
Time of initial conditions. Assumed to be t=0.
return_orbit : bool (optional)
Store the full orbit for the parent and all offset orbits.
Returns
-------
LEs : :class:`~astropy.units.Quantity`
Lyapunov exponents calculated from each offset / deviation orbit.
orbit : `~gala.dynamics.Orbit` (optional)
"""
from gala.potential import PotentialBase
from .lyapunov import dop853_lyapunov_max, dop853_lyapunov_max_dont_save
# TODO: remove in v1.0
if isinstance(hamiltonian, PotentialBase):
from ..potential import Hamiltonian
hamiltonian = Hamiltonian(hamiltonian)
if not hamiltonian.c_enabled:
raise TypeError("Input Hamiltonian must contain a C-implemented "
"potential and frame.")
if not isinstance(w0, PhaseSpacePosition):
w0 = np.asarray(w0)
ndim = w0.shape[0]//2
w0 = PhaseSpacePosition(pos=w0[:ndim],
vel=w0[ndim:])
_w0 = np.squeeze(w0.w(hamiltonian.units))
if _w0.ndim > 1:
raise ValueError("Can only compute fast Lyapunov exponent for a single orbit.")
if return_orbit:
t, w, l = dop853_lyapunov_max(hamiltonian, _w0,
dt, n_steps+1, t1,
d0, n_steps_per_pullback, noffset_orbits,
atol, rtol, nmax)
w = np.rollaxis(w, -1)
try:
tunit = hamiltonian.units['time']
except (TypeError, AttributeError):
tunit = u.dimensionless_unscaled
orbit = Orbit.from_w(w=w, units=hamiltonian.units,
t=t*tunit, hamiltonian=hamiltonian)
return l/tunit, orbit
else:
l = dop853_lyapunov_max_dont_save(hamiltonian, _w0,
dt, n_steps+1, t1,
d0, n_steps_per_pullback, noffset_orbits,
atol, rtol, nmax)
try:
tunit = hamiltonian.units['time']
except (TypeError, AttributeError):
tunit = u.dimensionless_unscaled
return l/tunit
def lyapunov_max(w0, integrator, dt, n_steps, d0=1e-5, n_steps_per_pullback=10,
noffset_orbits=8, t1=0., units=None):
"""
Compute the maximum Lyapunov exponent of an orbit by integrating many
nearby orbits (``noffset``) separated with isotropically distributed
directions but the same initial deviation length, ``d0``. This algorithm
re-normalizes the offset orbits every ``n_steps_per_pullback`` steps.
Parameters
----------
w0 : `~gala.dynamics.PhaseSpacePosition`, array_like
Initial conditions.
integrator : `~gala.integrate.Integrator`
An instantiated `~gala.integrate.Integrator` object. Must have a run() method.
dt : numeric
Timestep.
n_steps : int
Number of steps to run for.
d0 : numeric (optional)
The initial separation.
n_steps_per_pullback : int (optional)
Number of steps to run before re-normalizing the offset vectors.
noffset_orbits : int (optional)
Number of offset orbits to run.
t1 : numeric (optional)
Time of initial conditions. Assumed to be t=0.
units : `~gala.units.UnitSystem` (optional)
If passing in an array (not a `~gala.dynamics.PhaseSpacePosition`),
you must specify a unit system.
Returns
-------
LEs : :class:`~astropy.units.Quantity`
Lyapunov exponents calculated from each offset / deviation orbit.
orbit : `~gala.dynamics.Orbit`
"""
if units is not None:
pos_unit = units['length']
vel_unit = units['length']/units['time']
else:
pos_unit = u.dimensionless_unscaled
vel_unit = u.dimensionless_unscaled
if not isinstance(w0, PhaseSpacePosition):
w0 = np.asarray(w0)
ndim = w0.shape[0]//2
w0 = PhaseSpacePosition(pos=w0[:ndim]*pos_unit,
vel=w0[ndim:]*vel_unit)
_w0 = w0.w(units)
ndim = 2*w0.ndim
# number of iterations
niter = n_steps // n_steps_per_pullback
# define offset vectors to start the offset orbits on
d0_vec = np.random.uniform(size=(ndim, noffset_orbits))
d0_vec /= np.linalg.norm(d0_vec, axis=0)[np.newaxis]
d0_vec *= d0
w_offset = _w0 + d0_vec
all_w0 = np.hstack((_w0, w_offset))
# array to store the full, main orbit
full_w = np.zeros((ndim, n_steps+1, noffset_orbits+1))
full_w[:, 0] = all_w0
full_ts = np.zeros((n_steps+1,))
full_ts[0] = t1
# arrays to store the Lyapunov exponents and times
LEs = np.zeros((niter, noffset_orbits))
ts = | np.zeros_like(LEs) | numpy.zeros_like |
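# Illustrative sketch of the "pullback" step described in the docstring above: every
# n_steps_per_pullback steps each offset orbit is rescaled back to the initial
# separation d0 along its current displacement from the parent orbit, and the log of
# the growth factor is accumulated. Assumes the parent state is an (ndim, 1) column and
# the offsets an (ndim, noffset) array; standalone sketch, not the library code.
import numpy as np

def pullback(w_parent, w_offset, d0):
    dw = w_offset - w_parent
    d = np.linalg.norm(dw, axis=0)           # current separation of each offset orbit
    log_growth = np.log(d / d0)              # contribution to the Lyapunov sum
    w_offset_new = w_parent + d0 * dw / d    # renormalize back to separation d0
    return w_offset_new, log_growth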
from typing import List, Union, Tuple
import numpy as np
import gym
from gym import spaces
class WindTunnel:
def __init__(self, parcel_dimensions: List[float], environment_dimensions: List[int], initial_temp: float = 298.15, wind_velocity: float = 30.0) -> None:
"""
This is the core environment of the simulation, and in this specific case, a wind tunnel
Args:
parcel_dimensions (List[float, float, float]): dimensions of each fluid parcel in meters [x, y, z]
environment_dimensions (List[int, int, int]): number of fluid parcels in each direction [x, y, z]
initial_temp (float): surrounding environment temperature in kelvin
            wind_velocity (float): speed in the wind tunnel in meters per second
"""
# X Y Z component of acceleration and velocity at parcel location x y z
self.acceleration_tensor = np.array([
np.array(np.array([0, 0, 0]) * environment_dimensions[0]),
np.array(np.array([0, 0, 0]) * environment_dimensions[1]),
np.array(np.array([0, 0, 0]) * environment_dimensions[2])
])
self.velocity_tensor = np.array([
np.array(np.array([0, 0, 0]) * environment_dimensions[0]),
np.array( | np.array([0, 0, 0]) | numpy.array |
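# Illustrative alternative (assumption, not the original implementation): a full 3-D
# grid of per-parcel vectors can be allocated directly with np.zeros, one (x, y, z)
# component triple per parcel. Example parcel counts only.
import numpy as np

nx, ny, nz = 10, 4, 4
velocity_field = np.zeros((nx, ny, nz, 3))       # zero initial velocity everywhere
acceleration_field = np.zeros((nx, ny, nz, 3))
velocity_field[..., 0] = 30.0                    # e.g. a uniform 30 m/s flow along x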
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import numpy as np
from pyscf import lib
from pyscf import scf
from pyscf.lib import logger
from pyscf.cc import ccsd
from pyscf.cc import uccsd
from pyscf.cc import eom_rccsd
from pyscf.cc import eom_gccsd
from pyscf.cc import addons
########################################
# EOM-IP-CCSD
########################################
class EOMIP(eom_gccsd.EOMIP):
def __init__(self, cc):
gcc = addons.convert_to_gccsd(cc)
eom_gccsd.EOMIP.__init__(self, gcc)
########################################
# EOM-EA-CCSD
########################################
class EOMEA(eom_gccsd.EOMEA):
def __init__(self, cc):
gcc = addons.convert_to_gccsd(cc)
eom_gccsd.EOMEA.__init__(self, gcc)
########################################
# EOM-EE-CCSD
########################################
def eeccsd(eom, nroots=1, koopmans=False, guess=None, eris=None, imds=None):
'''Calculate N-electron neutral excitations via EOM-EE-CCSD.
Kwargs:
nroots : int
Number of roots (eigenvalues) requested
koopmans : bool
Calculate Koopmans'-like (1p1h) excitations only, targeting via
overlap.
guess : list of ndarray
List of guess vectors to use for targeting via overlap.
'''
if eris is None: eris = eom._cc.ao2mo()
if imds is None: imds = eom.make_imds(eris)
spinvec_size = eom.vector_size()
nroots = min(nroots, spinvec_size)
diag_ee, diag_sf = eom.get_diag(imds)
guess_ee = []
guess_sf = []
if guess and guess[0].size == spinvec_size:
raise NotImplementedError
#TODO: initial guess from GCCSD EOM amplitudes
#orbspin = scf.addons.get_ghf_orbspin(eris.mo_coeff)
#nmo = np.sum(eom.nmo)
#nocc = np.sum(eom.nocc)
#for g in guess:
# r1, r2 = eom_gccsd.vector_to_amplitudes_ee(g, nmo, nocc)
# r1aa = r1[orbspin==0][:,orbspin==0]
# r1ab = r1[orbspin==0][:,orbspin==1]
# if abs(r1aa).max() > 1e-7:
# r1 = addons.spin2spatial(r1, orbspin)
# r2 = addons.spin2spatial(r2, orbspin)
# guess_ee.append(eom.amplitudes_to_vector(r1, r2))
# else:
# r1 = spin2spatial_eomsf(r1, orbspin)
# r2 = spin2spatial_eomsf(r2, orbspin)
# guess_sf.append(amplitudes_to_vector_eomsf(r1, r2))
# r1 = r2 = r1aa = r1ab = g = None
#nroots_ee = len(guess_ee)
#nroots_sf = len(guess_sf)
elif guess:
for g in guess:
if g.size == diag_ee.size:
guess_ee.append(g)
else:
guess_sf.append(g)
nroots_ee = len(guess_ee)
nroots_sf = len(guess_sf)
else:
dee = np.sort(diag_ee)[:nroots]
dsf = np.sort(diag_sf)[:nroots]
dmax = np.sort(np.hstack([dee,dsf]))[nroots-1]
nroots_ee = np.count_nonzero(dee <= dmax)
nroots_sf = np.count_nonzero(dsf <= dmax)
guess_ee = guess_sf = None
def eomee_sub(cls, nroots, guess, diag):
ee_sub = cls(eom._cc)
ee_sub.__dict__.update(eom.__dict__)
e, v = ee_sub.kernel(nroots, koopmans, guess, eris, imds, diag=diag)
if nroots == 1:
e, v = [e], [v]
ee_sub.converged = [ee_sub.converged]
return list(ee_sub.converged), list(e), list(v)
e0 = e1 = []
v0 = v1 = []
conv0 = conv1 = []
if nroots_ee > 0:
conv0, e0, v0 = eomee_sub(EOMEESpinKeep, nroots_ee, guess_ee, diag_ee)
if nroots_sf > 0:
conv1, e1, v1 = eomee_sub(EOMEESpinFlip, nroots_sf, guess_sf, diag_sf)
e = | np.hstack([e0,e1]) | numpy.hstack |
import splat.simulate as spsim
import splat.evolve as spev
from .config import DATA_FOLDER, POLYNOMIALS, EVOL_MODELS_FOLDER, FIGURES
from .tools import teff_to_spt, teff_from_spt
from .abs_mags import get_abs_mag, get_teff_from_mag, get_teff_from_mag_ignore_unc
#import pymc3 as pm
from scipy.interpolate import griddata
#import theano.tensor as tt
#from theano.compile.ops import as_op
import astropy.units as u
import numba
import pandas as pd
import numpy as np
#use splat for no
import splat
import splat.empirical as spe
def read_bintemplates():
df=pd.read_pickle(DATA_FOLDER+'/binary_lookup_table.pkl.gz')
return [df.prim.values, df.sec.values, df.sys.values]
def get_system_type(pr, sc, interpolators):
"""
use the lookup table to get a spectral type for the binary
using a linear interpolation to avoid nans
pr: primary type (float, M0=10)
    sc: secondary type (float, M0=10)
    interpolators: (3, N) array of floats (0: primary, 1: secondary, 2: system)
"""
#where secondary are nans set to primaries
sc[np.isnan(sc)]=pr[np.isnan(sc)]
#interpolate
interpoints=np.array([interpolators[0], interpolators[1] ]).T
comb=griddata(interpoints, interpolators[-1] , (pr, sc), method='linear')
return comb
def evolutionary_model_interpolator(mass, age, model):
"""
Evolutionary model interpolator
input: mass, age
model: model name
"""
model_filename=EVOL_MODELS_FOLDER+'//'+model.lower()+'.csv'
evolutiomodel=pd.read_csv( model_filename)
#use the full cloud treatment for saumon models
if model=='saumon2008':
evolutiomodel=evolutiomodel[evolutiomodel.cloud=='hybrid']
#make age, teff, mass logarithm scale
valuest=np.log10(evolutiomodel.temperature.values)
valueslogg=evolutiomodel.gravity.values
valueslumn=evolutiomodel.luminosity.values
valuesm=np.log10(evolutiomodel.mass.values)
valuesag=np.log10(evolutiomodel.age.values)
evolpoints=np.array([valuesm, valuesag ]).T
teffs=griddata(evolpoints, valuest , (np.log10(mass), np.log10(age)), method='linear')
lumn=griddata(evolpoints, valueslumn , (np.log10(mass), np.log10(age)), method='linear')
return {'mass': mass*u.Msun, 'age': age*u.Gyr, 'temperature': 10**teffs*u.Kelvin,
'luminosity': lumn*u.Lsun}
def simulate_spts(**kwargs):
"""
Simulate parameters from mass function,
mass ratio distribution and age distribution
"""
recompute=kwargs.get('recompute', False)
model_name=kwargs.get('name','baraffe2003')
#use hybrid models that predit the T dwarf bump for Saumon Models
if model_name=='saumon2008':
cloud='hybrid'
else:
cloud=False
#automatically set maxima and minima to avoid having too many nans
    #values are [mass min, mass max, age min, age max]
    #the minimum mass is 0.01 for all models
acceptable_values={'baraffe2003': [0.01, 0.1, 0.01, 8.0],
'marley2019': [0.01, 0.08, 0.001, 8.0], 'saumon2008':[0.01, 0.09, 0.003, 8.0],
'phillips2020':[0.01, 0.075, 0.001, 8.0 ],'burrows2001':[0.01, 0.075, 10, 12]}
fname=kwargs.get('filename', DATA_FOLDER+'/mass_age_spcts_with_bin{}.pkl'.format(model_name))
filename=fname
if recompute:
nsim = kwargs.get('nsample', 1e5)
ranges=kwargs.get('range', None)
# masses for singles [this can be done with pymc but nvm]
m_singles = spsim.simulateMasses(nsim,range=[ranges[0], ranges[1]],distribution='power-law',alpha=0.6)
#ages for singles
ages_singles= spsim.simulateAges(nsim,range=[ranges[2], ranges[3]], distribution='uniform')
#parameters for binaries
#binrs=simulate_binary(int(nsim), [ranges[0], ranges[1]], [ranges[2], ranges[3]])
qs=spsim.simulateMassRatios(nsim,distribution='power-law',q_range=[0.1,1.0],gamma=4)
m_prims = spsim.simulateMasses(nsim,range=[ranges[0], ranges[1]],distribution='power-law',alpha=0.6)
m_sec=m_prims*qs
ages_bin= spsim.simulateAges(nsim,range=[ranges[2], ranges[3]], distribution='uniform')
#single_evol=spev.modelParameters(mass=m_singles,age=ages_singles, set=model_name, cloud=cloud)
single_evol=evolutionary_model_interpolator(m_singles, ages_singles, model_name)
#primary_evol=spev.modelParameters(mass=binrs[0],age=binrs[-1], set=model_name, cloud=cloud)
primary_evol=evolutionary_model_interpolator(m_prims,ages_bin, model_name)
#secondary_evol=spev.modelParameters(mass=binrs[1],age=binrs[-1], set=model_name, cloud=cloud)
secondary_evol=evolutionary_model_interpolator(m_sec,ages_bin, model_name)
#save luminosities
#temperatures
teffs_singl =single_evol['temperature'].value
teffs_primar=primary_evol['temperature'].value
teffs_second=secondary_evol['temperature'].value
#spectraltypes
spts_singl =teff_to_spt(teffs_singl)
#the singles will be fine, remove nans from systems
spt_primar=teff_to_spt(teffs_primar)
spt_second=teff_to_spt(teffs_second)
xy=np.vstack([np.round(np.array(spt_primar), decimals=0), np.round(np.array(spt_second), decimals=0)]).T
spt_binr=get_system_type(xy[:,0], xy[:,1], read_bintemplates())
values={ 'sing_evol': single_evol, 'sing_spt':spts_singl,
'prim_evol': primary_evol, 'prim_spt':spt_primar,
'sec_evol': secondary_evol, 'sec_spt': spt_second,
'binary_spt': spt_binr }
import pickle
with open(filename, 'wb') as file:
pickle.dump(values,file)
else:
values=pd.read_pickle(filename)
return values
def get_mag_from_luminosity(lumn, bc, log=False):
if log:
return -2.5*np.log10(lumn)+4.74-bc
else:
return -2.5*lumn+4.74-bc
def fillipazzo_bolometric_correction(spt, filt='2MASS_J', mask=None):
"""
number spectral type
"""
#for float
if isinstance(spt, (np.floating, float, int)):
return spe.typeToBC(spt, filt, ref='filippazzo2015')
#vectorized solution, masking things outside the range
else:
ref='filippazzo2015'
spt=np.array(spt)
res=np.ones_like(spt)*np.nan
if mask is None: mask=np.zeros_like(spt).astype(bool)
bc = np.polyval(splat.SPT_BC_RELATIONS[ref]['filters'][filt]['coeff'], spt-splat.SPT_BC_RELATIONS[ref]['sptoffset'])
bc_error = splat.SPT_BC_RELATIONS[ref]['filters'][filt]['fitunc']
rands=np.random.normal(bc, bc_error)
np.place(res, ~mask, rands )
return res
def make_systems(bfraction=0.2, recompute=False, model='baraffe2003',
mass_age_range=[0.01, 0.1, 0., 8.0], nsample=5e5, return_singles=False, **kwargs):
#quick but dirty
if 'filename' in kwargs:
mods=simulate_spts(name=model,
recompute=recompute, range=mass_age_range,\
nsample=nsample, filename= kwargs.get('filename', ''))
else:
mods=simulate_spts(name=model,
recompute=recompute, range=mass_age_range,\
nsample=nsample)
#singles
singles=mods['sing_evol']
#singles['abs_2MASS_J']= get_abs_mag(mods['sing_spt'], '2MASS J')[0]
#bolometric corrections for 2MASS J
#bcs_sings=fillipazzo_bolometric_correction(mods['sing_spt'], filt='2MASS_J',
# mask=None)
#singles['bolometric_cor_2MASS_J']=bcs_sings
#singles['abs_2MASS_J']=get_mag_from_luminosity(singles['luminosity'].value,\
# bcs_sings, log=False)
singles['is_binary']= np.zeros_like(mods['sing_spt']).astype(bool)
singles['spt']=mods['sing_spt']
singles['prim_spt']=mods['sing_spt']
singles['sec_spt']=np.ones_like(mods['sing_spt'])*np.nan
#binary
binaries={}
binaries['age']=mods['prim_evol']['age']
binaries['mass']=mods['prim_evol']['mass']+mods['sec_evol']['mass']
binaries['pri_mass']=mods['prim_evol']['mass']
binaries['sec_mass']=mods['sec_evol']['mass']
binaries['luminosity']=np.log10(10**(mods['prim_evol']['luminosity']).value+\
10**(mods['sec_evol']['luminosity']).value)
#binaries['temperature']=mods['prim_evol']['temperature']
binaries['spt']=np.random.normal(mods['binary_spt'], 0.3)
binaries['prim_spt']=mods['prim_spt']
binaries['sec_spt']=mods['sec_spt']
binaries['prim_luminosity']=10**(mods['prim_evol']['luminosity']).value
binaries['sec_luminosity']=10**(mods['sec_evol']['luminosity']).value
binaries['is_binary']=np.ones_like(mods['sec_spt']).astype(bool)
#bolometric corrections for 2MASS J
#bcs_bins=fillipazzo_bolometric_correction(binaries['spt'], filt='2MASS_J',
# mask=None)
#binaries['bolometric_cor_2MASS_J']=bcs_bins
#magnitudes ugh
"""
ignore 2mass photometry
js_singles, j_single_unc=get_abs_mag(mods['sing_spt'],'2MASS J')
hs_singles, h_single_unc=get_abs_mag(mods['sing_spt'],'2MASS H')
singles['abs_2MASS_J']=np.random.normal(js_singles, j_single_unc)
singles['abs_2MASS_H']=np.random.normal(hs_singles, h_single_unc)
js_primns, junc_prims=get_abs_mag(mods['prim_spt'], '2MASS J')
js_prims_to_use=np.random.normal(js_primns, junc_prims)
hs_primns, hunc_prims=get_abs_mag(mods['prim_spt'], '2MASS H')
hs_prims_to_use=np.random.normal(hs_primns, junc_prims)
js_secs, junc_secs=get_abs_mag(mods['sec_spt'], '2MASS J')
js_secs_to_use=np.random.normal(js_secs, junc_secs)
hs_secs, hunc_secs=get_abs_mag(mods['sec_spt'], '2MASS H')
hs_secs_to_use=np.random.normal(hs_secs, hunc_secs)
#print (np.isnan(js_prims_to_us).any())
binaries['abs_2MASS_J']= -2.5*np.log10(10**(-0.4*js_prims_to_use)+ 10**(-0.4*js_secs_to_use))
binaries['abs_2MASS_H']= -2.5*np.log10(10**(-0.4*hs_prims_to_use)+ 10**(-0.4*hs_secs_to_use))
"""
#assign teff from absolute mag
#binaries['temperature']=get_teff_from_mag_ignore_unc(binaries['abs_2MASS_H'])
binaries['temperature']=teff_from_spt(binaries['spt'])
#binaries['temperature']=
#compute numbers to choose based on binary fraction
ndraw= int(len(mods['sing_spt'])/(1-bfraction))-int(len(mods['sing_spt']))
#ndraw=int(len(mods['sing_spt'])* bfraction)
#random list of binaries to choose
random_int=np.random.choice(np.arange(len(binaries['spt'])), ndraw)
chosen_binaries={}
for k in binaries.keys():
chosen_binaries[k]=binaries[k][random_int]
#add scale to the local lf
res=pd.concat([pd.DataFrame(singles), pd.DataFrame(chosen_binaries)])
scl=scale_to_local_lf(res.temperature.values)
#print (scl
res['scale']=scl[0]
res['scale_unc']=scl[1]
res['scale_times_model']=scl[-1]
    #combine the two dictionaries
return res
def scale_to_local_lf(teffs):
"""
scale a teff distribution to the local lf
"""
kirkpatrick2020LF={'bin_center': np.array([ 525, 675, 825, 975, 1125, 1275, 1425, 1575, 1725, 1875, 2025]),
'values': np.array([4.24, 2.8 , 1.99, 1.72, 1.11, 1.95, 0.94, 0.81, 0.78, 0.5 , 0.72]),
'unc': np.array([0.7 , 0.37, 0.32, 0.3 , 0.25, 0.3 , 0.22, 0.2 , 0.2 , 0.17, 0.18])}
binedges= np.append(kirkpatrick2020LF['bin_center']-75, kirkpatrick2020LF['bin_center'][-1]+75)
#bools=np.logical_and(teffs <= binedges[-1], teffs >= binedges[0])
#print (binedges[0], binedges[-1])
preds=np.histogram(teffs, bins=binedges, normed=False)[0]
obs=np.array(kirkpatrick2020LF['values'])
unc=np.array(kirkpatrick2020LF['unc'])
obs_monte_carlo= np.random.normal(obs, unc, (10000, len(obs)))
pred_monte= np.ones_like(obs_monte_carlo)*(preds)
unc_monte= np.ones_like(obs_monte_carlo)*(unc)
scale=(np.nansum((obs_monte_carlo*pred_monte)/(unc_monte**2), axis=1)\
/ | np.nansum(((pred_monte**2)/(unc_monte**2)), axis=1) | numpy.nansum |
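# Illustrative sketch of the scaling step above: the simulated counts per Teff bin are
# matched to the observed luminosity function with an inverse-variance weighted
# least-squares factor, scale = sum(obs*pred/unc**2) / sum(pred**2/unc**2).
# Standalone example with made-up numbers.
import numpy as np

obs = np.array([4.24, 2.80, 1.99])    # observed space densities
unc = np.array([0.70, 0.37, 0.32])    # their uncertainties
pred = np.array([120.0, 85.0, 60.0])  # simulated counts in the same bins
scale = np.sum(obs * pred / unc**2) / np.sum(pred**2 / unc**2)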
# Filename: test_calib.py
# pylint: disable=C0111,E1003,R0904,C0103,R0201,C0102
from os.path import dirname, join
import functools
import operator
import shutil
import sys
import tempfile
import km3io
from thepipe import Module, Pipeline
import km3pipe as kp
from km3pipe.dataclasses import Table
from km3pipe.hardware import Detector
from km3pipe.io.hdf5 import HDF5Sink
from km3pipe.testing import TestCase, MagicMock, patch, skip, skipif, data_path
from km3pipe.calib import Calibration, CalibrationService, slew
from .test_hardware import EXAMPLE_DETX
import numpy as np
import tables as tb
__author__ = "<NAME>"
__copyright__ = "Copyright 2016, <NAME> and the KM3NeT collaboration."
__credits__ = []
__license__ = "MIT"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
class TestCalibration(TestCase):
"""Tests for the Calibration class"""
def test_init_with_wrong_file_extension(self):
with self.assertRaises(NotImplementedError):
Calibration(filename="foo")
@patch("km3pipe.calib.Detector")
def test_init_with_filename(self, mock_detector):
Calibration(filename="foo.detx")
mock_detector.assert_called_with(filename="foo.detx")
@patch("km3pipe.calib.Detector")
def test_init_with_det_id(self, mock_detector):
Calibration(det_id=1)
mock_detector.assert_called_with(t0set=None, calibset=None, det_id=1)
Calibration(det_id=1, calibset=2, t0set=3)
mock_detector.assert_called_with(t0set=3, calibset=2, det_id=1)
def test_init_with_detector(self):
det = Detector(data_path("detx/detx_v1.detx"))
Calibration(detector=det)
def test_apply_to_hits_with_pmt_id_aka_mc_hits(self):
calib = Calibration(filename=data_path("detx/detx_v1.detx"))
hits = Table({"pmt_id": [1, 2, 1], "time": [10.1, 11.2, 12.3]})
chits = calib.apply(hits, correct_slewing=False)
assert len(hits) == len(chits)
a_hit = chits[0]
self.assertAlmostEqual(1.1, a_hit.pos_x)
self.assertAlmostEqual(10, a_hit.t0)
self.assertAlmostEqual(10.1, a_hit.time) # t0 should not bei applied
a_hit = chits[1]
self.assertAlmostEqual(1.4, a_hit.pos_x)
self.assertAlmostEqual(20, a_hit.t0)
self.assertAlmostEqual(11.2, a_hit.time) # t0 should not be applied
def test_apply_to_hits_with_dom_id_and_channel_id(self):
calib = Calibration(filename=data_path("detx/detx_v1.detx"))
hits = Table(
{"dom_id": [2, 3, 3], "channel_id": [0, 1, 2], "time": [10.1, 11.2, 12.3]}
)
chits = calib.apply(hits, correct_slewing=False)
assert len(hits) == len(chits)
a_hit = chits[0]
self.assertAlmostEqual(2.1, a_hit.pos_x)
self.assertAlmostEqual(40, a_hit.t0)
t0 = a_hit.t0
self.assertAlmostEqual(10.1 + t0, a_hit.time)
a_hit = chits[1]
self.assertAlmostEqual(3.4, a_hit.pos_x)
self.assertAlmostEqual(80, a_hit.t0)
t0 = a_hit.t0
self.assertAlmostEqual(11.2 + t0, a_hit.time)
def test_assert_apply_adds_dom_id_and_channel_id_to_mc_hits(self):
calib = Calibration(filename=data_path("detx/detx_v1.detx"))
hits = Table({"pmt_id": [1, 2, 1], "time": [10.1, 11.2, 12.3]})
chits = calib.apply(hits)
self.assertListEqual([1, 1, 1], list(chits.dom_id))
self.assertListEqual([0, 1, 0], list(chits.channel_id))
def test_assert_apply_adds_pmt_id_to_hits(self):
calib = Calibration(filename=data_path("detx/detx_v1.detx"))
hits = Table(
{"dom_id": [2, 3, 3], "channel_id": [0, 1, 2], "time": [10.1, 11.2, 12.3]}
)
chits = calib.apply(hits, correct_slewing=False)
self.assertListEqual([4, 8, 9], list(chits.pmt_id))
def test_apply_to_hits_with_pmt_id_with_wrong_calib_raises(self):
calib = Calibration(filename=data_path("detx/detx_v1.detx"))
hits = Table({"pmt_id": [999], "time": [10.1]})
with self.assertRaises(KeyError):
calib.apply(hits, correct_slewing=False)
def test_apply_to_hits_with_dom_id_and_channel_id_with_wrong_calib_raises(self):
calib = Calibration(filename=data_path("detx/detx_v1.detx"))
hits = Table({"dom_id": [999], "channel_id": [0], "time": [10.1]})
with self.assertRaises(KeyError):
calib.apply(hits, correct_slewing=False)
def test_apply_to_hits_from_km3io(self):
calib = Calibration(filename=data_path("detx/km3net_offline.detx"))
hits = km3io.OfflineReader(data_path("offline/km3net_offline.root"))[0].hits
chits = calib.apply(hits)
assert 176 == len(chits.t0)
assert np.allclose([207747.825, 207745.656, 207743.836], chits.t0.tolist()[:3])
chits = calib.apply(hits[:3])
assert 3 == len(chits.t0)
assert np.allclose([207747.825, 207745.656, 207743.836], chits.t0.tolist()[:3])
def test_apply_to_hits_from_km3io_iterator(self):
calib = Calibration(filename=data_path("detx/km3net_offline.detx"))
f = km3io.OfflineReader(data_path("offline/km3net_offline.root"))
for event in f:
chits = calib.apply(event.hits)
assert 176 == len(chits.t0)
assert np.allclose(
[207747.825, 207745.656, 207743.836], chits.t0.tolist()[:3]
)
break
def test_time_slewing_correction(self):
calib = Calibration(filename=data_path("detx/detx_v1.detx"))
hits = Table(
{
"dom_id": [2, 3, 3],
"channel_id": [0, 1, 2],
"time": [10.1, 11.2, 12.3],
"tot": [0, 10, 255],
}
)
chits = calib.apply(hits) # correct_slewing=True is default
assert len(hits) == len(chits)
a_hit = chits[0]
self.assertAlmostEqual(10.1 + a_hit.t0 - slew(a_hit.tot), a_hit.time)
a_hit = chits[1]
self.assertAlmostEqual(11.2 + a_hit.t0 - slew(a_hit.tot), a_hit.time)
a_hit = chits[2]
self.assertAlmostEqual(12.3 + a_hit.t0 - slew(a_hit.tot), a_hit.time)
def test_apply_to_timeslice_hits(self):
tshits = Table.from_template(
{
"channel_id": [0, 1, 2],
"dom_id": [2, 3, 3],
"time": [10.1, 11.2, 12.3],
"tot": np.ones(3, dtype=float),
"group_id": 0,
},
"TimesliceHits",
)
calib = Calibration(filename=data_path("detx/detx_v1.detx"))
c_tshits = calib.apply(tshits, correct_slewing=False)
assert len(c_tshits) == len(tshits)
assert | np.allclose([40, 80, 90], c_tshits.t0) | numpy.allclose |
import sys, math
import numpy as np
from scipy.misc import imresize as resize
from scipy.misc import toimage as toimage
import gym
from gym import spaces
from gym.spaces.box import Box
from gym.utils import seeding
from gym.envs.classic_control import rendering
import pyglet
from pyglet import gl
import tensorflow as tf
import keras.backend as K
from model import make_model
FPS = 50
SCREEN_X = 64
SCREEN_Y = 64
FACTOR = 8
HIDDEN_UNITS = 256
GAUSSIAN_MIXTURES = 5
Z_DIM = 32
initial_z = np.load('./data/initial_z.npz')
initial_mu = initial_z['initial_mu']
initial_log_var = initial_z['initial_log_var']
initial_mu_log_var = [list(elem) for elem in zip(initial_mu, initial_log_var)]
def get_pi_idx(x, pdf):
    # samples from a categorical distribution
N = pdf.size
accumulate = 0
for i in range(0, N):
accumulate += pdf[i]
if (accumulate >= x):
return i
random_value = np.random.randint(N)
#print('error with sampling ensemble, returning random', random_value)
return random_value
class CarRacingDream(gym.Env):
metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second' : FPS
}
def __init__(self, model):
self.observation_space = Box(low=-50., high=50., shape=(model.rnn.z_dim,) , dtype = np.float32) # , dtype=np.float32
self.action_space = spaces.Box( np.array([-1,0,0]), np.array([+1,+1,+1]) , dtype = np.float32) # steer, gas, brake
self.seed()
self.model = model
self.viewer = None
self.t = None
self.z = None
self.h = None
self.c = None
self.previous_reward = None
self.reset()
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def sample_z(self, mu, log_sigma):
z = mu + (np.exp(log_sigma)) * self.np_random.randn(*log_sigma.shape)
return z
def reset(self):
idx = self.np_random.randint(0, len(initial_mu_log_var))
init_mu, init_log_var = initial_mu_log_var[idx]
init_log_sigma = init_log_var / 2
self.z = self.sample_z(init_mu, init_log_sigma)
self.h = np.zeros(HIDDEN_UNITS)
self.c = np.zeros(HIDDEN_UNITS)
self.previous_reward = 0
self.t = 0
return self.z
def close(self):
if self.viewer is not None:
self.viewer.close()
self.viewer = None
def get_mixture_coef(self, z_pred):
log_pi, mu, log_sigma = np.split(z_pred, 3, 1)
log_pi = log_pi - np.log(np.sum(np.exp(log_pi), axis = 1, keepdims = True))
return log_pi, mu, log_sigma
def sample_next_mdn_output(self, action):
d = GAUSSIAN_MIXTURES * Z_DIM
z_dim = self.model.rnn.z_dim
input_to_rnn = [np.array([[np.concatenate([self.z, action, [self.previous_reward]])]]),np.array([self.h]),np.array([self.c])]
out = self.model.rnn.forward.predict(input_to_rnn)
y_pred = out[0][0][0]
new_h = out[1][0]
new_c = out[2][0]
mdn_pred = y_pred[:(3*d)]
rew_pred = y_pred[-1]
mdn_pred = np.reshape(mdn_pred, [-1, GAUSSIAN_MIXTURES * 3])
log_pi, mu, log_sigma = self.get_mixture_coef(mdn_pred)
chosen_log_pi = | np.zeros(z_dim) | numpy.zeros |
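# Illustrative sketch: get_pi_idx above walks the cumulative distribution to pick a
# mixture component. An equivalent vectorised form (assuming the pdf sums to 1) is a
# searchsorted over the cumulative sum.
import numpy as np

pdf = np.array([0.1, 0.2, 0.3, 0.4])
x = np.random.rand()
idx = int(np.searchsorted(np.cumsum(pdf), x))   # first index whose cumulative sum >= x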
import numpy as np
class Quaternion:
"""Quaternions for 3D rotations"""
def __init__(self, x):
self.x = np.asarray(x, dtype=float)
@classmethod
def from_v_theta(cls, v, theta):
""" Construct quaternion from unit vector v and rotation angle theta"""
theta = np.asarray(theta)
v = np.asarray(v)
s = np.sin(0.5 * theta)
c = np.cos(0.5 * theta)
vnrm = np.sqrt(np.sum(v * v))
q = np.concatenate([[c], s * v / vnrm])
return cls(q)
def get_array(self):
return self.x
def __eq__(self, other):
return np.array_equal(self.x, other.x)
def __ne__(self, other):
return not (self==other)
def __repr__(self):
return "Quaternion:\n" + self.x.__repr__()
def __mul__(self, other):
# multiplication of two quaternions.
prod = self.x[:, None] * other.x
return self.__class__([(prod[0, 0] - prod[1, 1]
- prod[2, 2] - prod[3, 3]),
(prod[0, 1] + prod[1, 0]
+ prod[2, 3] - prod[3, 2]),
(prod[0, 2] - prod[1, 3]
+ prod[2, 0] + prod[3, 1]),
(prod[0, 3] + prod[1, 2]
- prod[2, 1] + prod[3, 0])])
def as_v_theta(self):
"""Return the v, theta equivalent of the (normalized) quaternion"""
# compute theta
norm = np.sqrt((self.x ** 2).sum(0))
assert(norm != 0)
theta = 2 * np.arccos(self.x[0] / norm)
# compute the unit vector
v = | np.array(self.x[1:], order='F', copy=True) | numpy.array |
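# Illustrative numeric check of the half-angle construction used by from_v_theta above:
# a 90-degree rotation about z should give q = (cos 45deg, 0, 0, sin 45deg).
# Standalone sketch, assuming only NumPy.
import numpy as np

theta = np.pi / 2
v = np.array([0.0, 0.0, 1.0])
q = np.concatenate([[np.cos(theta / 2)], np.sin(theta / 2) * v / np.linalg.norm(v)])
# q -> array([0.70710678, 0., 0., 0.70710678])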
import numpy as np
import scipy as sp
import unittest
import pytest
from .context import knockpy
from knockpy import dgp
class TestSampleData(unittest.TestCase):
""" Tests sample_data function """
def test_logistic(self):
np.random.seed(110)
p = 50
dgprocess = dgp.DGP()
X, y, beta, Q, Sigma = dgprocess.sample_data(p=p, y_dist="binomial")
# Test outputs are binary
y_vals = np.unique(y)
np.testing.assert_array_almost_equal(
y_vals,
np.array([0, 1]),
err_msg="Binomial flag not producing binary responses",
)
# Test conditional mean for a single X val - start by
# sampling ys
N = 5000
X_repeated = np.repeat(X[0], N).reshape(p, N).T
ys = dgp.sample_response(X_repeated, beta, y_dist="binomial")
# Then check that the theoretical/empirical mean are the same
cond_mean = 1 / (1 + np.exp(-1 * np.dot(X_repeated[0], beta)))
emp_cond_mean = ys.mean(axis=0)
np.testing.assert_almost_equal(cond_mean, emp_cond_mean, decimal=2)
def test_beta_gen(self):
# Test sparsity
p = 100
dgprocess = dgp.DGP()
_, _, beta, _, _ = dgprocess.sample_data(p=p, sparsity=0.3, coeff_size=0.3,)
self.assertTrue(
(beta != 0).sum() == 30, msg="sparsity parameter yields incorrect sparsity"
)
abs_coefs = np.unique(np.abs(beta[beta != 0]))
np.testing.assert_array_almost_equal(
abs_coefs,
np.array([0.3]),
err_msg="beta generation yields incorrect coefficients",
)
# Test number of selections for groups
sparsity = 0.2
groups = np.concatenate([np.arange(0, 50, 1), np.arange(0, 50, 1)])
dgprocess = dgp.DGP()
_, _, beta, _, _ = dgprocess.sample_data(p=p, sparsity=sparsity, groups=groups,)
# First, test that the correct number of features is chosen
num_groups = np.unique(groups).shape[0]
expected_nonnull_features = sparsity * p
self.assertTrue(
(beta != 0).sum() == expected_nonnull_features,
msg="sparsity for groups chooses incorrect number of features",
)
# Check that the correct number of GROUPS has been chosen
expected_nonnull_groups = sparsity * num_groups
selected_groups = np.unique(groups[beta != 0]).shape[0]
self.assertTrue(
selected_groups == expected_nonnull_groups,
msg="group sparsity parameter does not choose coeffs within a group",
)
def test_y_response(self):
# Sample design matrix, beta
# np.random.seed(100)
n = 100000
p = 10
X = np.random.randn(n, p)
beta = dgp.create_sparse_coefficients(
p=p, sparsity=0.5, coeff_size=1, sign_prob=0.5, coeff_dist="none"
)
beta[0] = 1
beta[1] = -1
beta[2] = 0
# Test if a feature has the expected marginal covariance w y
def test_cov(feature, y, name, expected=1):
ycov = (feature * y).mean()
var = (feature ** 2).mean()
coef = ycov / var
self.assertTrue(
np.abs(coef - expected) < 0.05,
msg=f"when sampling y, {name} cond_mean yields unexpected results ({coef} vs {expected})",
)
# Cond mean 1: linear.
y = dgp.sample_response(X, beta, cond_mean="linear")
test_cov(X[:, 0], y, name="linear")
# Cond mean 2: cubic
y = dgp.sample_response(X, beta, cond_mean="cubic")
feature = np.power(X[:, 0], 3) - X[:, 0]
test_cov(feature, y, name="cubic")
# Cond mean 3: trunclinear
y = dgp.sample_response(X, beta, cond_mean="trunclinear")
feature = X[:, 0] >= 1
mean1 = y[feature].mean()
mean2 = y[~feature].mean()
self.assertTrue(
np.abs(mean1 - mean2 - 1) < 0.05,
msg=f"when sampling y, trunclinear cond_mean yields unexpected results for conditional means {mean1} vs {mean2+1}",
)
# Cond mean 4: pairwise interactions
y = dgp.sample_response(X, beta, cond_mean="pairint")
feature = X[:, 0] * X[:, 1]
test_cov(feature, y, name="pairint", expected=-1)
# Cond mean 5: sin
y = dgp.sample_response(X, beta, cond_mean="cos")
feature = X[:, 0]
test_cov(feature, y, name="cos", expected=0)
feature = X[:, 2]
test_cov(feature, y, name="cos", expected=0)
# Cond mean 6: quadratic
y = dgp.sample_response(X, beta, cond_mean="quadratic")
feature = X[:, 0]
test_cov(feature, y, name="quadratic", expected=0)
def test_coeff_dist(self):
# Test normal
np.random.seed(110)
p = 1000
dgprocess = dgp.DGP()
_, _, beta, _, _ = dgprocess.sample_data(
p=p, sparsity=1, coeff_size=1, coeff_dist="normal", sign_prob=0
)
expected = 0
mean_est = beta.mean()
self.assertTrue(
np.abs(mean_est - expected) < 0.1,
msg=f"coeff_dist (normal) mean is wrong: expected mean 1 but got mean {mean_est}",
)
# Test uniform
| np.random.seed(110) | numpy.random.seed |
"""
This module provides functions that are used to generate
simple dynamical systems
You can simulate your own systems here!
created: 11/07/18 <NAME> (<EMAIL>)
modified: 11/13/18 <NAME> (<EMAIL>)
12/11/18 <NAME> (<EMAIL>)
"""
import numpy as np
import scipy as sp
from scipy import integrate
def linear_dynamics_generator(mtx, x_init, _dt=0.01, len_t=10, noise=0.):
"""
:param mtx: a 2D numpy array (matrix) under which the dynamics evolve
:param x_init: a 1D numpy array specifies the initial states
:param _dt: float, time step
:param len_t: time length of simulation
:param noise: the noise level being added
    :return: a numpy array with shape n x len(_t), where _t = np.arange(0, len_t + _dt, _dt)
"""
shape = np.max(x_init.shape)
x_init = x_init.reshape(shape,)
_t = np.arange(0, len_t+_dt, _dt)
sol = sp.integrate.solve_ivp(lambda _, _x: np.dot(mtx, _x), [0, len_t], x_init, t_eval=_t)
_y = sol.y + noise*np.random.rand(shape, len(_t))
return _y
def multi_scale_linear_dynamics_generator(weights, spatial_exp,
temporal_exp, x_scales, t_scales, _dx, _dt):
"""
:param weights: weights of each dynamics
:param spatial_exp: spatial modes exponents
:param temporal_exp: temporal modes exponents
:param x_scales: n x 2 numpy array, provides scale of spatial modes
:param t_scales: n x 2 numpy array, provides scale of temporal modes
:param _dx: spatial discretization
:param _dt: time step
:return: a 2D numpy array represents the multi-scale dynamics (or signal)
"""
shape = np.max(weights.shape)
weights = weights.reshape(shape,)
dim1 = np.max(spatial_exp.shape)
spatial_exp = spatial_exp.reshape(dim1,)
dim2 = np.max(temporal_exp.shape)
temporal_exp = temporal_exp.reshape(dim2, )
assert dim1 == shape and dim2 == shape,\
"weights and exponents provided should be of the same number!"
assert len(x_scales.shape) == 2 and x_scales.shape[1] == 2,\
"x_scale must be a Nx2 numpy array!"
assert len(t_scales.shape) == 2 and t_scales.shape[1] == 2,\
"x_scale must be a Nx2 numpy array!"
assert x_scales.shape[0] == shape and t_scales.shape[0] == shape, \
"number of x_scales and t_scales should be the same as weights! "
# find the boundary
x_min = np.min(x_scales, axis=0)[0]
x_max = np.max(x_scales, axis=0)[1]
t_min = np.min(t_scales, axis=0)[0]
t_max = np.max(t_scales, axis=0)[1]
_x = np.arange(x_min, x_max+_dx, _dx)
_t = np.arange(t_min, t_max+_dt, _dt)
# adjust the scales
differences = x_scales.reshape(1, -1) - _x.reshape(-1, 1)
x_indices = np.abs(differences).argmin(axis=0)
x_scales = np.array([_x[i] for i in x_indices]).reshape(-1, 2)
x_indices = x_indices.reshape(-1, 2)
differences = t_scales.reshape(1, -1) - _t.reshape(-1, 1)
t_indices = np.abs(differences).argmin(axis=0)
t_scales = | np.array([_t[i] for i in t_indices]) | numpy.array |
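# Illustrative standalone sketch of the kind of system linear_dynamics_generator above
# integrates: x' = A x for a lightly damped rotation, solved with scipy's solve_ivp on
# a uniform time grid. Example values only.
import numpy as np
from scipy.integrate import solve_ivp

A = np.array([[-0.1, 2.0],
              [-2.0, -0.1]])
t = np.linspace(0.0, 10.0, 1001)
sol = solve_ivp(lambda _, x: A @ x, [0.0, 10.0], np.array([1.0, 0.0]), t_eval=t)
trajectory = sol.y   # shape (2, 1001): a slowly decaying oscillation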
# Copyright 2020 FZI Forschungszentrum Informatik
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
import numpy as np
import random
class RandomAgent(object):
"""Basic random agent for DeepMind Lab."""
def __init__(self, action_spec, forbidden_actions=[]):
self.action_spec = action_spec
self.action_count = len(action_spec)
self.forbidden_actions = forbidden_actions
self.prev_action = None
def step(self):
"""Choose a random amount of a randomly selected action."""
action_choice = None
while action_choice is None:
action_choice = random.randint(0, self.action_count - 1)
if self.action_spec[action_choice]['name'] in self.forbidden_actions:
action_choice = None
action_amount = random.randint(self.action_spec[action_choice]['min'],
self.action_spec[action_choice]['max'])
action = np.zeros([self.action_count], dtype=np.intc)
action[action_choice] = action_amount
return action
def reset(self):
self.prev_action = None
class RodentDynamicModel(object):
def __init__(self, min_x, min_y, max_x, max_y):
self.size_x = max_x - min_x
self.size_y = max_y - min_y
self.mu_x = min_x + self.size_x/2.
self.mu_y = min_y + self.size_y/2.
self.cov = | np.diag([(self.size_x/3.)**2.0, (self.size_y/3.)**2.]) | numpy.diag |
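# Illustrative sketch (standalone, example numbers): with the mean at the arena centre
# and a diagonal covariance like the one above, candidate positions can be drawn from
# the corresponding 2-D Gaussian with np.random.multivariate_normal.
import numpy as np

min_x, max_x, min_y, max_y = -5.0, 5.0, -3.0, 3.0
mu = np.array([(min_x + max_x) / 2.0, (min_y + max_y) / 2.0])
cov = np.diag([((max_x - min_x) / 3.0) ** 2, ((max_y - min_y) / 3.0) ** 2])
samples = np.random.multivariate_normal(mu, cov, size=100)   # 100 candidate (x, y) positions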
###############################################################################
# Federated averaging aggregator.
#
# NOTE: source from https://github.com/litian96/FedProx
# Modified by <NAME>, 2020-08
# * adapative fl training with adp_p, adp_q
# * async aggregation with staleness on clients
###############################################################################
import numpy as np
from tqdm import trange, tqdm
import tensorflow as tf
from .fedbase import BaseFedarated
class Server(BaseFedarated):
""" Server class"""
def __init__(self, params, learner, dataset):
""" Initialize the Server
Args:
params: dictionary
learner: learning model class
dataset: tuple
user, group, train_data, test_data
"""
print('Using Federated Average to Train')
self.inner_opt = tf.train.GradientDescentOptimizer(params['learning_rate'])
super(Server, self).__init__(params, learner, dataset)
def train(self):
''' Train using Federated Averaging. '''
for i in trange(self.num_rounds, desc='Round: ', ncols=120):
self.p_clients = self.select_clients(i, num_clients=self.clients_p_round[i])
self.r_clients = self.select_clients(i, num_clients=self.clients_r_round[i])
self.current_seq += 1
csolns = []
            # REVIEW: train with participating clients
for c in tqdm(self.p_clients, desc='Client: ', leave=False, ncols=120):
# ignore too old clients
if self.asyn and self.current_seq - c.seq_id > self.window_size:
continue
# REVIEW: set model from the saved_models
c.set_params(self.saved_models[c.seq_id])
# solve minimization locally
soln, stats = c.solve_inner(self.current_seq, num_epochs=self.num_epochs, batch_size=self.batch_size)
# gather solutions from client
csolns.append(soln)
# track communication cost
self.metrics.update(rnd=i, cid=c.id, stats=stats)
# REVIEW: aggregate with momentum
self.latest_model = self.aggregate(csolns, alpha=self.alpha, gamma=(self.num_rounds - i) / self.num_rounds)
self.saved_models[self.current_seq] = self.latest_model
# REVIEW: maintain the size of saved model
if len(self.saved_models.keys()) > self.window_size:
del self.saved_models[min(self.saved_models.keys())]
# eval and std output
if i % self.eval_every == 0:
stats_test = self.test()
stats_train = self.train_error_and_loss()
test_acc = | np.sum(stats_test[3]) | numpy.sum |
# packages
import numpy as np
# project
import forecast.helpers as hlp
import config.parameters as prm
class Sensor():
"""
One Sensor object for each sensor in project.
It keeps track of the algorithm state between events.
When new event_data json is received, iterate algorithm one sample.
"""
def __init__(self, device, device_id, args):
# give to self
self.device = device
self.device_id = device_id
self.args = args
# contains level, trend and season for modelled data
self.model = {
'unixtime': [], # shared unixtime timeaxis
'temperature': [], # temperature values
'level': [], # modeled level
'trend': [], # modeled trend
'season': [], # modeled season
}
# contains all previous forecasts in history
self.forecast = {
'unixtime': [], # shared unixtime timeaxis
'temperature': [], # temperature values
'residual': [], # forecast residual
}
# variables
self.n_samples = 0 # number of event samples received
self.initialised = False
self.residual_std = 0
def new_event_data(self, event_data):
"""
Receive new event from Director and iterate algorithm.
Parameters
----------
event_data : dict
Event json containing temperature data.
"""
# convert timestamp to unixtime
_, unixtime = hlp.convert_event_data_timestamp(event_data['data']['temperature']['updateTime'])
# append new temperature value
self.model['unixtime'].append(unixtime)
self.model['temperature'].append(event_data['data']['temperature']['value'])
self.n_samples += 1
# initialise holt winters
if self.n_samples < prm.season_length * prm.n_seasons_init:
return
elif not self.initialised:
self.__initialise_holt_winters()
else:
# iterate Holt-Winters
self.__iterate_holt_winters()
# forecast
self.__model_forecast()
def __initialise_holt_winters(self):
"""
Calculate initial level, trend and seasonal component.
Based on: https://robjhyndman.com/hyndsight/hw-initialization/
"""
# convert to numpy array for indexing
temperature = np.array(self.model['temperature'])
# fit a 3xseason moving average to temperature
ma = np.zeros(self.n_samples)
for i in range(self.n_samples):
# define ma interval
xl = max(0, i - int(1.5*prm.season_length))
xr = min(self.n_samples, i + int(1.5*prm.season_length+1))
# mean
ma[i] = np.mean(temperature[xl:xr])
# subtract moving average
df = temperature - ma
# generate average seasonal component
avs = []
for i in range(prm.season_length):
avs.append(np.mean([df[i+j*prm.season_length] for j in range(prm.n_seasons_init)]))
# expand average season into own seasonal component
for i in range(self.n_samples):
self.model['season'].append(avs[i%len(avs)])
# subtract initial season from original temperature to get adjusted temperature
adjusted = temperature - | np.array(self.model['season']) | numpy.array |
from copy import deepcopy
from astropy.io.fits import hdu
import numpy as np
import multiprocessing as mp
import six
import scipy
from scipy import fftpack
from scipy.ndimage import fourier_shift
from scipy.ndimage import rotate
from astropy.convolution import convolve, convolve_fft
from astropy.io import fits
from poppy.utils import krebin
from .utils import S
# Program bar
from tqdm.auto import trange, tqdm
import logging
_log = logging.getLogger('webbpsf_ext')
###########################################################################
# Image manipulation
###########################################################################
def fshift(inarr, delx=0, dely=0, pad=False, cval=0.0, interp='linear', **kwargs):
""" Fractional image shift
Ported from IDL function fshift.pro.
Routine to shift an image by non-integer values.
Parameters
----------
inarr: ndarray
1D, or 2D array to be shifted. Can also be an image
cube assume with shape [nz,ny,nx].
delx : float
shift in x (same direction as IDL SHIFT function)
dely: float
shift in y
pad : bool
Should we pad the array before shifting, then truncate?
Otherwise, the image is wrapped.
cval : sequence or float, optional
The values to set the padded values for each axis. Default is 0.
((before_1, after_1), ... (before_N, after_N)) unique pad constants for each axis.
((before, after),) yields same before and after constants for each axis.
(constant,) or int is a shortcut for before = after = constant for all axes.
interp : str
Type of interpolation to use during the sub-pixel shift. Valid values are
'linear', 'cubic', and 'quintic'.
Returns
-------
ndarray
Shifted image
"""
from scipy.interpolate import interp1d, interp2d
shape = inarr.shape
ndim = len(shape)
if ndim == 1:
# Return if delx is 0
if np.isclose(delx, 0, atol=1e-5):
return inarr
# separate shift into an integer and fraction shift
        intx = int(delx)
fracx = delx - intx
if fracx < 0:
fracx += 1
intx -= 1
# Pad ends with constant value
if pad:
padx = np.abs(intx) + 5
            out = np.pad(inarr, padx, 'constant', constant_values=cval)
else:
padx = 0
out = inarr.copy()
# shift by integer portion
out = np.roll(out, intx)
# if significant fractional shift...
if not np.isclose(fracx, 0, atol=1e-5):
if interp=='linear':
out = out * (1.-fracx) + np.roll(out,1) * fracx
elif interp=='cubic':
xvals = np.arange(len(out))
fint = interp1d(xvals, out, kind=interp, bounds_error=False, fill_value='extrapolate')
out = fint(xvals+fracx)
elif interp=='quintic':
xvals = np.arange(len(out))
fint = interp1d(xvals, out, kind=5, bounds_error=False, fill_value='extrapolate')
out = fint(xvals+fracx)
else:
raise ValueError(f'interp={interp} not recognized.')
out = out[padx:padx+inarr.size]
elif ndim == 2:
# Return if both delx and dely are 0
if np.isclose(delx, 0, atol=1e-5) and np.isclose(dely, 0, atol=1e-5):
return inarr
ny, nx = shape
# separate shift into an integer and fraction shift
        intx = int(delx)
        inty = int(dely)
fracx = delx - intx
fracy = dely - inty
if fracx < 0:
fracx += 1
intx -= 1
if fracy < 0:
fracy += 1
inty -= 1
# Pad ends with constant value
if pad:
padx = np.abs(intx) + 5
pady = np.abs(inty) + 5
pad_vals = ([pady]*2,[padx]*2)
out = np.pad(inarr,pad_vals,'constant',constant_values=cval)
else:
padx = 0; pady = 0
out = inarr.copy()
# shift by integer portion
out = np.roll(np.roll(out, intx, axis=1), inty, axis=0)
# Check if fracx and fracy are effectively 0
fxis0 = np.isclose(fracx,0, atol=1e-5)
fyis0 = np.isclose(fracy,0, atol=1e-5)
# If fractional shifts are significant
# use bi-linear interpolation between four pixels
if interp=='linear':
if not (fxis0 and fyis0):
# Break bi-linear interpolation into four parts
# to avoid NaNs unnecessarily affecting integer shifted dimensions
part1 = out * ((1-fracx)*(1-fracy))
part2 = 0 if fyis0 else np.roll(out,1,axis=0)*((1-fracx)*fracy)
part3 = 0 if fxis0 else np.roll(out,1,axis=1)*((1-fracy)*fracx)
part4 = 0 if (fxis0 or fyis0) else np.roll(np.roll(out, 1, axis=1), 1, axis=0) * fracx*fracy
out = part1 + part2 + part3 + part4
elif interp=='cubic' or interp=='quintic':
fracx = 0 if fxis0 else fracx
            fracy = 0 if fyis0 else fracy
y = np.arange(out.shape[0])
x = np.arange(out.shape[1])
fint = interp2d(x, y, out, kind=interp)
out = fint(x-fracx, y-fracy)
else:
raise ValueError(f'interp={interp} not recognized.')
out = out[pady:pady+ny, padx:padx+nx]
elif ndim == 3:
# Perform shift on each image in succession
kwargs['delx'] = delx
kwargs['dely'] = dely
kwargs['pad'] = pad
kwargs['cval'] = cval
kwargs['interp'] = interp
out = np.array([fshift(im, **kwargs) for im in inarr])
else:
raise ValueError(f'Found {ndim} dimensions {shape}. Only up to 3 dimensions allowed.')
return out
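# Illustrative usage sketch for `fshift`: shifting a small test image by fractional
# pixel amounts with bilinear interpolation. The array contents and shift values
# are arbitrary placeholders.
def _example_fshift():
    im = np.zeros((5, 5))
    im[2, 2] = 1.0
    # Shift by 0.5 pixel in x and 1.25 pixels in y, padding so flux is not wrapped
    return fshift(im, delx=0.5, dely=1.25, pad=True, cval=0.0, interp='linear')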
def fourier_imshift(image, xshift, yshift, pad=False, cval=0.0, **kwargs):
"""Fourier shift image
Shift an image by use of Fourier shift theorem
Parameters
----------
image : ndarray
2D image or 3D image cube [nz,ny,nx].
xshift : float
Number of pixels to shift image in the x direction
yshift : float
Number of pixels to shift image in the y direction
pad : bool
Should we pad the array before shifting, then truncate?
Otherwise, the image is wrapped.
cval : sequence or float, optional
The values to set the padded values for each axis. Default is 0.
((before_1, after_1), ... (before_N, after_N)) unique pad constants for each axis.
((before, after),) yields same before and after constants for each axis.
(constant,) or int is a shortcut for before = after = constant for all axes.
Returns
-------
ndarray
Shifted image
"""
shape = image.shape
ndim = len(shape)
if ndim==2:
ny, nx = shape
# Pad ends with zeros
if pad:
            padx = np.abs(int(xshift)) + 5
            pady = np.abs(int(yshift)) + 5
pad_vals = ([pady]*2,[padx]*2)
im = np.pad(image,pad_vals,'constant',constant_values=cval)
else:
padx = 0; pady = 0
im = image
offset = fourier_shift( np.fft.fft2(im), (yshift,xshift) )
offset = np.fft.ifft2(offset).real
offset = offset[pady:pady+ny, padx:padx+nx]
elif ndim==3:
kwargs['pad'] = pad
kwargs['cval'] = cval
offset = np.array([fourier_imshift(im, xshift, yshift, **kwargs) for im in image])
else:
raise ValueError(f'Found {ndim} dimensions {shape}. Only up 2 or 3 dimensions allowed.')
return offset
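# Illustrative usage sketch for `fourier_imshift`: the same kind of sub-pixel shift,
# but using the Fourier shift theorem. Values are arbitrary placeholders; `pad=True`
# limits wrap-around from the periodic FFT.
def _example_fourier_imshift():
    im = np.zeros((8, 8))
    im[4, 4] = 1.0
    return fourier_imshift(im, xshift=1.5, yshift=-0.25, pad=True, cval=0.0)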
def pad_or_cut_to_size(array, new_shape, fill_val=0.0, offset_vals=None,
shift_func=fshift, **kwargs):
"""
Resize an array to a new shape by either padding with zeros
or trimming off rows and/or columns. The output shape can
be of any arbitrary amount.
Parameters
----------
array : ndarray
A 1D, 2D, or 3D array. If 3D, then taken to be a stack of images
that are cropped or expanded in the same fashion.
new_shape : tuple
Desired size for the output array. For 2D case, if a single value,
then will create a 2-element tuple of the same value.
fill_val : scalar, optional
Value to pad borders. Default is 0.0
offset_vals : tuple
Option to perform image shift in the (xpix) direction for 1D,
or (ypix,xpix) direction for 2D/3D prior to cropping or expansion.
shift_func : function
Function to use for shifting. Usually either `fshift` or `fourier_imshift`.
Returns
-------
output : ndarray
An array of size new_shape that preserves the central information
of the input array.
"""
shape_orig = array.shape
ndim = len(shape_orig)
if ndim == 1:
# is_1d = True
# Reshape array to a 2D array with nx=1
array = array.reshape((1,1,-1))
nz, ny, nx = array.shape
        if isinstance(new_shape, (float, int, np.integer)):
nx_new = int(new_shape+0.5)
ny_new = 1
new_shape = (ny_new, nx_new)
elif len(new_shape) < 2:
nx_new = new_shape[0]
ny_new = 1
new_shape = (ny_new, nx_new)
else:
ny_new, nx_new = new_shape
output = np.zeros(shape=(nz,ny_new,nx_new), dtype=array.dtype)
elif (ndim == 2) or (ndim == 3):
if ndim==2:
nz = 1
ny, nx = array.shape
array = array.reshape([nz,ny,nx])
else:
nz, ny, nx = array.shape
        if isinstance(new_shape, (float, int, np.integer)):
ny_new = nx_new = int(new_shape+0.5)
new_shape = (ny_new, nx_new)
elif len(new_shape) < 2:
ny_new = nx_new = new_shape[0]
new_shape = (ny_new, nx_new)
else:
ny_new, nx_new = new_shape
output = np.zeros(shape=(nz,ny_new,nx_new), dtype=array.dtype)
else:
raise ValueError(f'Found {ndim} dimensions {shape_orig}. Only up to 3 dimensions allowed.')
# Return if no difference in shapes
# This needs to occur after the above so that new_shape is verified to be a tuple
# If offset_vals is set, then continue to perform shift function
if (array.shape == new_shape) and (offset_vals is None):
return array
# Input the fill values
if fill_val != 0:
output += fill_val
# Pixel shift values
if offset_vals is not None:
if ndim == 1:
ny_off = 0
            if isinstance(offset_vals, (float, int, np.integer)):
nx_off = offset_vals
elif len(offset_vals) < 2:
nx_off = offset_vals[0]
else:
raise ValueError('offset_vals should be a single value.')
else:
if len(offset_vals) == 2:
ny_off, nx_off = offset_vals
else:
raise ValueError('offset_vals should have two values.')
else:
nx_off = ny_off = 0
if nx_new>nx:
n0 = (nx_new - nx) / 2
n1 = n0 + nx
elif nx>nx_new:
n0 = (nx - nx_new) / 2
n1 = n0 + nx_new
else:
n0, n1 = (0, nx)
n0 = int(n0+0.5)
n1 = int(n1+0.5)
if ny_new>ny:
m0 = (ny_new - ny) / 2
m1 = m0 + ny
elif ny>ny_new:
m0 = (ny - ny_new) / 2
m1 = m0 + ny_new
else:
m0, m1 = (0, ny)
m0 = int(m0+0.5)
m1 = int(m1+0.5)
if (nx_new>=nx) and (ny_new>=ny):
#print('Case 1')
output[:,m0:m1,n0:n1] = array.copy()
for i, im in enumerate(output):
output[i] = shift_func(im, nx_off, ny_off, pad=True, cval=fill_val, **kwargs)
elif (nx_new<=nx) and (ny_new<=ny):
#print('Case 2')
if (nx_off!=0) or (ny_off!=0):
array_temp = array.copy()
for i, im in enumerate(array_temp):
array_temp[i] = shift_func(im, nx_off, ny_off, pad=True, cval=fill_val, **kwargs)
output = array_temp[:,m0:m1,n0:n1]
else:
output = array[:,m0:m1,n0:n1]
elif (nx_new<=nx) and (ny_new>=ny):
#print('Case 3')
if nx_off!=0:
array_temp = array.copy()
for i, im in enumerate(array_temp):
array_temp[i] = shift_func(im, nx_off, 0, pad=True, cval=fill_val, **kwargs)
output[:,m0:m1,:] = array_temp[:,:,n0:n1]
else:
output[:,m0:m1,:] = array[:,:,n0:n1]
for i, im in enumerate(output):
output[i] = shift_func(im, 0, ny_off, pad=True, cval=fill_val, **kwargs)
elif (nx_new>=nx) and (ny_new<=ny):
#print('Case 4')
if ny_off!=0:
array_temp = array.copy()
for i, im in enumerate(array_temp):
array_temp[i] = shift_func(im, 0, ny_off, pad=True, cval=fill_val, **kwargs)
output[:,:,n0:n1] = array_temp[:,m0:m1,:]
else:
output[:,:,n0:n1] = array[:,m0:m1,:]
for i, im in enumerate(output):
output[i] = shift_func(im, nx_off, 0, pad=True, cval=fill_val, **kwargs)
# Flatten if input and output arrays are 1D
if (ndim==1) and (ny_new==1):
output = output.flatten()
elif ndim==2:
output = output[0]
return output
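# Illustrative usage sketch for `pad_or_cut_to_size`: padding a small image and then
# cropping it back recovers the original central information. Values are arbitrary.
def _example_pad_or_cut_to_size():
    im = np.arange(25.0).reshape(5, 5)
    im_big = pad_or_cut_to_size(im, (9, 9))        # zero-padded to 9x9
    im_small = pad_or_cut_to_size(im_big, (5, 5))  # cropped back to 5x5
    return np.allclose(im, im_small)               # expected to be True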
def rotate_offset(data, angle, cen=None, cval=0.0, order=1,
reshape=True, recenter=True, shift_func=fshift, **kwargs):
"""Rotate and offset an array.
Same as `rotate` in `scipy.ndimage.interpolation` except that it
rotates around a center point given by `cen` keyword.
The array is rotated in the plane defined by the two axes given by the
`axes` parameter using spline interpolation of the requested order.
Default rotation is clockwise direction.
Parameters
----------
data : ndarray
The input array.
angle : float
The rotation angle in degrees (rotates in clockwise direction).
cen : tuple
Center location around which to rotate image.
Values are expected to be `(xcen, ycen)`.
recenter : bool
Do we want to reposition so that `cen` is the image center?
shift_func : function
Function to use for shifting. Usually either `fshift` or `fourier_imshift`.
Keyword Args
------------
axes : tuple of 2 ints, optional
The two axes that define the plane of rotation. Default is the first
two axes.
reshape : bool, optional
If `reshape` is True, the output shape is adapted so that the input
array is contained completely in the output. Default is True.
order : int, optional
The order of the spline interpolation, default is 1.
The order has to be in the range 0-5.
mode : str, optional
Points outside the boundaries of the input are filled according
to the given mode ('constant', 'nearest', 'reflect', 'mirror' or 'wrap').
Default is 'constant'.
cval : scalar, optional
Value used for points outside the boundaries of the input if
``mode='constant'``. Default is 0.0
prefilter : bool, optional
The parameter prefilter determines if the input is pre-filtered with
`spline_filter` before interpolation (necessary for spline
interpolation of order > 1). If False, it is assumed that the input is
already filtered. Default is True.
Returns
-------
rotate : ndarray or None
The rotated data.
"""
# Return input data if angle is set to None or 0
    # and if no alternative rotation center was requested
if ((angle is None) or (angle==0)) and (cen is None):
return data
shape_orig = data.shape
ndim = len(shape_orig)
if ndim==2:
ny, nx = shape_orig
nz = 1
elif ndim==3:
nz, ny, nx = shape_orig
else:
raise ValueError(f'Found {ndim} dimensions {shape_orig}. Only 2 or 3 dimensions allowed.')
if 'axes' not in kwargs.keys():
kwargs['axes'] = (2,1)
kwargs['order'] = order
kwargs['cval'] = cval
xcen, ycen = (nx/2, ny/2)
if cen is None:
cen = (xcen, ycen)
xcen_new, ycen_new = cen
delx, dely = (xcen-xcen_new, ycen-ycen_new)
# Reshape into a 3D array if nz=1
data = data.reshape([nz,ny,nx])
# Return rotate function if rotating about center
if np.allclose((delx, dely), 0, atol=1e-5):
return rotate(data, angle, reshape=reshape, **kwargs).squeeze()
# fshift interp type
if order <=1:
interp='linear'
elif order <=3:
interp='cubic'
else:
interp='quintic'
# Pad and then shift array
new_shape = (int(ny+2*abs(dely)), int(nx+2*abs(delx)))
images_shift = []
for im in data:
im_pad = pad_or_cut_to_size(im, new_shape, fill_val=cval)
im_new = shift_func(im_pad, delx, dely, cval=cval, interp=interp)
images_shift.append(im_new)
images_shift = np.asarray(images_shift)
# Remove additional dimension in the case of single image
#images_shift = images_shift.squeeze()
# Rotate images
# TODO: Should reshape=True or reshape=reshape?
images_shrot = rotate(images_shift, angle, reshape=True, **kwargs)
if reshape:
return images_shrot.squeeze()
else:
        # Shift back to its original location
if recenter:
images_rot = images_shrot
else:
images_rot = []
for im in images_shrot:
im_new = shift_func(im, -1*delx, -1*dely, pad=True, cval=cval, interp=interp)
images_rot.append(im_new)
images_rot = np.asarray(images_rot)
images_fin = []
for im in images_rot:
im_new = pad_or_cut_to_size(im, (ny,nx))
images_fin.append(im_new)
images_fin = np.asarray(images_fin)
return images_fin.squeeze()
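# Illustrative usage sketch for `rotate_offset`: rotating a small image about an
# off-center point. The image contents, angle, and center are arbitrary placeholders.
def _example_rotate_offset():
    im = np.zeros((31, 31))
    im[10:15, 10:15] = 1.0
    # Rotate 30 deg clockwise about pixel (x, y) = (10, 10), keeping the original grid
    return rotate_offset(im, 30.0, cen=(10.0, 10.0), reshape=False, recenter=False)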
def frebin(image, dimensions=None, scale=None, total=True):
"""Fractional rebin
Python port from the IDL frebin.pro
    Shrink or expand the size of a 1D or 2D array by an arbitrary amount
using bilinear interpolation. Conserves flux by ensuring that each
input pixel is equally represented in the output array. Can also input
an image cube.
Parameters
----------
image : ndarray
Input image ndarray (1D, 2D). Can also be an image
cube assumed to have shape [nz,ny,nx].
dimensions : tuple or None
Desired size of output array (take priority over scale).
scale : tuple or None
Factor to scale output array size. A scale of 2 will increase
the number of pixels by 2 (ie., finer pixel scale).
total : bool
Conserves the surface flux. If True, the output pixels
will be the sum of pixels within the appropriate box of
the input image. Otherwise, they will be the average.
Returns
-------
ndarray
The binned ndarray
"""
shape = image.shape
ndim = len(shape)
if ndim>2:
ndim_temp = 2
sh_temp = shape[-2:]
else:
ndim_temp = ndim
sh_temp = shape
if dimensions is not None:
if isinstance(dimensions, float):
dimensions = [int(dimensions)] * ndim_temp
elif isinstance(dimensions, int):
dimensions = [dimensions] * ndim_temp
elif len(dimensions) != ndim_temp:
raise RuntimeError("The number of input dimensions don't match the image shape.")
elif scale is not None:
if isinstance(scale, float) or isinstance(scale, int):
dimensions = list(map(int, map(lambda x: x+0.5, map(lambda x: x*scale, sh_temp))))
elif len(scale) != ndim_temp:
raise RuntimeError("The number of input dimensions don't match the image shape.")
else:
dimensions = [scale[i]*sh_temp[i] for i in range(len(scale))]
else:
        raise RuntimeError('Incorrect parameters to rebin.\nfrebin(image, dimensions=(x,y))\nfrebin(image, scale=a)')
#print(dimensions)
if ndim==1:
nlout = 1
nsout = dimensions[0]
nsout = int(nsout+0.5)
dimensions = [nsout]
elif ndim==2:
nlout, nsout = dimensions
nlout = int(nlout+0.5)
nsout = int(nsout+0.5)
dimensions = [nlout, nsout]
elif ndim==3:
kwargs = {'dimensions': dimensions, 'scale': scale, 'total': total}
result = np.array([frebin(im, **kwargs) for im in image])
return result
elif ndim > 3:
raise ValueError(f'Found {ndim} dimensions {shape}. Only up to 3 dimensions allowed.')
if nlout != 1:
nl = shape[0]
ns = shape[1]
else:
nl = nlout
ns = shape[0]
sbox = ns / float(nsout)
lbox = nl / float(nlout)
#print(sbox,lbox)
# Contract by integer amount
if (sbox.is_integer()) and (lbox.is_integer()):
image = image.reshape((nl,ns))
result = krebin(image, (nlout,nsout))
if not total:
result /= (sbox*lbox)
if nl == 1:
return result[0,:]
else:
return result
ns1 = ns - 1
nl1 = nl - 1
if nl == 1:
#1D case
_log.debug("Rebinning to Dimension: %s" % nsout)
result = np.zeros(nsout)
for i in range(nsout):
rstart = i * sbox
istart = int(rstart)
rstop = rstart + sbox
if int(rstop) < ns1:
istop = int(rstop)
else:
istop = ns1
frac1 = float(rstart) - istart
frac2 = 1.0 - (rstop - istop)
#add pixel values from istart to istop and subtract fraction pixel
#from istart to rstart and fraction pixel from rstop to istop
result[i] = np.sum(image[istart:istop + 1]) - frac1 * image[istart] - frac2 * image[istop]
if total:
return result
else:
return result / (float(sbox) * lbox)
else:
_log.debug("Rebinning to Dimensions: %s, %s" % tuple(dimensions))
#2D case, first bin in second dimension
temp = np.zeros((nlout, ns))
result = np.zeros((nsout, nlout))
#first lines
for i in range(nlout):
rstart = i * lbox
istart = int(rstart)
rstop = rstart + lbox
if int(rstop) < nl1:
istop = int(rstop)
else:
istop = nl1
frac1 = float(rstart) - istart
frac2 = 1.0 - (rstop - istop)
if istart == istop:
temp[i, :] = (1.0 - frac1 - frac2) * image[istart, :]
else:
temp[i, :] = np.sum(image[istart:istop + 1, :], axis=0) -\
frac1 * image[istart, :] - frac2 * image[istop, :]
temp = np.transpose(temp)
#then samples
for i in range(nsout):
rstart = i * sbox
istart = int(rstart)
rstop = rstart + sbox
if int(rstop) < ns1:
istop = int(rstop)
else:
istop = ns1
frac1 = float(rstart) - istart
frac2 = 1.0 - (rstop - istop)
if istart == istop:
result[i, :] = (1. - frac1 - frac2) * temp[istart, :]
else:
result[i, :] = np.sum(temp[istart:istop + 1, :], axis=0) -\
frac1 * temp[istart, :] - frac2 * temp[istop, :]
if total:
return np.transpose(result)
else:
return np.transpose(result) / (sbox * lbox)
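# Illustrative usage sketch for `frebin`: rebinning by a non-integer factor while
# conserving the summed flux (total=True). The input values are arbitrary.
def _example_frebin():
    im = np.random.rand(10, 10)
    im_small = frebin(im, dimensions=(4, 4), total=True)  # shrink by a factor of 2.5
    return np.isclose(im.sum(), im_small.sum())            # flux conserved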
def image_rescale(HDUlist_or_filename, pixscale_out, pixscale_in=None,
dist_in=None, dist_out=None, cen_star=True, shape_out=None):
""" Rescale image flux
Scale the flux and rebin an image to some new pixel scale and distance.
The object's physical units (AU) are assumed to be constant, so the
total angular size changes if the distance to the object changes.
IT IS RECOMMENDED THAT UNITS BE IN PHOTONS/SEC/PIXEL (not mJy/arcsec)
Parameters
==========
HDUlist_or_filename : HDUList or str
Input either an HDUList or file name.
pixscale_out : float
Desired pixel scale (asec/pix) of returned image. Will be saved in header info.
Keyword Args
============
pixscale_in : float or None
Input image pixel scale. If None, then tries to grab info from the header.
dist_in : float
Input distance (parsec) of original object. If not set, then we look for
the header keywords 'DISTANCE' or 'DIST'.
dist_out : float
Output distance (parsec) of object in image. Will be saved in header info.
If not set, then assumed to be same as input distance.
cen_star : bool
Is the star placed in the central pixel? If so, then the stellar flux is
assumed to be a single pixel that is equal to the maximum flux in the
image. Rather than rebinning that pixel, the total flux is pulled out
and re-added to the central pixel of the final image.
shape_out : tuple, int, or None
Desired size for the output array (ny,nx). If a single value, then will
create a 2-element tuple of the same value.
Returns
=======
HDUlist of the new image.
"""
if isinstance(HDUlist_or_filename, six.string_types):
hdulist = fits.open(HDUlist_or_filename)
elif isinstance(HDUlist_or_filename, fits.HDUList):
hdulist = HDUlist_or_filename
else:
raise ValueError("Input must be a filename or HDUlist")
header = hdulist[0].header
# Try to update input pixel scale if it exists in header
if pixscale_in is None:
key_test = ['PIXELSCL','PIXSCALE']
for k in key_test:
if k in header:
pixscale_in = header[k]
if pixscale_in is None:
raise KeyError("Cannot determine input image pixel scale.")
# Try to update input distance if it exists in header
if dist_in is None:
key_test = ['DISTANCE','DIST']
for k in key_test:
if k in header:
dist_in = header[k]
# If output distance is not set, set to input distance
if dist_out is None:
dist_out = 'None' if dist_in is None else dist_in
fratio = 1
elif dist_in is None:
raise ValueError('Input distance should not be None if output distance is specified.')
else:
fratio = dist_in / dist_out
# Scale the input flux by inverse square law
image = (hdulist[0].data) * fratio**2
# If we move the image closer while assuming same number of pixels with
# the same AU/pixel, then this implies we've increased the angle that
# the image subtends. So, each pixel would have a larger angular size.
# New image scale in arcsec/pixel
imscale_new = pixscale_in * fratio
# Before rebinning, we want the flux in the central pixel to
# always be in the central pixel (the star). So, let's save
# and remove that flux then add back after the rebinning.
if cen_star:
mask_max = image==image.max()
star_flux = image[mask_max][0]
image[mask_max] = 0
# Rebin the image to get a pixel scale that oversamples the detector pixels
fact = imscale_new / pixscale_out
image_new = frebin(image, scale=fact)
# Restore stellar flux to the central pixel.
ny, nx = image_new.shape
if cen_star:
image_new[ny//2, nx//2] += star_flux
if shape_out is not None:
image_new = pad_or_cut_to_size(image_new, shape_out)
hdu_new = fits.PrimaryHDU(image_new)
hdu_new.header = hdulist[0].header.copy()
hdulist_new = fits.HDUList([hdu_new])
hdulist_new[0].header['PIXELSCL'] = (pixscale_out, 'arcsec/pixel')
hdulist_new[0].header['PIXSCALE'] = (pixscale_out, 'arcsec/pixel')
hdulist_new[0].header['DISTANCE'] = (dist_out, 'parsecs')
return hdulist_new
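# Illustrative usage sketch for `image_rescale`: rescaling a toy model to a coarser
# pixel scale and a larger distance. The pixel scale, distance, and flux values are
# arbitrary placeholders, not calibrated quantities.
def _example_image_rescale():
    im = np.zeros((101, 101))
    im[50, 50] = 1000.0             # point source in the central pixel
    hdu = fits.PrimaryHDU(im)
    hdu.header['PIXELSCL'] = 0.01   # arcsec/pixel
    hdu.header['DISTANCE'] = 10.0   # parsec
    hdul = fits.HDUList([hdu])
    return image_rescale(hdul, pixscale_out=0.05, dist_out=20.0, cen_star=True)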
def model_to_hdulist(args_model, sp_star, bandpass):
"""HDUList from model FITS file.
Convert disk model to an HDUList with units of photons/sec/pixel.
If observed filter is different than input filter, we assume that
the disk has a flat scattering, meaning it scales with stellar
continuum. Pixel sizes and distances are left unchanged, and
stored in header.
Parameters
----------
args_model - tuple
Arguments describing the necessary model information:
- fname : Name of model file or an HDUList
- scale0 : Pixel scale (in arcsec/pixel)
- dist0 : Assumed model distance
- wave_um : Wavelength of observation
- units0 : Assumed flux units (e.g., MJy/arcsec^2 or muJy/pixel)
sp_star : :mod:`pysynphot.spectrum`
A pysynphot spectrum of central star. Used to adjust observed
photon flux if filter differs from model input
bandpass : :mod:`pysynphot.obsbandpass`
Output `Pysynphot` bandpass from instrument class. This corresponds
to the flux at the entrance pupil for the particular filter.
"""
#filt, mask, pupil = args_inst
fname, scale0, dist0, wave_um, units0 = args_model
wave0 = wave_um * 1e4
#### Read in the image, then convert from mJy/arcsec^2 to photons/sec/pixel
if isinstance(fname, fits.HDUList):
hdulist = fname
else:
# Open file
hdulist = fits.open(fname)
# Get rid of any non-standard header keywords
hdu = fits.PrimaryHDU(hdulist[0].data)
for k in hdulist[0].header.keys():
try:
hdu.header[k] = hdulist[0].header[k]
except ValueError:
pass
hdulist = fits.HDUList(hdu)
# Break apart units0
units_list = units0.split('/')
if 'mJy' in units_list[0]:
units_pysyn = S.units.mJy()
elif 'uJy' in units_list[0]:
units_pysyn = S.units.muJy()
elif 'nJy' in units_list[0]:
units_pysyn = S.units.nJy()
elif 'MJy' in units_list[0]:
hdulist[0].data *= 1000 # Convert to Jy
units_pysyn = S.units.Jy()
elif 'Jy' in units_list[0]: # Jy should be last
units_pysyn = S.units.Jy()
else:
errstr = "Do not recognize units0='{}'".format(units0)
raise ValueError(errstr)
# Convert from input units to photlam (photons/sec/cm^2/A/angular size)
im = units_pysyn.ToPhotlam(wave0, hdulist[0].data)
# We assume scattering is flat in photons/sec/A
# This means everything scales with stellar continuum
sp_star.convert('photlam')
wstar, fstar = (sp_star.wave/1e4, sp_star.flux)
# Compare observed wavelength to image wavelength
wobs_um = bandpass.avgwave() / 1e4 # Current bandpass wavelength
wdel = np.linspace(-0.1,0.1)
f_obs = np.interp(wobs_um+wdel, wstar, fstar)
f0 = np.interp(wave_um+wdel, wstar, fstar)
im *= np.mean(f_obs / f0)
# Convert to photons/sec/pixel
im *= bandpass.equivwidth() * S.refs.PRIMARY_AREA
# If input units are per arcsec^2 then scale by pixel scale
# This will be ph/sec for each oversampled pixel
if ('arcsec' in units_list[1]) or ('asec' in units_list[1]):
im *= scale0**2
elif 'mas' in units_list[1]:
im *= (scale0*1000)**2
# Save into HDUList
hdulist[0].data = im
hdulist[0].header['UNITS'] = 'photons/sec'
hdulist[0].header['PIXELSCL'] = (scale0, 'arcsec/pixel')
hdulist[0].header['PIXSCALE'] = (scale0, 'arcsec/pixel') # Alternate keyword
hdulist[0].header['DISTANCE'] = (dist0, 'parsecs')
return hdulist
def distort_image(hdulist_or_filename, ext=0, to_frame='sci', fill_value=0,
xnew_coords=None, ynew_coords=None, return_coords=False,
aper=None, sci_cen=None, pixelscale=None, oversamp=None):
""" Distort an image
Apply SIAF instrument distortion to an image that is assumed to be in
its ideal coordinates. The header information should contain the relevant
SIAF point information, such as SI instrument, aperture name, pixel scale,
detector oversampling, and detector position ('sci' coords).
This function then transforms the image to the new coordinate system using
scipy's RegularGridInterpolator (linear interpolation).
Parameters
----------
hdulist_or_filename : str or HDUList
A PSF from WebbPSF, either as an HDUlist object or as a filename
ext : int
Extension of HDUList to perform distortion on.
fill_value : float or None
Value used to fill in any blank space by the skewed PSF. Default = 0.
If set to None, values outside the domain are extrapolated.
to_frame : str
Type of input coordinates.
* 'tel': arcsecs V2,V3
* 'sci': pixels, in conventional DMS axes orientation
* 'det': pixels, in raw detector read out axes orientation
* 'idl': arcsecs relative to aperture reference location.
xnew_coords : None or ndarray
Array of x-values in new coordinate frame to interpolate onto.
Can be a 1-dimensional array of unique values, in which case
the final image will be of size (ny_new, nx_new). Or a 2d array
that corresponds to full regular grid and has same shape as
`ynew_coords` (ny_new, nx_new). If set to None, then final image
is same size as input image, and coordinate grid spans the min
and max values of siaf_ap.convert(xidl,yidl,'idl',to_frame).
ynew_coords : None or ndarray
Array of y-values in new coordinate frame to interpolate onto.
Can be a 1-dimensional array of unique values, in which case
the final image will be of size (ny_new, nx_new). Or a 2d array
that corresponds to full regular grid and has same shape as
`xnew_coords` (ny_new, nx_new). If set to None, then final image
is same size as input image, and coordinate grid spans the min
and max values of siaf_ap.convert(xidl,yidl,'idl',to_frame).
return_coords : bool
In addition to returning the final image, setting this to True
will return the full set of new coordinates. Output will then
be (psf_new, xnew, ynew), where all three array have the same
shape.
aper : None or :mod:`pysiaf.Aperture`
Option to pass the SIAF aperture if it is already known or
specified to save time on generating a new one. If set to None,
then automatically determines a new `pysiaf` aperture based on
information stored in the header.
sci_cen : tuple or None
Science pixel values associated with center of array. If set to
None, then will grab values from DET_X and DET_Y header keywords.
pixelscale : float or None
Pixel scale of input image in arcsec/pixel. If set to None, then
will search for PIXELSCL and PIXSCALE keywords in header.
oversamp : int or None
Oversampling of input image relative to native detector pixel scale.
If set to None, will search for OSAMP and DET_SAMP keywords.
"""
import pysiaf
from scipy.interpolate import RegularGridInterpolator
def _get_default_siaf(instrument, aper_name):
# Create new naming because SIAF requires special capitalization
if instrument == "NIRCAM":
siaf_name = "NIRCam"
elif instrument == "NIRSPEC":
siaf_name = "NIRSpec"
else:
siaf_name = instrument
# Select a single SIAF aperture
siaf = pysiaf.Siaf(siaf_name)
aper = siaf.apertures[aper_name]
return aper
# Read in input PSF
if isinstance(hdulist_or_filename, str):
hdu_list = fits.open(hdulist_or_filename)
elif isinstance(hdulist_or_filename, fits.HDUList):
hdu_list = hdulist_or_filename
else:
raise ValueError("input must be a filename or HDUlist")
if aper is None:
# Log instrument and detector names
instrument = hdu_list[0].header["INSTRUME"].upper()
aper_name = hdu_list[0].header["APERNAME"].upper()
# Pull default values
aper = _get_default_siaf(instrument, aper_name)
# Pixel scale information
ny, nx = hdu_list[ext].shape
if pixelscale is None:
# Pixel scale of input image
try: pixelscale = hdu_list[ext].header["PIXELSCL"]
except: pixelscale = hdu_list[ext].header["PIXSCALE"]
if oversamp is None:
# Image oversampling relative to detector
try: oversamp = hdu_list[ext].header["OSAMP"]
except: oversamp = hdu_list[ext].header["DET_SAMP"]
# Get 'sci' reference location where PSF is observed
if sci_cen is None:
xsci_cen = hdu_list[ext].header["DET_X"] # center x location in pixels ('sci')
ysci_cen = hdu_list[ext].header["DET_Y"] # center y location in pixels ('sci')
else:
xsci_cen, ysci_cen = sci_cen
# ###############################################
# Create an array of indices (in pixels) for where the PSF is located on the detector
nx_half, ny_half = ( (nx-1)/2., (ny-1)/2. )
xlin = np.linspace(-1*nx_half, nx_half, nx)
ylin = np.linspace(-1*ny_half, ny_half, ny)
xarr, yarr = np.meshgrid(xlin, ylin)
# Convert the PSF center point from pixels to arcseconds using pysiaf
xidl_cen, yidl_cen = aper.sci_to_idl(xsci_cen, ysci_cen)
# Get 'idl' coords
xidl = xarr * pixelscale + xidl_cen
yidl = yarr * pixelscale + yidl_cen
# ###############################################
# Create an array of indices (in pixels) that the final data will be interpolated onto
xnew_cen, ynew_cen = aper.convert(xsci_cen, ysci_cen, 'sci', to_frame)
# If new x and y values are specified, create a meshgrid
if (xnew_coords is not None) and (ynew_coords is not None):
if len(xnew_coords.shape)==1 and len(ynew_coords.shape)==1:
xnew, ynew = np.meshgrid(xnew_coords, ynew_coords)
elif len(xnew_coords.shape)==2 and len(ynew_coords.shape)==2:
assert xnew_coords.shape==ynew_coords.shape, "If new x and y inputs are a grid, must be same shapes"
xnew, ynew = xnew_coords, ynew_coords
elif to_frame=='sci':
xnew = xarr / oversamp + xnew_cen
ynew = yarr / oversamp + ynew_cen
else:
xv, yv = aper.convert(xidl, yidl, 'idl', to_frame)
xmin, xmax = (xv.min(), xv.max())
ymin, ymax = (yv.min(), yv.max())
# Range xnew from 0 to 1
xnew = xarr - xarr.min()
xnew /= xnew.max()
# Set to xmin to xmax
xnew = xnew * (xmax - xmin) + xmin
# Make sure center value is xnew_cen
xnew += xnew_cen - np.median(xnew)
# Range ynew from 0 to 1
ynew = yarr - yarr.min()
ynew /= ynew.max()
# Set to ymin to ymax
ynew = ynew * (ymax - ymin) + ymin
# Make sure center value is xnew_cen
ynew += ynew_cen - np.median(ynew)
# Convert requested coordinates to 'idl' coordinates
xnew_idl, ynew_idl = aper.convert(xnew, ynew, to_frame, 'idl')
# ###############################################
# Interpolate using Regular Grid Interpolator
xvals = xlin * pixelscale + xidl_cen
yvals = ylin * pixelscale + yidl_cen
func = RegularGridInterpolator((yvals,xvals), hdu_list[ext].data, method='linear',
bounds_error=False, fill_value=fill_value)
# Create an array of (yidl, xidl) values to interpolate onto
pts = np.array([ynew_idl.flatten(),xnew_idl.flatten()]).transpose()
psf_new = func(pts).reshape(xnew.shape)
# Make sure we're not adding flux to the system via interpolation artifacts
sum_orig = hdu_list[ext].data.sum()
sum_new = psf_new.sum()
if sum_new > sum_orig:
psf_new *= (sum_orig / sum_new)
if return_coords:
return (psf_new, xnew, ynew)
else:
return psf_new
def _convolve_psfs_for_mp(arg_vals):
"""
Internal helper routine for parallelizing computations across multiple processors,
specifically for convolving position-dependent PSFs with an extended image or
field of PSFs.
"""
im, psf, ind_mask = arg_vals
ny, nx = im.shape
ny_psf, nx_psf = psf.shape
try:
# Get region to perform convolution
xtra_pix = int(nx_psf/2 + 10)
ind = np.argwhere(ind_mask.sum(axis=0)>0)
ix1, ix2 = (np.min(ind), np.max(ind))
ix1 -= xtra_pix
ix1 = 0 if ix1<0 else ix1
ix2 += xtra_pix
ix2 = nx if ix2>nx else ix2
xtra_pix = int(ny_psf/2 + 10)
ind = np.argwhere(ind_mask.sum(axis=1))
iy1, iy2 = (np.min(ind), np.max(ind))
iy1 -= xtra_pix
iy1 = 0 if iy1<0 else iy1
iy2 += xtra_pix
iy2 = ny if iy2>ny else iy2
except ValueError:
        # No unmasked pixels for this PSF region; nothing to convolve
return 0
im_temp = im.copy()
im_temp[~ind_mask] = 0
if np.allclose(im_temp,0):
# No need to convolve anything if no flux!
res = im_temp
else:
# Normalize PSF sum to 1.0
# Otherwise convolve_fft may throw an error if psf.sum() is too small
norm = psf.sum()
psf = psf / norm
res = convolve_fft(im_temp[iy1:iy2,ix1:ix2], psf, fftn=fftpack.fftn, ifftn=fftpack.ifftn, allow_huge=True)
res *= norm
im_temp[iy1:iy2,ix1:ix2] = res
res = im_temp
return res
def _convolve_psfs_for_mp_old(arg_vals):
"""
Internal helper routine for parallelizing computations across multiple processors,
specifically for convolving position-dependent PSFs with an extended image or
field of PSFs.
"""
im, psf, ind_mask = arg_vals
im_temp = im.copy()
im_temp[~ind_mask] = 0
if np.allclose(im_temp,0):
# No need to convolve anything if no flux!
res = im_temp
else:
# Normalize PSF sum to 1.0
# Otherwise convolve_fft may throw an error if psf.sum() is too small
norm = psf.sum()
psf = psf / norm
res = convolve_fft(im_temp, psf, fftn=fftpack.fftn, ifftn=fftpack.ifftn, allow_huge=True)
res *= norm
return res
def _crop_hdul(hdul_sci_image, psf_shape):
# Science image aperture info
im_input = hdul_sci_image[0].data
hdr_im = hdul_sci_image[0].header
# Crop original image in case of unnecessary zeros
zmask = im_input!=0
row_sum = zmask.sum(axis=0)
col_sum = zmask.sum(axis=1)
indx = np.where(row_sum>0)[0]
indy = np.where(col_sum>0)[0]
try:
ix1, ix2 = indx[0], indx[-1]+1
except IndexError:
# In case all zeroes
ix1 = int(im_input.shape[1] / 2)
ix2 = ix1 + 1
try:
iy1, iy2 = indy[0], indy[-1]+1
except IndexError:
# In case all zeroes
iy1 = int(im_input.shape[0] / 2)
iy2 = iy1 + 1
# Expand indices to accommodate PSF size
ny_psf, nx_psf = psf_shape
ny_im, nx_im = im_input.shape
ix1 -= int(nx_psf/2 + 5)
ix2 += int(nx_psf/2 + 5)
iy1 -= int(ny_psf/2 + 5)
iy2 += int(ny_psf/2 + 5)
# Make sure we don't go out of bounds
if ix1<0: ix1 = 0
if ix2>nx_im: ix2 = nx_im
if iy1<0: iy1 = 0
if iy2>ny_im: iy2 = ny_im
# Make HDU and copy header info
hdu = fits.PrimaryHDU(im_input[iy1:iy2,ix1:ix2])
try:
hdu.header['XIND_REF'] = hdr_im['XIND_REF'] - ix1
hdu.header['YIND_REF'] = hdr_im['YIND_REF'] - iy1
except:
try:
hdu.header['XCEN'] = hdr_im['XCEN'] - ix1
hdu.header['YCEN'] = hdr_im['YCEN'] - iy1
except:
hdu.header['XIND_REF'] = im_input.shape[1] / 2 - ix1
hdu.header['YIND_REF'] = im_input.shape[0] / 2 - iy1
hdu.header['CFRAME'] = hdr_im['CFRAME']
if 'PIXELSCL' in hdr_im.keys():
hdu.header['PIXELSCL'] = hdr_im['PIXELSCL']
if 'OSAMP' in hdr_im.keys():
hdu.header['OSAMP'] = hdr_im['OSAMP']
hdu.header['APERNAME'] = hdr_im['APERNAME']
hdu.header['IX1'] = ix1
hdu.header['IX2'] = ix2
hdu.header['IY1'] = iy1
hdu.header['IY2'] = iy2
return fits.HDUList([hdu])
def convolve_image(hdul_sci_image, hdul_psfs, return_hdul=False,
output_sampling=None, crop_zeros=True):
""" Convolve image with various PSFs
Takes an extended image, breaks it up into subsections, then
convolves each subsection with the nearest neighbor PSF. The
subsection sizes and locations are determined from PSF 'sci'
positions.
Parameters
==========
hdul_sci_image : HDUList
Image to convolve. Requires header info of:
- APERNAME : SIAF aperture that images is placed in
- PIXELSCL : Pixel scale of image (arcsec/pixel)
- OSAMP : Oversampling relative to detector pixels
- CFRAME : Coordinate frame of image ('sci', 'tel', 'idl', 'det')
- XCEN : Image x-position corresponding to aperture reference location
- YCEN : Image y-position corresponding to aperture reference location
- XIND_REF, YIND_REF : Alternative for (XCEN, YCEN)
hdul_psfs : HDUList
Multi-extension FITS. Each HDU element is a different PSF for
some location within some field of view. Must have same pixel
scale as hdul_sci_image.
Keyword Args
============
return_hdul : bool
Return as an HDUList, otherwise return as an image.
output_sampling : None or int
Sampling output relative to detector.
If None, then return same sampling as input image.
crop_zeros : bool
For large images that are zero-padded, this option will first crop off the
        extraneous zeros (but accounting for PSF size so as not to truncate the resulting
convolution at edges), then place the convolved subarray image back into
a full frame of zeros. This process can improve speeds by a factor of a few,
with no resulting differences. Should always be set to True; only provided
as an option for debugging purposes.
"""
import pysiaf
# Get SIAF aperture info
hdr_psf = hdul_psfs[0].header
siaf = pysiaf.siaf.Siaf(hdr_psf['INSTRUME'])
siaf_ap_psfs = siaf[hdr_psf['APERNAME']]
if crop_zeros:
hdul_sci_image_orig = hdul_sci_image
hdul_sci_image = _crop_hdul(hdul_sci_image, hdul_psfs[0].data.shape)
# Science image aperture info
im_input = hdul_sci_image[0].data
hdr_im = hdul_sci_image[0].header
siaf_ap_sci = siaf[hdr_im['APERNAME']]
# Get tel coordinates for all PSFs
xvals = np.array([hdu.header['XVAL'] for hdu in hdul_psfs])
yvals = np.array([hdu.header['YVAL'] for hdu in hdul_psfs])
if 'tel' in hdr_psf['CFRAME']:
xtel_psfs, ytel_psfs = (xvals, yvals)
else:
xtel_psfs, ytel_psfs = siaf_ap_psfs.convert(xvals, yvals, hdr_psf['CFRAME'], 'tel')
# Get tel coordinates for every pixel in science image
# Size of input image in arcsec
ysize, xsize = im_input.shape
# Image index corresponding to reference point
try:
xcen_im = hdr_im['XIND_REF']
ycen_im = hdr_im['YIND_REF']
except:
try:
xcen_im = hdr_im['XCEN']
ycen_im = hdr_im['YCEN']
except:
ycen_im, xcen_im = np.array(im_input.shape) / 2
try:
pixscale = hdr_im['PIXELSCL']
except:
pixscale = hdul_psfs[0].header['PIXELSCL']
xvals_im = np.arange(xsize).astype('float') - xcen_im
yvals_im = np.arange(ysize).astype('float') - ycen_im
xarr_im, yarr_im = np.meshgrid(xvals_im, yvals_im)
xref, yref = siaf_ap_sci.reference_point(hdr_im['CFRAME'])
if (hdr_im['CFRAME'] == 'tel') or (hdr_im['CFRAME'] == 'idl'):
xarr_im *= pixscale
xarr_im += xref
yarr_im *= pixscale
yarr_im += yref
elif (hdr_im['CFRAME'] == 'sci') or (hdr_im['CFRAME'] == 'det'):
xarr_im /= hdr_im['OSAMP']
xarr_im += xref
yarr_im /= hdr_im['OSAMP']
yarr_im += yref
# Convert each element in image array to tel coords
xtel_im, ytel_im = siaf_ap_sci.convert(xarr_im, yarr_im, hdr_im['CFRAME'], 'tel')
# Create mask for input image for each PSF to convolve
# For each pixel, find PSF that is closest on the sky
# Go row-by-row to save on memory
npsf = len(hdul_psfs)
mask_arr = np.zeros([npsf, ysize, xsize], dtype='bool')
for iy in range(ysize):
rho_arr = (xtel_im[iy].reshape([-1,1]) - xtel_psfs.reshape([1,-1]))**2 \
+ (ytel_im[iy].reshape([-1,1]) - ytel_psfs.reshape([1,-1]))**2
# Calculate indices corresponding to closest PSF for each pixel
im_ind = np.argmin(rho_arr, axis=1)
mask = np.asarray([im_ind==i for i in range(npsf)])
mask_arr[:,iy,:] = mask
del rho_arr, im_ind, mask, xtel_im, ytel_im
# Make sure all pixels have a mask value of 1 somewhere (and only in one mask!)
mask_sum = mask_arr.sum(axis=0)
ind_bad = (mask_sum != 1)
nbad = len(mask_sum[ind_bad])
assert np.allclose(mask_sum, 1), f"{nbad} pixels in mask not assigned a PSF."
# Split into workers
im_conv = np.zeros_like(im_input)
worker_args = [(im_input, hdul_psfs[i].data, mask_arr[i]) for i in range(npsf)]
for wa in tqdm(worker_args, desc='Convolution', leave=False):
im_conv += _convolve_psfs_for_mp(wa)
# Ensure there are no negative values from convolve_fft
im_conv[im_conv<0] = 0
# If we cropped the original input, put convolved image into full array
if crop_zeros:
hdul_sci_image_crop = hdul_sci_image
hdul_sci_image = hdul_sci_image_orig
im_conv_crop = im_conv
im_conv = np.zeros_like(hdul_sci_image[0].data)
hdr_crop = hdul_sci_image_crop[0].header
ix1, ix2 = (hdr_crop['IX1'], hdr_crop['IX2'])
iy1, iy2 = (hdr_crop['IY1'], hdr_crop['IY2'])
im_conv[iy1:iy2,ix1:ix2] = im_conv_crop
# Scale to specified output sampling
output_sampling = 1 if output_sampling is None else output_sampling
scale = output_sampling / hdr_im['OSAMP']
im_conv = frebin(im_conv, scale=scale)
if return_hdul:
hdul = deepcopy(hdul_sci_image)
hdul[0].data = im_conv
hdul[0].header['OSAMP'] = output_sampling
return hdul
else:
return im_conv
def _convolve_image_old(hdul_sci_image, hdul_psfs, aper=None, nsplit=None):
""" Convolve image with various PSFs
Takes an extended image, breaks it up into subsections, then
convolves each subsection with the nearest neighbor PSF. The
subsection sizes and locations are determined from PSF 'sci'
positions.
Parameters
==========
hdul_sci_image : HDUList
Disk model. Requires header keyword 'PIXELSCL'.
hdul_psfs : HDUList
Multi-extension FITS. Each HDU element is a different PSF for
some location within some field of view.
aper : :mod:`pysiaf.aperture.JwstAperture`
Option to specify the reference SIAF aperture.
"""
import pysiaf
# Get SIAF aperture info
hdr = hdul_psfs[0].header
if aper is None:
siaf = pysiaf.siaf.Siaf(hdr['INSTRUME'])
siaf_ap = siaf[hdr['APERNAME']]
else:
siaf_ap = aper
# Get xsci and ysci coordinates
xvals = np.array([hdu.header['XVAL'] for hdu in hdul_psfs])
yvals = np.array([hdu.header['YVAL'] for hdu in hdul_psfs])
if 'sci' in hdr['CFRAME']:
xsci, ysci = (xvals, yvals)
else:
xsci, ysci = siaf_ap.convert(xvals, yvals, hdr['CFRAME'], 'sci')
xoff_sci_asec_psfs = (xsci - siaf_ap.XSciRef) * siaf_ap.XSciScale
yoff_sci_asec_psfs = (ysci - siaf_ap.YSciRef) * siaf_ap.YSciScale
# Size of input image in arcsec
im_input = hdul_sci_image[0].data
pixscale = hdul_sci_image[0].header['PIXELSCL']
ysize, xsize = im_input.shape
ysize_asec = ysize * pixscale
xsize_asec = xsize * pixscale
# Create mask for input image for each PSF to convolve
rho_arr = []
coords_asec = (xoff_sci_asec_psfs, yoff_sci_asec_psfs)
for xv, yv in | np.transpose(coords_asec) | numpy.transpose |
"""SAR/Radar related routines and classes.
SAR/Radar related routines assume 2d radar images with multiple channels
with the structure [az, rg, ...].
This can be:
- [az, rg] - single channel data (e.g. single-pol slc)
- [az, rg, 3] - 3 channel data (e.g. 3 polarization channels)
- [az, rg, 2, 3] - 2 tracks with 3 polarizations each (SB-PolInSAR)
- [az, rg, n_tr, n_pol] - multi-baseline PolInSAR scattering vectors
- [az, rg, n_tr*n_pol, n_tr*n_pol] - multi-baseline PolInSAR covariance matrix
Includes:
- db / db2lin
- cc : complex coherence computation from 2 channes, with optional phase offset.
Accepts either presumming parameter (div), or smoothing parameter (smm).
Should handle numpy, memmap, and h5py arrays.
- mtv : convenient visualization function for radar data (and not only).
Should handle numpy, memmap, and h5py arrays.
Modifications:
- 4/23/15, mn: show_slc_spectrum() added
"""
import numpy as np
import scipy as sp
import pylab as plt
#from mxn.lib.base import normscl
#from mxn.lib.proc import *
from .base import normscl
from .proc import *
def db(x):
"""From linear to decibel"""
return 10.0*np.log10(x)
def db2lin(x):
"""From decibel to linear"""
return 10.0**(x/10.0)
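# Illustrative usage sketch: the two helpers are inverses of each other.
# The backscatter value is an arbitrary placeholder.
def _example_db_roundtrip():
    sigma0 = 0.25                                  # linear backscatter
    return np.isclose(db2lin(db(sigma0)), sigma0)  # expected to be True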
def magscale(img, factor=2.5, div=None, type='mag'):
"""Scales radar image magnitude.
Options:
- type : {'slc', 'amp', 'mag'}
- div : when provided, image is shrinked
"""
if type in ['slc', 'amp','a']:
func = lambda x: np.abs(x)**2
    elif type in ['mag','m','i']:
        if img.dtype in ['F', 'D']:
            func = np.abs
        else:
            func = None
    else:
        # Unrecognized type: treat the data as already being magnitude/intensity
        func = None
if div is not None and func is not None:
mag = block_rebin(func, img, div=div, dtype='f', bs=div[0]*2)
elif func is not None:
mag = block_filter(func, img, dtype='f')
elif div is not None:
mag = rebin(img, div=div)
else:
mag = img
n = np.shape(mag)
if len(n) == 3:
ret = np.zeros(n,dtype='float32')
for i in range(n[2]):
im = mag[:,:,i]
ret[:,:,i] = np.clip(im/np.mean(im[im > 0])*255//factor,0,255)
return ret
return np.clip(mag/ | np.mean(mag[mag > 0]) | numpy.mean |
"""
Super-resolution of CelebA using Generative Adversarial Networks.
The dataset can be downloaded from: https://www.dropbox.com/sh/8oqt9vytwxb3s4r/AADIKlz8PR9zr6Y20qbkunrba/Img/img_align_celeba.zip?dl=0
Instructions for running the script:
1. Download the dataset from the provided link
2. Save the folder 'img_align_celeba' to 'datasets/'
3. Run the script using the command 'python srgan.py'
"""
from scipy.special import expit
import keras.backend as K
from keras.datasets import mnist
#from keras_contrib.layers.normalization import InstanceNormalization
from keras.layers import Input, Dense, Reshape, Flatten, Dropout, Concatenate, concatenate, Lambda
from keras.layers import BatchNormalization, Activation, ZeroPadding2D, Add, Layer
from keras.layers.advanced_activations import PReLU, LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D
from keras.applications import VGG19
from keras.models import Sequential, Model
from keras.initializers import VarianceScaling
from keras.optimizers import Adam
from keras.callbacks import LearningRateScheduler, History, TensorBoard, ModelCheckpoint, Callback
import datetime
import matplotlib.pyplot as plt
import sys
from data_loader import DataLoader
import numpy as np
import os
import keras.backend as K
import tensorflow as tf
from SpectralNormalizationKeras import DenseSN, ConvSN2D
def make_image(images, name):
"""
    Create a TensorFlow image summary from a numpy image representation.
Copied from https://github.com/lanpa/tensorboard-pytorch/
"""
tensor = tf.convert_to_tensor(images)
return tf.summary.image(name, tensor, max_outputs=2, family=name)
class TensorBoardImage(Callback):
def __init__(self, model, loader, batch_size, log_dir):
super().__init__()
self.model = model
self.data_loader = loader
self.batch_size = batch_size
self.lr_imgs = tf.placeholder(tf.float32, shape=self.model.layers[0].input_shape)
self.imgs = tf.placeholder(tf.float32, shape=self.model.layers[-1].output_shape)
self.real_lr = tf.summary.image('Real low-res', self.lr_imgs, max_outputs=2, family='Real low-res')
self.generated = tf.summary.image('Generated', self.imgs, max_outputs=2, family='Generated')
self.real_hr = tf.summary.image('Real high-res', self.imgs, max_outputs=2, family='Real high-res')
self.writer = tf.summary.FileWriter(log_dir)
def on_epoch_end(self, epoch, logs={}):
# Load image
# Do something to the image
imgs_hr, imgs_lr = self.data_loader.load_data(batch_size=self.batch_size, is_testing=True)
fake_hr = self.model.predict(imgs_lr)
# Rescale images 0 - 1
imgs_lr = 0.5 * imgs_lr + 0.5
fake_hr = 0.5 * fake_hr + 0.5
imgs_hr = 0.5 * imgs_hr + 0.5
sess = K.get_session()
summ = sess.run(self.real_lr, {self.lr_imgs: imgs_lr})
self.writer.add_summary(summ, epoch)
summ = sess.run(self.generated, {self.imgs: fake_hr})
self.writer.add_summary(summ, epoch)
summ = sess.run(self.real_hr, {self.imgs: imgs_hr})
self.writer.add_summary(summ, epoch)
self.writer.flush()
def on_train_end(self, logs=None):
self.writer.close()
def discriminator_loss(y_true, y_pred):
shape = K.shape(y_pred)
d_real = y_pred[:shape[0]//2, :]
d_fake = y_pred[shape[0]//2:, :]
loss_real = K.mean(K.relu(-1 + d_real))
loss_fake = K.mean(K.relu(1 + d_fake))
return loss_real + loss_fake
def generator_loss(y_true, y_pred):
return -K.mean(y_pred)
class SelfAttention(Layer):
def __init__(self, **kwargs):
super(SelfAttention, self).__init__(**kwargs)
def build(self, input_shape):
# Create a trainable weight variable for this layer.
input_dim = input_shape[-1]
kernel_shape = (1, 1, input_dim, input_dim // 8)
self.kernel_f = self.add_weight(name='kernel_f',
shape=kernel_shape,
initializer='uniform',
trainable=True)
self.kernel_g = self.add_weight(name='kernel_g',
shape=kernel_shape,
initializer='uniform',
trainable=True)
self.kernel_h = self.add_weight(name='kernel_h',
shape=(1, 1, input_dim, input_dim),
initializer='uniform',
trainable=True)
self._gamma = self.add_weight(name='scale',
shape=(1,),
initializer='zeros',
trainable=True)
super(SelfAttention, self).build(input_shape) # Be sure to call this at the end
def call(self, x):
_, h, w, c = K.int_shape(x)
b = K.shape(x)[0]
f = K.reshape(K.conv2d(x, self.kernel_f, padding='same'), (b, h*w, -1))
g = K.permute_dimensions(K.reshape(K.conv2d(x, self.kernel_g, padding='same'), (b, h*w, -1)), (0, 2, 1))
s = K.batch_dot(f, g)
beta = K.softmax(s)
h = K.reshape(K.conv2d(x, self.kernel_h, padding='same'),
(b, h*w, c))
out = K.batch_dot(beta, h)
out = K.reshape(out, K.shape(x))
out = self._gamma * out + x
return out
def compute_output_shape(self, input_shape):
return input_shape
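# Illustrative usage sketch: dropping SelfAttention into a tiny Keras model.
# The input shape and filter count are arbitrary placeholders; the layer keeps
# the shape of its input, so it can be inserted between convolutional blocks.
def _example_self_attention_model():
    inp = Input(shape=(16, 16, 32))
    x = Conv2D(32, kernel_size=3, padding='same')(inp)
    x = SelfAttention()(x)
    return Model(inp, x)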
class SmallInitialization(VarianceScaling):
def __init__(self, scale=0.1):
super().__init__()
self.scale = scale
def __call__(self, shape, dtype=None):
return self.scale * super().__call__(shape, dtype)
def rel_avg_loss(x_r, x_f):
return K.sigmoid(x_r - K.mean(x_f))
def d_loss(y_real, y_pred):
batch_size = K.shape(y_pred)[0]
x_r = y_pred[:batch_size // 2, :]
x_f = y_pred[batch_size // 2:, :]
d_ra_real = rel_avg_loss(x_r, x_f)
d_ra_fake = rel_avg_loss(x_f, x_r)
y_pred = K.concatenate([d_ra_real, d_ra_fake], axis=0)
return K.mean(K.binary_crossentropy(y_real, y_pred), axis=-1)
def g_loss(y_real, y_pred):
d_ra_real = rel_avg_loss(y_real, y_pred)
d_ra_fake = rel_avg_loss(y_pred, y_real)
shape = K.shape(y_pred)
zeros = K.zeros(shape=shape)
ones = K.ones(shape=shape)
real_loss = K.binary_crossentropy(zeros, d_ra_real)
fake_loss = K.binary_crossentropy(ones, d_ra_fake)
return K.mean(real_loss + fake_loss, axis=-1)
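# Illustrative sketch of the relativistic average formulation used above: each
# real/fake score is compared against the mean score of the opposite class.
# This NumPy analogue of `rel_avg_loss` is for clarity only; inputs are raw logits.
def _example_rel_avg_numpy(d_real, d_fake):
    d_ra_real = expit(d_real - d_fake.mean())  # real scores relative to average fake
    d_ra_fake = expit(d_fake - d_real.mean())  # fake scores relative to average real
    return d_ra_real, d_ra_fake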
class SRGAN():
def __init__(self, parent_dir):
# Input shape
self.channels = 3
self.lr_height = 64 # Low resolution height
self.lr_width = 64 # Low resolution width
self.lr_shape = (self.lr_height, self.lr_width, self.channels)
self.hr_height = self.lr_height*4 # High resolution height
self.hr_width = self.lr_width*4 # High resolution width
self.hr_shape = (self.hr_height, self.hr_width, self.channels)
# Number of residual blocks in the generator
self.n_residual_blocks = 13
optimizer = Adam(0.0002, 0.5)
# We use a pre-trained VGG19 model to extract image features from the high resolution
# and the generated high resolution images and minimize the mse between them
self.vgg = self.build_vgg()
self.vgg.trainable = False
self.vgg.compile(loss='mse',
optimizer=optimizer,
metrics=['accuracy'])
# Configure data loader
self.dataset_name = 'img_align_celeba'
self.parent_dir = parent_dir
self.data_loader = DataLoader(dataset_name=self.dataset_name, parent_dir=self.parent_dir,
img_res=(self.hr_height, self.hr_width))
# Calculate output shape of D (PatchGAN)
patch = int(self.hr_height / 2**4)
self.disc_patch = (patch, patch, 1)
# Number of filters in the first layer of G and D
self.gf = 64
self.df = 64
# Build and compile the discriminator
self.discriminator = self.build_discriminator()
self.discriminator.summary()
self.discriminator.compile(loss=d_loss,
optimizer=optimizer,
metrics=['accuracy'])
# Build the generator
self.generator = self.build_generator()
self.generator.summary()
# High res. and low res. images
img_hr = Input(shape=self.hr_shape)
img_lr = Input(shape=self.lr_shape)
# Generate high res. version from low res.
fake_hr = self.generator(img_lr)
# Extract image features of the generated img
fake_features = self.vgg(fake_hr)
# For the combined model we will only train the generator
self.discriminator.trainable = False
# Discriminator determines validity of generated high res. images
validity = self.discriminator(fake_hr)
self.combined = Model([img_lr, img_hr], [validity, fake_features, validity, fake_hr])
self.combined.compile(loss=['binary_crossentropy', 'mse', g_loss, 'mae'],
loss_weights=[1e-3, 1, 5e-3, 1e-2],
optimizer=optimizer)
def build_vgg(self):
"""
Builds a pre-trained VGG19 model that outputs image features extracted at the
third block of the model
"""
vgg = VGG19(weights="imagenet")
vgg.layers[9].activation = None
# Set outputs to outputs of last conv. layer in block 3
# See architecture at: https://github.com/keras-team/keras/blob/master/keras/applications/vgg19.py
vgg.outputs = [vgg.layers[9].output]
img = Input(shape=self.hr_shape)
# Extract image features
img_features = vgg(img)
return Model(img, img_features)
def build_generator(self):
def residual_block(layer_input, filters):
"""Residual block described in paper"""
concatenated_inputs = layer_input
for _ in range(4):
d = Conv2D(filters, kernel_initializer=SmallInitialization(), kernel_size=3, strides=1, padding='same')(concatenated_inputs)
d = LeakyReLU()(d)
concatenated_inputs = Concatenate()([concatenated_inputs, d])
d = Conv2D(filters, kernel_initializer=SmallInitialization(), kernel_size=3, strides=1, padding='same')(concatenated_inputs)
return d
def RRDB(layer_input, filters, beta=0.2):
d_input = layer_input
for _ in range(3):
d = residual_block(d_input, filters)
d = Lambda(lambda x: x * beta)(d)
d_input = Add()([d_input, d])
d_input = Lambda(lambda x: x * beta)(d_input)
d = Add()([d_input, layer_input])
return d
def deconv2d(layer_input, activation = 'relu'):
"""Layers used during upsampling"""
u = UpSampling2D(size=2)(layer_input)
u = Conv2D(256, kernel_size=3, strides=1, padding='same', activation=activation)(u)
return u
# Low resolution image input
img_lr = Input(shape=self.lr_shape)
# Pre-residual block
c1 = Conv2D(64, kernel_size=3, strides=1, padding='same')(img_lr)
        # Propagate through residual blocks
r = RRDB(c1, self.gf)
for _ in range(self.n_residual_blocks - 1):
r = RRDB(r, self.gf)
# Post-residual block
c2 = Conv2D(64, kernel_size=3, strides=1, padding='same')(r)
c2 = Add()([c2, c1])
# Upsampling
u1 = deconv2d(c2)
u2 = deconv2d(u1, None)
# Generate high resolution output
gen_hr = Conv2D(self.channels, kernel_size=9, strides=1, padding='same', activation='tanh')(u2)
return Model(img_lr, gen_hr)
def build_discriminator(self):
def d_block(layer_input, filters, strides=1, bn=True):
"""Discriminator layer"""
d = Conv2D(filters, kernel_size=3, strides=strides, padding='same')(layer_input)
d = LeakyReLU(alpha=0.2)(d)
if bn:
d = BatchNormalization(momentum=0.8)(d)
return d
# Input img
d0 = Input(shape=self.hr_shape)
d1 = d_block(d0, self.df, bn=False)
d2 = d_block(d1, self.df, strides=2)
d3 = d_block(d2, self.df*2)
d4 = d_block(d3, self.df*2, strides=2)
d5 = d_block(d4, self.df*4)
d6 = d_block(d5, self.df*4, strides=2)
d7 = d_block(d6, self.df*8)
d8 = d_block(d7, self.df*8, strides=2)
d9 = Dense(self.df*16)(d8)
d10 = LeakyReLU(alpha=0.2)(d9)
d11 = Dense(1)(d10)
return Model(d0, d11)
def train(self, epochs, batch_size=1, sample_interval=50):
start_time = datetime.datetime.now()
def lrate_decay(epoch, lrate):
if (epoch+1) % int(2e+5) == 0:
return lrate * 0.5
return lrate
lrate_callback = LearningRateScheduler(lrate_decay)
lrate_callback.set_model(self.combined)
def named_logs(model, logs):
result = {}
for l in zip(model.metrics_names, logs):
result[l[0]] = l[1]
return result
tb_callback = TensorBoard(log_dir='./logs/generator_values', batch_size=batch_size, write_grads=False, write_images=True, write_graph=True)
tb_callback.set_model(self.combined)
tb_callback_disc = TensorBoard(log_dir='./logs/discriminator_values', batch_size=batch_size, write_grads=False, write_images=False, write_graph=True)
tb_callback_disc.set_model(self.discriminator)
tbi_callback = TensorBoardImage(self.generator, self.data_loader, batch_size, log_dir='./logs/images')
#checkpoint_cb = ModelCheckpoint('./checkpoints/weights.{epoch:02d}-{val_loss:.2f}.hdf5', save_best_only=True, period=50)
valid = np.ones((batch_size,) + self.disc_patch)
fake = np.zeros((batch_size,) + self.disc_patch)
y_true = np.vstack((valid, fake))
for epoch in range(epochs):
# ----------------------
# Train Discriminator
# ----------------------
lrate_callback.on_epoch_begin(epoch)
# Sample images and their conditioning counterparts
imgs_hr, imgs_lr = self.data_loader.load_data(batch_size)
# From low res. image generate high res. version
fake_hr = self.generator.predict(imgs_lr)
            imgs = np.vstack((imgs_hr, fake_hr))
#!/usr/bin/env python
import argparse
parser = argparse.ArgumentParser(description="Convert UVHDF5 files into CASA Measurement Set files.")
parser.add_argument("--HDF5", default="model.hdf5", help="The name of the UVHDF5 file you wish to import.")
parser.add_argument("--MS", help="The original MS data set, so that we can copy it to stuff in new values.")
parser.add_argument("--out", default="model.ms", help="The output MS dataset.")
parser.add_argument("--casac", action="store_true", help="Use the casac distribution instead of casapy")
parser.add_argument("--skip", action="store_true", help="Skip checking that the weights are the same.")
args,extras = parser.parse_known_args()
import numpy as np
import sys
import shutil
import h5py
import os
cc = 2.99792458e10 # [cm s^-1]
# Credit: parts of this file originated from the `vis_sample` repository,
# at https://github.com/AstroChem/vis_sample/blob/master/vis_sample/file_handling.py
# CASA interfacing code comes from <NAME>' casa-python and casa-data package
# commands for retrieving ms data are from <NAME> (@seanandrews)
ms_clone = args.MS
outfile = args.out
if args.casac:
try:
import casac
tb = casac.casac.table()
ms = casac.casac.ms()
except ImportError:
print("casac was not able to be imported, make sure all dependent packages are installed")
print("try: conda install -c pkgw casa-python casa-data")
sys.exit(1)
# Copy the original file so that we can then stuff our own visibilities into it
os.system("rm -rf " + outfile)
shutil.copytree(ms_clone, outfile)
# Use CASA ms tools to get the channel/spw info
ms.open(outfile)
spw_info = ms.getspectralwindowinfo()
nchan = spw_info["0"]["NumChan"]
npol = spw_info["0"]["NumCorr"]
ms.close()
# Use CASA table tools to get frequencies
tb.open(outfile + "/SPECTRAL_WINDOW")
ms_freqs = np.squeeze(tb.getcol("CHAN_FREQ"))
tb.close()
# Ascertain whether the frequencies were stored increasing or decreasing in the original MS
if np.all(np.diff(ms_freqs) > 0.0):
dnu_pos = True
elif np.all(np.diff(ms_freqs) < 0.0):
dnu_pos = False
else:
    raise RuntimeError("Measurement Set frequencies are not in strictly monotonically increasing or decreasing order.")
# Read the model from the HDF5 file
fid = h5py.File(args.HDF5, "r")
if dnu_pos:
freqs = fid["freqs"][:] # [Hz]
uu = fid["uu"][:,:] # [kilolam]
vv = fid["vv"][:,:] # [kilolam]
real = fid["real"][:,:] # [Jy]
imag = fid["imag"][:,:] # [Jy]
weight = fid["weight"][:,:] #[1/Jy^2]
unflagged = fid["flag"][:,:] # Bool
else:
freqs = fid["freqs"][:][::-1] # [Hz]
uu = fid["uu"][:][::-1,:] # [kilolam]
vv = fid["vv"][:][::-1,:] # [kilolam]
real = fid["real"][:][::-1,:] # [Jy]
imag = fid["imag"][:][::-1,:] # [Jy]
weight = fid["weight"][:][::-1,:] #[1/Jy^2]
unflagged = fid["flag"][:][::-1,:] # Bool
VV = real + 1.0j * imag # [Jy]
fid.close()
# Check to make sure the frequencies of the two datasets match
assert np.allclose(freqs, ms_freqs)
"""
Tensor Meshes
=============
Here we demonstrate various ways that models can be defined and mapped to
tensor meshes. Some things we consider are:
- Surface topography
- Adding structures of various shape to the model
- Parameterized models
- Models with 2 or more physical properties
"""
#########################################################################
# Import modules
# --------------
#
from discretize import TensorMesh
from SimPEG.Utils import mkvc, surface2ind_topo, ModelBuilder
from SimPEG import Maps
import numpy as np
import matplotlib.pyplot as plt
# sphinx_gallery_thumbnail_number = 3
#############################################
# Defining the mesh
# -----------------
#
# Here, we create the tensor mesh that will be used for all examples.
#
def make_example_mesh():
dh = 5.
hx = [(dh, 5, -1.3), (dh, 20), (dh, 5, 1.3)]
hy = [(dh, 5, -1.3), (dh, 20), (dh, 5, 1.3)]
hz = [(dh, 5, -1.3), (dh, 20), (dh, 5, 1.3)]
mesh = TensorMesh([hx, hy, hz], 'CCC')
return mesh
#############################################
# Halfspace model with topography at z = 0
# ----------------------------------------
#
# In this example we generate a half-space model. Since air cells remain
# constant during geophysical inversion, the number of model values we define
# should be equal to the number of cells lying below the surface. Here, we
# define the model (*model* ) as well as the mapping (*model_map* ) that goes from
# the model-space to the entire mesh.
#
mesh = make_example_mesh()
halfspace_value = 100.
# Find cells below topography and define mapping
air_value = 0.
ind_active = mesh.gridCC[:, 2] < 0.
model_map = Maps.InjectActiveCells(mesh, ind_active, air_value)
# Define the model
model = halfspace_value*np.ones(ind_active.sum())
# We can plot a slice of the model at Y=-2.5
fig = plt.figure(figsize=(5, 5))
ax = fig.add_subplot(111)
ind_slice = int(mesh.nCy/2)
mesh.plotSlice(model_map*model, normal='Y', ax=ax, ind=ind_slice, grid=True)
ax.set_title("Model slice at y = {} m".format(mesh.vectorCCy[ind_slice]))
plt.show()
#############################################
# Topography, a block and a vertical dyke
# ---------------------------------------
#
# In this example we create a model containing a block and a vertical dyke
# that strikes along the y direction. The utility *surface2ind_topo* is used
# to find the cells which lie below a set of xyz points defining a surface.
#
mesh = make_example_mesh()
background_value = 100.
dyke_value = 40.
block_value = 70.
# Define surface topography as an (N, 3) np.array. You could also load a file
# containing the xyz points
[xx, yy] = np.meshgrid(mesh.vectorNx, mesh.vectorNy)
zz = -3*np.exp((xx**2 + yy**2) / 75**2) + 40.
topo = np.c_[mkvc(xx), mkvc(yy), mkvc(zz)]
# Find cells below topography and define mapping
air_value = 0.
ind_active = surface2ind_topo(mesh, topo, 'N')
model_map = Maps.InjectActiveCells(mesh, ind_active, air_value)
# Define the model on subsurface cells
model = background_value*np.ones(ind_active.sum())
ind_dyke = (mesh.gridCC[ind_active, 0] > 20.) & (mesh.gridCC[ind_active, 0] < 40.)
model[ind_dyke] = dyke_value
ind_block = (
(mesh.gridCC[ind_active, 0] > -40.) & (mesh.gridCC[ind_active, 0] < -10.) &
(mesh.gridCC[ind_active, 1] > -30.) & (mesh.gridCC[ind_active, 1] < 30.) &
(mesh.gridCC[ind_active, 2] > -40.) & (mesh.gridCC[ind_active, 2] < 0.)
)
model[ind_block] = block_value
# Plot
fig = plt.figure(figsize=(5, 5))
ax = fig.add_subplot(111)
ind_slice = int(mesh.nCy/2)
mesh.plotSlice(model_map*model, normal='Y', ax=ax, ind=ind_slice, grid=True)
ax.set_title("Model slice at y = {} m".format(mesh.vectorCCy[ind_slice]))
plt.show()
#############################################
# Combo Maps
# ----------
#
# Here we demonstrate how combo maps can be used to create a single mapping
# from the model to the mesh. In this case, our model consists of
# log-conductivity values but we want to plot the resistivity. To accomplish
# this we must take the exponent of our model values, then take the reciprocal,
# then map from below surface cell to the mesh.
#
mesh = make_example_mesh()
background_value = np.log(1./100.)
dyke_value = np.log(1./40.)
block_value = np.log(1./70.)
# Define surface topography
[xx, yy] = np.meshgrid(mesh.vectorNx, mesh.vectorNy)
zz = -3*np.exp((xx**2 + yy**2) / 75**2) + 40.
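# NOTE: the remainder of this combo-map example is truncated here. A minimal
# sketch (assuming the same topography and active-cell setup as the previous
# section) of the combo map described above; maps compose right to left, so the
# model is exponentiated, inverted, and then injected into the full mesh:
#
#   model_map = (Maps.InjectActiveCells(mesh, ind_active, air_value) *
#                Maps.ReciprocalMap() * Maps.ExpMap())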
"""Module for calibrating the double spike composition."""
import numpy as np
from scipy.optimize import minimize
from .isodata import normalise_composition, realproptoratioprop, ratioproptorealprop
from .errors import calcratiocov
def spike_calibration(
isodata, spike_measurement, mixture_measurement, isoinv=None, standard=None
):
"""A simple least squares routine for calibrating a double spike from spike-standard mixtures.
Args:
isodata: object of class IsoData, e.g. IsoData('Fe')
spike_measurement (array): a matrix of beam intensities for direct measurements of
the spike. Columns correspond to the different isotopes e.g. for Fe, first
column is 54Fe, second is 56Fe, third is 57Fe, fourth is 58Fe. The matrix should
have the same number of columns as there are isotopes available.
mixture_measurement (array): a matrix of beam intensities for the measurements of
            spike-standard mixtures.
isoinv (array): the isotopes to use in the fitting, e.g [54, 56, 57, 58]. If
None this is read from isodata.
standard (array): standard composition. If None this is read from isodata.
Returns:
This routine estimates the spike composition given a direct measurement of the spike
and measurements of spike-standard mixtures. The routine minimises the chi-squared
misfit between the measurements and model, where measurements are weighted
according to the expected covariance given in isodata.errormodel['measured'].
Output is returned as a dictionary with the following fields:
calibrated_spike: the estimated spike composition
prop_mixture: the proportion of spike in the spike-sample mixtures
beta_mixture: the fractionation factors for the mixture measurements
beta_spike: the fractionation factors for the spike measurements
misfit: the chi-squared misfit
df: the degrees of freedom for the chi-squared statistic
"""
if isoinv is None:
if isodata.isoinv is None:
raise Exception("Inversion isotopes not specified.")
isoinv = isodata.isoinv
if standard is None:
standard = isodata.standard
# make sure working with numpy arrays
spike_measurement = np.array(spike_measurement)
mixture_measurement = np.array(mixture_measurement)
# make sure working with two dimensional arrays
if mixture_measurement.ndim == 1:
mixture_measurement = mixture_measurement[np.newaxis, :]
if spike_measurement.ndim == 1:
spike_measurement = spike_measurement[np.newaxis, :]
# normalise so have compositional vectors
spike_measurement = normalise_composition(spike_measurement)
mixture_measurement = normalise_composition(mixture_measurement)
    # choose the denominator isotope as the largest isotope in the spike
isoinv = isodata.isoindex(isoinv)
ix = np.argmax(spike_measurement[0, isoinv])
deno = isoinv[ix]
nume = isoinv[isoinv != deno]
isoinv = np.concatenate((np.array([deno]), nume))
invrat = isodata.invrat(isoinv)
An = isodata.ratio(standard, deno)
At = isodata.ratio(spike_measurement, deno)
Am = isodata.ratio(mixture_measurement, deno)
AP = np.log(isodata.ratio(isodata.mass, deno))
n_m = mixture_measurement.shape[0]
n_t = spike_measurement.shape[0]
emod_mixture = isodata.errormodel["measured"]
VAms = [
calcratiocov(mixture_measurement[i, :], emod_mixture, deno) for i in range(n_m)
]
emod_spike = isodata.errormodel["measured"]
VAts = [calcratiocov(spike_measurement[i, :], emod_spike, deno) for i in range(n_t)]
n = An[invrat]
P = AP[invrat]
t = At[:, invrat]
m = Am[:, invrat]
Vms = [V[np.ix_(invrat, invrat)] for V in VAms]
Vts = [V[np.ix_(invrat, invrat)] for V in VAts]
Sms = [np.linalg.inv(V) for V in Vms]
Sts = [np.linalg.inv(V) for V in Vts]
# form initial guess of model parameters. guess a 50-50 mix, with no fractionation
prop0 = 0.5
lambda0 = realproptoratioprop(prop0, At[0, :], An) * np.ones(m.shape[0])
beta0 = 0.0 * np.ones(m.shape[0])
betaT0 = 0.0 * np.ones(t.shape[0])
T0 = t[0, :]
z0 = np.concatenate((lambda0, beta0, betaT0, T0))
df = (t.shape[0] + m.shape[0]) * len(invrat) - len(z0) # degrees of freedom
res = minimize(
objective,
z0,
args=(m, t, P, n, Sms, Sts, n_m, n_t),
jac=True,
tol=1e-16,
options={"disp": False, "gtol": 1e-8, "eps": 1e-12},
)
z = res.x
misfit = res.fun
lambda_, beta, betat, T = z_to_params(z, P, n_m, n_t)
# Reconstruct spike vector
calibrated_spike = np.zeros_like(spike_measurement[0, :])
calibrated_spike[deno] = 1.0
calibrated_spike[nume] = T
# For isotopes that were not used in inversion, work out an expectation based on known betat
isonum = np.arange(isodata.nisos)
unused = np.array(list(set(isonum).difference(set(isoinv))))
if len(unused) > 0:
expected_spike_measurement = np.mean(spike_measurement, axis=0)
expected_betat = np.mean(betat)
expected_spike = expected_spike_measurement * np.exp(
-np.log(isodata.mass) * expected_betat
)
expected_spike = normalise_composition(expected_spike)
expected_unused = expected_spike[unused] / expected_spike[deno]
calibrated_spike[unused] = expected_unused
calibrated_spike = normalise_composition(calibrated_spike)
AT = isodata.ratio(calibrated_spike, deno)
prop = [ratioproptorealprop(lam, AT, An) for lam in lambda_]
out = {
"calibrated_spike": calibrated_spike,
"prop_mixture": prop,
"beta_mixture": beta,
"beta_spike": betat,
"misfit": misfit,
"df": df,
}
return out
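# Minimal usage sketch (hypothetical beam-intensity arrays; IsoData lives in the
# package's isodata module and is not imported in this file):
#
#   isodata = IsoData('Fe')
#   out = spike_calibration(isodata, spike_measurement, mixture_measurement,
#                           isoinv=[54, 56, 57, 58])
#   out['calibrated_spike']   # estimated spike composition
#   out['prop_mixture']       # proportion of spike in each mixture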
def objective(z, m, t, P, n, Wm, Wt, n_m, n_t):
"""The objective function and its Jacobian for the chi-squared minimization."""
me, te = mt_expected(z, P, n, n_m, n_t)
res_m = m - me
res_t = t - te
obs = []
for i in range(res_m.shape[0]):
rm = res_m[i, :][np.newaxis, :]
obs.append((rm @ Wm[i] @ rm.T)[0][0])
for i in range(res_t.shape[0]):
rt = res_t[i, :][np.newaxis, :]
obs.append((rt @ Wt[i] @ rt.T)[0][0])
ob = sum(obs)
dmdz, dtdz = dmt_expected_dz(z, P, n, n_m, n_t)
dob_dzs = []
for i in range(res_m.shape[0]):
rm = res_m[i, :][np.newaxis, :]
dmidz = dmdz[i, :, :]
dob_dzs.append(-(2 * rm @ Wm[i] @ dmidz)[0])
for i in range(res_t.shape[0]):
rt = res_t[i, :][np.newaxis, :]
dtidz = dtdz[i, :, :]
dob_dzs.append(-(2 * rt @ Wt[i] @ dtidz)[0])
dob_dz = np.vstack(dob_dzs)
dob_dz = np.sum(dob_dz, axis=0)
return ob, dob_dz
def individual_m_expected(lambda_, beta, T, P, n):
"""Mixture measurement."""
return np.exp(beta * P) * (lambda_ * T + (1 - lambda_) * n)
def dindividual_m_expected_dlambda(lambda_, beta, T, P, n):
"""dm/dlambda."""
return np.exp(beta * P) * (T - n)
def dindividual_m_expected_dbeta(lambda_, beta, T, P, n):
"""dm/dbeta."""
return P * np.exp(beta * P) * (lambda_ * T + (1 - lambda_) * n)
def dindividual_m_expected_dT(lambda_, beta, T, P, n):
"""dm/dT."""
return np.diag(np.exp(beta * P) * lambda_)
def individual_t_expected(betat, T, P):
"""Spike measurement."""
return np.exp(betat * P) * T
def dindividual_t_expected_dbetat(betat, T, P):
"""dt/dbetat."""
return P * np.exp(betat * P) * T
def dindividual_t_expected_dT(betat, T, P):
"""dt/dT."""
    return np.diag(np.exp(betat * P))
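# A quick finite-difference sanity check of the analytic derivatives above
# (hypothetical values, numpy only), e.g. dm/dbeta for individual_m_expected:
#
#   eps = 1e-7
#   lam, beta = 0.5, 0.1
#   T = np.array([1.0, 2.0]); P = np.array([0.01, 0.02]); n = np.array([0.5, 1.5])
#   numeric = (individual_m_expected(lam, beta + eps, T, P, n)
#              - individual_m_expected(lam, beta, T, P, n)) / eps
#   analytic = dindividual_m_expected_dbeta(lam, beta, T, P, n)
#   assert np.allclose(numeric, analytic, rtol=1e-4)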
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 31 03:00:36 2020
@author: hp
"""
import cv2
import numpy as np
import math
from face_detector import get_face_detector, find_faces
from face_landmarks import get_landmark_model, detect_marks
def get_2d_points(img, rotation_vector, translation_vector, camera_matrix, val):
"""Return the 3D points present as 2D for making annotation box"""
point_3d = []
dist_coeffs = np.zeros((4,1))
rear_size = val[0]
rear_depth = val[1]
point_3d.append((-rear_size, -rear_size, rear_depth))
point_3d.append((-rear_size, rear_size, rear_depth))
point_3d.append((rear_size, rear_size, rear_depth))
point_3d.append((rear_size, -rear_size, rear_depth))
point_3d.append((-rear_size, -rear_size, rear_depth))
front_size = val[2]
front_depth = val[3]
point_3d.append((-front_size, -front_size, front_depth))
point_3d.append((-front_size, front_size, front_depth))
point_3d.append((front_size, front_size, front_depth))
point_3d.append((front_size, -front_size, front_depth))
point_3d.append((-front_size, -front_size, front_depth))
point_3d = np.array(point_3d, dtype=np.float).reshape(-1, 3)
# Map to 2d img points
(point_2d, _) = cv2.projectPoints(point_3d,
rotation_vector,
translation_vector,
camera_matrix,
dist_coeffs)
point_2d = np.int32(point_2d.reshape(-1, 2))
return point_2d
def draw_annotation_box(img, rotation_vector, translation_vector, camera_matrix,
rear_size=300, rear_depth=0, front_size=500, front_depth=400,
color=(255, 255, 0), line_width=2):
"""
    Draw a 3D annotation box on the face for head pose estimation
Parameters
----------
    img : np.uint8
Original Image.
rotation_vector : Array of float64
Rotation Vector obtained from cv2.solvePnP
translation_vector : Array of float64
Translation Vector obtained from cv2.solvePnP
camera_matrix : Array of float64
The camera matrix
rear_size : int, optional
Size of rear box. The default is 300.
rear_depth : int, optional
The default is 0.
front_size : int, optional
Size of front box. The default is 500.
front_depth : int, optional
Front depth. The default is 400.
color : tuple, optional
The color with which to draw annotation box. The default is (255, 255, 0).
line_width : int, optional
line width of lines drawn. The default is 2.
Returns
-------
None.
"""
rear_size = 1
rear_depth = 0
front_size = img.shape[1]
front_depth = front_size*2
val = [rear_size, rear_depth, front_size, front_depth]
point_2d = get_2d_points(img, rotation_vector, translation_vector, camera_matrix, val)
# # Draw all the lines
cv2.polylines(img, [point_2d], True, color, line_width, cv2.LINE_AA)
cv2.line(img, tuple(point_2d[1]), tuple(
point_2d[6]), color, line_width, cv2.LINE_AA)
cv2.line(img, tuple(point_2d[2]), tuple(
point_2d[7]), color, line_width, cv2.LINE_AA)
cv2.line(img, tuple(point_2d[3]), tuple(
point_2d[8]), color, line_width, cv2.LINE_AA)
def head_pose_points(img, rotation_vector, translation_vector, camera_matrix):
"""
Get the points to estimate head pose sideways
Parameters
----------
    img : np.uint8
Original Image.
rotation_vector : Array of float64
Rotation Vector obtained from cv2.solvePnP
translation_vector : Array of float64
Translation Vector obtained from cv2.solvePnP
camera_matrix : Array of float64
The camera matrix
Returns
-------
(x, y) : tuple
Coordinates of line to estimate head pose
"""
rear_size = 1
rear_depth = 0
front_size = img.shape[1]
front_depth = front_size*2
val = [rear_size, rear_depth, front_size, front_depth]
point_2d = get_2d_points(img, rotation_vector, translation_vector, camera_matrix, val)
y = (point_2d[5] + point_2d[8])//2
x = point_2d[2]
return (x, y)
def eye_on_mask(mask, side, shape):
"""
Create ROI on mask of the size of eyes and also find the extreme points of each eye
Parameters
----------
mask : np.uint8
Blank mask to draw eyes on
side : list of int
the facial landmark numbers of eyes
shape : Array of uint32
Facial landmarks
Returns
-------
mask : np.uint8
Mask with region of interest drawn
[l, t, r, b] : list
left, top, right, and bottommost points of ROI
"""
points = [shape[i] for i in side]
points = np.array(points, dtype=np.int32)
mask = cv2.fillConvexPoly(mask, points, 255)
l = points[0][0]
t = (points[1][1]+points[2][1])//2
r = points[3][0]
b = (points[4][1]+points[5][1])//2
return mask, [l, t, r, b]
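# Example sketch (assumes 68-point dlib-style landmarks in `shape`; the left-eye
# landmark indices match the `left` list defined further below):
#
#   mask = np.zeros(img.shape[:2], dtype=np.uint8)
#   mask, end_points = eye_on_mask(mask, [36, 37, 38, 39, 40, 41], shape)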
def find_eyeball_position(end_points, cx, cy):
"""Find and return the eyeball positions, i.e. left or right or top or normal"""
x_ratio = (end_points[0] - cx)/(cx - end_points[2])
y_ratio = (cy - end_points[1])/(end_points[3] - cy)
text = ''
if x_ratio > 3 and y_ratio < 1:
font = cv2.FONT_HERSHEY_SIMPLEX
# text = 'diagonal up left'
# cv2.putText(img, text, (30, 30), font,
# 1, (0, 255, 0), 2, cv2.LINE_AA)
if x_ratio < .33 and y_ratio < 1:
font = cv2.FONT_HERSHEY_SIMPLEX
# text = 'diagonal up right'
# cv2.putText(img, text, (30, 30), font,
# 1, (0, 255, 0), 2, cv2.LINE_AA)
if x_ratio > 3:
return 1
elif x_ratio < 0.33:
return 2
elif y_ratio < 0.33:
return 3
else:
return 0
def find_eyeroll(end_points, cx, cy, img, face_pos):
"""Find and return the eyeball positions, i.e. left or right or top or normal"""
x_ratio = (end_points[0] - cx)/(cx - end_points[2])
y_ratio = (cy - end_points[1])/(end_points[3] - cy)
text = 'eyeroll'
if face_pos == 'in_range':
if x_ratio > 3 and y_ratio < 1:
print('eyeroll')
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(img, text, (300, 60), font,
4, (0, 255, 0), 3, cv2.LINE_AA)
return True
if x_ratio < .33 and y_ratio < 1:
print('eyeroll')
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(img, text, (300, 60), font,
4, (0, 255, 0), 3, cv2.LINE_AA)
return True
elif face_pos == 'out_range':
if x_ratio > 4 and y_ratio < 2:
print('eyeroll')
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(img, text, (300, 60), font,
4, (0, 255, 0), 3, cv2.LINE_AA)
return True
if x_ratio < -.33 and y_ratio < 2:
print('eyeroll')
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(img, text, (300, 60), font,
4, (0, 255, 0), 3, cv2.LINE_AA)
return True
else:
return False
def contouring(thresh, mid, img, end_points, face_pos, right=False):
"""
Find the largest contour on an image divided by a midpoint and subsequently the eye position
Parameters
----------
thresh : Array of uint8
Thresholded image of one side containing the eyeball
mid : int
The mid point between the eyes
img : Array of uint8
Original Image
end_points : list
        List containing the extreme points of eye
right : boolean, optional
Whether calculating for right eye or left eye. The default is False.
Returns
-------
pos: int
the position where eyeball is:
0 for normal
1 for left
2 for right
3 for up
"""
cnts, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)
try:
cnt = max(cnts, key = cv2.contourArea)
M = cv2.moments(cnt)
cx = int(M['m10']/M['m00'])
cy = int(M['m01']/M['m00'])
if right:
cx += mid
cv2.circle(img, (cx, cy), 4, (0, 255, 0), 2)
pos = find_eyeball_position(end_points, cx, cy)
eyeroll = find_eyeroll(end_points, cx, cy, img, face_pos)
return pos
except:
pass
def process_thresh(thresh):
"""
Preprocessing the thresholded image
Parameters
----------
thresh : Array of uint8
Thresholded image to preprocess
Returns
-------
thresh : Array of uint8
Processed thresholded image
"""
thresh = cv2.erode(thresh, None, iterations=2)
thresh = cv2.dilate(thresh, None, iterations=4)
thresh = cv2.medianBlur(thresh, 3)
thresh = cv2.bitwise_not(thresh)
return thresh
def print_eye_pos(img, left, right):
"""
Print the side where eye is looking and display on image
Parameters
----------
img : Array of uint8
Image to display on
left : int
Position obtained of left eye.
right : int
Position obtained of right eye.
Returns
-------
None.
"""
# if left == right and left != 0:
# text = ''
# if left == 1:
# print('Looking left')
# text = 'Looking left'
# elif left == 2:
# print('Looking right')
# text = 'Looking right'
# elif left == 3:
# print('Looking up')
# text = 'Looking up'
# font = cv2.FONT_HERSHEY_SIMPLEX
# cv2.putText(img, text, (30, 30), font,
# 1, (0, 255, 255), 2, cv2.LINE_AA)
face_model = get_face_detector()
landmark_model = get_landmark_model()
left = [36, 37, 38, 39, 40, 41]
right = [42, 43, 44, 45, 46, 47]
cap = cv2.VideoCapture(0)
ret, img = cap.read()
thresh = img.copy()
cv2.namedWindow('image')
kernel = np.ones((9, 9), np.uint8)
# from sequitur.models import LSTM_AE
import torch
from torch import nn
import numpy as np
# from sequitur import quick_train
from torch.utils.data.sampler import SubsetRandomSampler
from torch.utils.data import DataLoader
from torch.nn import MSELoss
from models.AED.simpleAED import Encoder, Decoder, RecurrentAutoencoder
import copy
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from torch import nn, optim
import pandas as pd
import torch.nn.functional as F
#%%
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#load the data
per_unit = np.load('data/aug_all_per_unit_806_824_836_846.npy')
labels = np.load('data/aug_labels_806_824_836_846.npy')
# normalize data
new_data = []
for i in range(per_unit.shape[-1]):
mx = np.max(per_unit[:, :, i], axis=1)
# mn = np.min(per_unit[:, :, i], axis=1)
new_data.append((per_unit[:, :, i])/(mx[:, None]))
new_data = np.array(new_data)
new_data = np.swapaxes(new_data, 0, 1)
per_unit = np.swapaxes(new_data, 2, 1)
n_seq, seq_len, n_features = per_unit.shape
num_examples = per_unit.shape[0]
num_train = int(num_examples * 0.9)
train_selector = np.random.choice(num_examples, num_train, replace=False)
test_selector = np.setdiff1d(np.arange(num_examples), train_selector)
train_sampler = SubsetRandomSampler(torch.from_numpy(train_selector))
test_sampler = SubsetRandomSampler(torch.from_numpy(test_selector))
b_size = 100
train_dataloader = DataLoader(
per_unit, sampler=train_sampler, batch_size=b_size, drop_last=False)
test_dataloader = DataLoader(
per_unit, sampler=test_sampler, batch_size=b_size, drop_last=False)
#%%
#define and train a model and save
model = RecurrentAutoencoder(125, 36, 32)
model.to(device)
model.float()
def train_model(model, train_dataset, val_dataset, n_epochs):
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
    criterion = nn.MSELoss(reduction='mean')
history = dict(train=[], val=[])
best_model_wts = copy.deepcopy(model.state_dict())
best_loss = 10000.0
for epoch in range(1, n_epochs + 1):
model = model.train()
train_losses = []
for seq_true in train_dataset:
seq_true = seq_true.to(device)
optimizer.zero_grad()
seq_pred = model(seq_true)
loss = criterion(seq_pred.float(), seq_true.float())
# print(epoch, loss)
loss.backward()
optimizer.step()
train_losses.append(loss.item())
val_losses = []
model = model.eval()
with torch.no_grad():
for seq_true in val_dataset:
seq_true = seq_true.to(device)
seq_pred = model(seq_true)
loss = criterion(seq_pred.float(), seq_true.float())
val_losses.append(loss.item())
train_loss = np.mean(train_losses)
val_loss = np.mean(val_losses)
history['train'].append(train_loss)
history['val'].append(val_loss)
if val_loss < best_loss:
best_loss = val_loss
best_model_wts = copy.deepcopy(model.state_dict())
print(f'Epoch {epoch}: train loss {train_loss} val loss {val_loss}')
model.load_state_dict(best_model_wts)
return model.eval(), history
#%%
model, history = train_model(
model,
train_dataloader,
test_dataloader,
n_epochs=100
)
#%%
model_path = 'models/AED/806_824_836_846_stacked'
torch.save(model, model_path)
#%%
def show_detail(data, pmu, type):
fig, (ax0, ax1, ax2) = plt.subplots(nrows=3, constrained_layout=True)
for k in range(3):
ax0.plot(data[5:,pmu * k])
ax1.plot(data[5:,pmu * k + 3])
ax2.plot(data[5:,pmu * k + 6])
ax0.set_xlabel('timesteps')
ax0.set_ylabel('voltage magnitude')
ax0.legend(['v1', 'v2', 'v3'])
ax1.set_xlabel('timesteps')
ax1.set_ylabel('current magnitude')
ax1.legend(['i1', 'i2', 'i3'])
ax2.set_xlabel('timesteps')
ax2.set_ylabel('angle diff')
ax2.legend(['t1', 't2', 't3'])
    # use suptitle so the 'real'/'pred' label is actually rendered on the figure
    fig.suptitle(type)
return fig
#%%
pmu = 1
for ev in range(5):
# ev = 100
data = per_unit[ev]
data = torch.from_numpy(data).to(device).reshape(1, data.shape[0], data.shape[1])
pred = model(data)
def torch_to_numpy_cpu(data):
return data.cpu()[0].detach().numpy()
data = torch_to_numpy_cpu(data)
pred = torch_to_numpy_cpu(pred)
fig1 = show_detail(data, pmu, 'real')
plt.show()
fig2 = show_detail(pred, pmu, 'pred')
plt.show()
#%%
#load the trained model
model_path = 'models/AED/806_824_836_846_stacked'
model = torch.load(model_path)
model.eval()
#%%
labels = np.load('data/aug_labels_806_824_836_846.npy')
labels = pd.DataFrame({'labels': labels})
labels = labels['labels'].astype('category').cat.codes.to_numpy()
#%%
all_data = torch.from_numpy(per_unit).to(device)
# get the latent variables
#%%
selected_latent = model.encoder(all_data[train_selector[2387:3580]]).cpu().detach().numpy()
#%%
pmus = [2, 8, 19, 23]
selected_latents = []
for g in pos_graphs:
selected_latents.append(torch.ravel(g.ndata['latent'][pmus]).detach().cpu().numpy())
selected_latents = np.array(selected_latents)
np.random.seed(0)
# evs = np.random.randint(0,10000,1000)
selected_latent = selected_latents
selected_labels = labels
#%%
# clustering results from several clustering models; the learned latent-space
# representation is the important part here, not the specific clustering model
def all_clustering_models(latent, labels, cluster_num):
from sklearn import metrics
from sklearn.mixture import GaussianMixture
from sklearn.cluster import AgglomerativeClustering
#gmm
pred_labels = GaussianMixture(n_components=cluster_num, random_state=0).fit_predict(latent)
    print('train accuracy (ARS) for gmm', metrics.adjusted_rand_score(labels, pred_labels))
#AgglomerativeClustering
pred_labels = AgglomerativeClustering(n_clusters=cluster_num).fit_predict(latent)
    print('train accuracy (ARS) for AgglomerativeClustering', metrics.adjusted_rand_score(labels, pred_labels))
from sklearn.cluster import DBSCAN
pred_labels = DBSCAN().fit_predict(latent)
    print('train accuracy (ARS) for DBSCAN', metrics.adjusted_rand_score(labels, pred_labels))
from sklearn.cluster import KMeans
pred_labels = KMeans(n_clusters=cluster_num, random_state=0).fit_predict(latent)
    print('train accuracy (ARS) for KMeans', metrics.adjusted_rand_score(labels, pred_labels))
# from sklearn.cluster import SpectralClustering
# pred_labels = SpectralClustering(n_clusters=cluster_num, assign_labels="discretize", random_state=0).fit_predict(latent)
    # print('train accuracy (ARS) for SpectralClustering', metrics.adjusted_rand_score(labels, pred_labels))
cluster_num = 9
all_clustering_models(selected_latent, selected_labels, cluster_num)
#%%
#show TSNE of the clusters based on the selected latent
from sklearn.manifold import TSNE
X_embedded = TSNE(n_components=2).fit_transform(selected_latent)
from matplotlib.colors import ListedColormap
#%%
import matplotlib
import matplotlib.pyplot as plt
pad = 5
xyticks_num = 10
unique_labels = np.unique(selected_labels)
clrs = ['r','g','b','c','m','y','k','orange','lime']
values = [unique_labels.tolist().index(i) for i in selected_labels]
plt.style.use('default')
matplotlib.rcParams['figure.figsize'] = 20, 12
# colors = ListedColormap(['r','b','g'])
scatter = plt.scatter(X_embedded[:, 0], X_embedded[:, 1], c=values, s=100, cmap='tab10')
plt.title('TSNE for the embeddings of stacked AED with DEC')
plt.xlabel('Feature 1')
plt.ylabel('Feature 2')
plt.xlim([np.min(X_embedded[:, 0])-pad,np.max(X_embedded[:, 0]) + pad])
plt.ylim([np.min(X_embedded[:, 1])-pad, np.max(X_embedded[:, 1]) + pad])
from __future__ import division
import glob
import numpy as NP
from functools import reduce
import numpy.ma as MA
import progressbar as PGB
import h5py
import healpy as HP
import warnings
import copy
import astropy.cosmology as CP
from astropy.time import Time, TimeDelta
from astropy.io import fits
from astropy import units as U
from astropy import constants as FCNST
from scipy import interpolate
from astroutils import DSP_modules as DSP
from astroutils import constants as CNST
from astroutils import nonmathops as NMO
from astroutils import mathops as OPS
from astroutils import lookup_operations as LKP
import prisim
from prisim import interferometry as RI
from prisim import primary_beams as PB
from prisim import delay_spectrum as DS
try:
from pyuvdata import UVBeam
except ImportError:
uvbeam_module_found = False
else:
uvbeam_module_found = True
prisim_path = prisim.__path__[0]+'/'
cosmoPlanck15 = CP.Planck15 # Planck 2015 cosmology
cosmo100 = cosmoPlanck15.clone(name='Modified Planck 2015 cosmology with h=1.0', H0=100.0) # Modified Planck 2015 cosmology with h=1.0, H= 100 km/s/Mpc
################################################################################
def write_PRISim_bispectrum_phase_to_npz(infile_prefix, outfile_prefix,
triads=None, bltriplet=None,
hdf5file_prefix=None, infmt='npz',
datakey='noisy', blltol=0.1):
"""
----------------------------------------------------------------------------
Write closure phases computed in a PRISim simulation to a NPZ file with
appropriate format for further analysis.
Inputs:
infile_prefix
[string] HDF5 file or NPZ file created by a PRISim simulation or
its replication respectively. If infmt is specified as 'hdf5',
then hdf5file_prefix will be ignored and all the observing
info will be read from here. If infmt is specified as 'npz',
then hdf5file_prefix needs to be specified in order to read the
observing parameters.
triads [list or numpy array or None] Antenna triads given as a list of
3-element lists or a ntriads x 3 array. Each element in the
inner list is an antenna label. They will be converted to
strings internally. If set to None, then all triads determined
by bltriplet will be used. If specified, then inputs in blltol
and bltriplet will be ignored.
bltriplet [numpy array or None] 3x3 numpy array containing the 3 baseline
vectors. The first axis denotes the three baselines, the second
axis denotes the East, North, Up coordinates of the baseline
vector. Units are in m. Will be used only if triads is set to
None.
outfile_prefix
[string] Prefix of the NPZ file. It will be appended by
'_noiseless', '_noisy', and '_noise' and further by extension
'.npz'
infmt [string] Format of the input file containing visibilities.
Accepted values are 'npz' (default), and 'hdf5'. If infmt is
specified as 'npz', then hdf5file_prefix also needs to be
specified for reading the observing parameters
datakey [string] Specifies which -- 'noiseless', 'noisy' (default), or
'noise' -- visibilities are to be written to the output. If set
                to None, it will default to 'noisy'. The datakey string will also be added
as a suffix in the output file.
blltol [scalar] Baseline length tolerance (in m) for matching baseline
vectors in triads. It must be a scalar. Default = 0.1 m. Will
be used only if triads is set to None and bltriplet is to be
used.
----------------------------------------------------------------------------
"""
if not isinstance(infile_prefix, str):
raise TypeError('Input infile_prefix must be a string')
if not isinstance(outfile_prefix, str):
raise TypeError('Input outfile_prefix must be a string')
if (triads is None) and (bltriplet is None):
raise ValueError('One of triads or bltriplet must be set')
if triads is None:
if not isinstance(bltriplet, NP.ndarray):
raise TypeError('Input bltriplet must be a numpy array')
if not isinstance(blltol, (int,float)):
raise TypeError('Input blltol must be a scalar')
if bltriplet.ndim != 2:
raise ValueError('Input bltriplet must be a 2D numpy array')
if bltriplet.shape[0] != 3:
raise ValueError('Input bltriplet must contain three baseline vectors')
if bltriplet.shape[1] != 3:
            raise ValueError('Input bltriplet must contain baseline vectors along three coordinates in the ENU frame')
else:
if not isinstance(triads, (list, NP.ndarray)):
raise TypeError('Input triads must be a list or numpy array')
triads = NP.asarray(triads).astype(str)
if not isinstance(infmt, str):
raise TypeError('Input infmt must be a string')
if infmt.lower() not in ['npz', 'hdf5']:
raise ValueError('Input file format must be npz or hdf5')
if infmt.lower() == 'npz':
if not isinstance(hdf5file_prefix, str):
raise TypeError('If infmt is npz, then hdf5file_prefix needs to be specified for observing parameters information')
if datakey is None:
datakey = ['noisy']
if isinstance(datakey, str):
datakey = [datakey]
elif not isinstance(datakey, list):
raise TypeError('Input datakey must be a list')
for dkey in datakey:
if dkey.lower() not in ['noiseless', 'noisy', 'noise']:
raise ValueError('Invalid input found in datakey')
if infmt.lower() == 'hdf5':
fullfnames_with_extension = glob.glob(infile_prefix + '*' + infmt.lower())
fullfnames_without_extension = [fname.split('.hdf5')[0] for fname in fullfnames_with_extension]
else:
fullfnames_without_extension = [infile_prefix]
if len(fullfnames_without_extension) == 0:
raise IOError('No input files found with pattern {0}'.format(infile_prefix))
try:
if infmt.lower() == 'hdf5':
simvis = RI.InterferometerArray(None, None, None, init_file=fullfnames_without_extension[0])
else:
simvis = RI.InterferometerArray(None, None, None, init_file=hdf5file_prefix)
except:
raise IOError('Input PRISim file does not contain a valid PRISim output')
latitude = simvis.latitude
longitude = simvis.longitude
location = ('{0:.5f}d'.format(longitude), '{0:.5f}d'.format(latitude))
last = simvis.lst / 15.0 / 24.0 # from degrees to fraction of day
last = last.reshape(-1,1)
daydata = NP.asarray(simvis.timestamp[0]).ravel()
if infmt.lower() == 'npz':
simvisinfo = NP.load(fullfnames_without_extension[0]+'.'+infmt.lower())
skyvis = simvisinfo['noiseless'][0,...]
vis = simvisinfo['noisy']
noise = simvisinfo['noise']
n_realize = vis.shape[0]
else:
n_realize = len(fullfnames_without_extension)
cpdata = {}
outfile = {}
for fileind in range(n_realize):
if infmt.lower() == 'npz':
simvis.vis_freq = vis[fileind,...]
simvis.vis_noise_freq = noise[fileind,...]
else:
simvis = RI.InterferometerArray(None, None, None, init_file=fullfnames_without_extension[fileind])
if fileind == 0:
if triads is None:
triads, bltriplets = simvis.getThreePointCombinations(unique=False)
# triads = NP.asarray(prisim_BSP_info['antenna_triplets']).reshape(-1,3)
# bltriplets = NP.asarray(prisim_BSP_info['baseline_triplets'])
triads = NP.asarray(triads).reshape(-1,3)
bltriplets = NP.asarray(bltriplets)
blinds = []
matchinfo = LKP.find_NN(bltriplet, bltriplets.reshape(-1,3), distance_ULIM=blltol)
revind = []
for blnum in NP.arange(bltriplet.shape[0]):
if len(matchinfo[0][blnum]) == 0:
revind += [blnum]
if len(revind) > 0:
flip_factor = NP.ones(3, dtype=NP.float)
flip_factor[NP.array(revind)] = -1
rev_bltriplet = bltriplet * flip_factor.reshape(-1,1)
matchinfo = LKP.find_NN(rev_bltriplet, bltriplets.reshape(-1,3), distance_ULIM=blltol)
for blnum in NP.arange(bltriplet.shape[0]):
if len(matchinfo[0][blnum]) == 0:
raise ValueError('Some baselines in the triplet are not found in the model triads')
triadinds = []
for blnum in NP.arange(bltriplet.shape[0]):
triadind, blind = NP.unravel_index(NP.asarray(matchinfo[0][blnum]), (bltriplets.shape[0], bltriplets.shape[1]))
triadinds += [triadind]
triadind_intersection = NP.intersect1d(triadinds[0], NP.intersect1d(triadinds[1], triadinds[2]))
if triadind_intersection.size == 0:
raise ValueError('Specified triad not found in the PRISim model. Try other permutations of the baseline vectors and/or reverse individual baseline vectors in the triad before giving up.')
triads = triads[triadind_intersection,:]
selected_bltriplets = bltriplets[triadind_intersection,:,:].reshape(-1,3,3)
prisim_BSP_info = simvis.getClosurePhase(antenna_triplets=triads.tolist(),
delay_filter_info=None,
specsmooth_info=None,
spectral_window_info=None,
unique=False)
if fileind == 0:
            triads = NP.asarray(prisim_BSP_info['antenna_triplets']).reshape(-1,3) # Re-establish the triads returned after the first iteration (to account for any order flips)
for outkey in datakey:
if fileind == 0:
outfile[outkey] = outfile_prefix + '_{0}.npz'.format(outkey)
if outkey == 'noiseless':
if fileind == 0:
# cpdata = prisim_BSP_info['closure_phase_skyvis'][triadind_intersection,:,:][NP.newaxis,...]
cpdata[outkey] = prisim_BSP_info['closure_phase_skyvis'][NP.newaxis,...]
else:
# cpdata = NP.concatenate((cpdata, prisim_BSP_info['closure_phase_skyvis'][triadind_intersection,:,:][NP.newaxis,...]), axis=0)
cpdata[outkey] = NP.concatenate((cpdata[outkey], prisim_BSP_info['closure_phase_skyvis'][NP.newaxis,...]), axis=0)
if outkey == 'noisy':
if fileind == 0:
# cpdata = prisim_BSP_info['closure_phase_vis'][triadind_intersection,:,:][NP.newaxis,...]
cpdata[outkey] = prisim_BSP_info['closure_phase_vis'][NP.newaxis,...]
else:
# cpdata = NP.concatenate((cpdata, prisim_BSP_info['closure_phase_vis'][triadind_intersection,:,:][NP.newaxis,...]), axis=0)
cpdata[outkey] = NP.concatenate((cpdata[outkey], prisim_BSP_info['closure_phase_vis'][NP.newaxis,...]), axis=0)
if outkey == 'noise':
if fileind == 0:
# cpdata = prisim_BSP_info['closure_phase_noise'][triadind_intersection,:,:]
cpdata[outkey] = prisim_BSP_info['closure_phase_noise'][NP.newaxis,:,:]
else:
# cpdata = NP.concatenate((cpdata, prisim_BSP_info['closure_phase_noise'][triadind_intersection,:,:][NP.newaxis,...]), axis=0)
cpdata[outkey] = NP.concatenate((cpdata[outkey], prisim_BSP_info['closure_phase_noise'][NP.newaxis,...]), axis=0)
for outkey in datakey:
cpdata[outkey] = NP.rollaxis(cpdata[outkey], 3, start=0)
flagsdata = NP.zeros(cpdata[outkey].shape, dtype=NP.bool)
NP.savez_compressed(outfile[outkey], closures=cpdata[outkey],
flags=flagsdata, triads=triads,
last=last+NP.zeros((1,n_realize)),
days=daydata+NP.arange(n_realize))
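# Minimal usage sketch (hypothetical file prefixes and a hypothetical ~14.6 m
# equilateral baseline triplet in ENU coordinates):
#
#   bltriplet = NP.asarray([[14.6, 0.0, 0.0],
#                           [-7.3, 12.6, 0.0],
#                           [-7.3, -12.6, 0.0]])
#   write_PRISim_bispectrum_phase_to_npz('/path/to/prisim_run', '/path/to/cp_out',
#                                        bltriplet=bltriplet, infmt='hdf5',
#                                        datakey='noisy', blltol=0.1)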
################################################################################
def loadnpz(npzfile, longitude=0.0, latitude=0.0, lst_format='fracday'):
"""
----------------------------------------------------------------------------
Read an input NPZ file containing closure phase data output from CASA and
return a dictionary
Inputs:
npzfile [string] Input NPZ file including full path containing closure
phase data. It must have the following files/keys inside:
'closures' [numpy array] Closure phase (radians). It is of
shape (nlst,ndays,ntriads,nchan)
'triads' [numpy array] Array of triad tuples, of shape
(ntriads,3)
'flags' [numpy array] Array of flags (boolean), of shape
(nlst,ndays,ntriads,nchan)
'last' [numpy array] Array of LST for each day (CASA units
which is MJD+6713). Shape is (nlst,ndays)
'days' [numpy array] Array of days, shape is (ndays,)
'averaged_closures'
[numpy array] optional array of closure phases
averaged across days. Shape is (nlst,ntriads,nchan)
'std_dev_lst'
[numpy array] optional array of standard deviation
of closure phases across days. Shape is
(nlst,ntriads,nchan)
'std_dev_triads'
[numpy array] optional array of standard deviation
of closure phases across triads. Shape is
(nlst,ndays,nchan)
latitude [scalar int or float] Latitude of site (in degrees).
Default=0.0 deg.
longitude [scalar int or float] Longitude of site (in degrees).
Default=0.0 deg.
lst_format [string] Specifies the format/units in which the 'last' key
is to be interpreted. If set to 'hourangle', the LST is in
units of hour angle. If set to 'fracday', the fractional
portion of the 'last' value is the LST in units of days.
Output:
cpinfo [dictionary] Contains one top level keys, namely, 'raw'
Under key 'raw' which holds a dictionary, the subkeys
include 'cphase' (nlst,ndays,ntriads,nchan),
'triads' (ntriads,3), 'lst' (nlst,ndays), and 'flags'
(nlst,ndays,ntriads,nchan), and some other optional keys
----------------------------------------------------------------------------
"""
npzdata = NP.load(npzfile)
cpdata = npzdata['closures']
triadsdata = npzdata['triads']
flagsdata = npzdata['flags']
location = ('{0:.5f}d'.format(longitude), '{0:.5f}d'.format(latitude))
daydata = Time(npzdata['days'].astype(NP.float64), scale='utc', format='jd', location=location)
# lstdata = Time(npzdata['last'].astype(NP.float64) - 6713.0, scale='utc', format='mjd', location=('+21.4278d', '-30.7224d')).sidereal_time('apparent') # Subtract 6713 based on CASA convention to obtain MJD
if lst_format.lower() == 'hourangle':
lstHA = npzdata['last']
lstday = daydata.reshape(1,-1) + TimeDelta(NP.zeros(lstHA.shape[0]).reshape(-1,1)*U.s)
elif lst_format.lower() == 'fracday':
lstfrac, lstint = NP.modf(npzdata['last'])
lstday = Time(lstint.astype(NP.float64) - 6713.0, scale='utc', format='mjd', location=location) # Subtract 6713 based on CASA convention to obtain MJD
lstHA = lstfrac * 24.0 # in hours
else:
raise ValueError('Input lst_format invalid')
cp = cpdata.astype(NP.float64)
flags = flagsdata.astype(NP.bool)
cpinfo = {}
datapool = ['raw']
for dpool in datapool:
cpinfo[dpool] = {}
if dpool == 'raw':
qtys = ['cphase', 'triads', 'flags', 'lst', 'lst-day', 'days', 'dayavg', 'std_triads', 'std_lst']
for qty in qtys:
if qty == 'cphase':
cpinfo[dpool][qty] = NP.copy(cp)
elif qty == 'triads':
cpinfo[dpool][qty] = NP.copy(triadsdata)
elif qty == 'flags':
cpinfo[dpool][qty] = NP.copy(flags)
elif qty == 'lst':
cpinfo[dpool][qty] = NP.copy(lstHA)
elif qty == 'lst-day':
cpinfo[dpool][qty] = NP.copy(lstday.jd)
elif qty == 'days':
cpinfo[dpool][qty] = NP.copy(daydata.jd)
elif qty == 'dayavg':
if 'averaged_closures' in npzdata:
                        cpinfo[dpool][qty] = npzdata['averaged_closures'].astype(NP.float64)
elif qty == 'std_triads':
if 'std_dev_triad' in npzdata:
                        cpinfo[dpool][qty] = npzdata['std_dev_triad'].astype(NP.float64)
elif qty == 'std_lst':
if 'std_dev_lst' in npzdata:
                        cpinfo[dpool][qty] = npzdata['std_dev_lst'].astype(NP.float64)
return cpinfo
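# Example sketch (hypothetical file path; HERA-like site coordinates as in the
# commented-out line above):
#
#   cpinfo = loadnpz('/path/to/closures.npz', longitude=21.4278, latitude=-30.7224,
#                    lst_format='fracday')
#   cpinfo['raw']['cphase'].shape   # -> (nlst, ndays, ntriads, nchan)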
################################################################################
def npz2hdf5(npzfile, hdf5file, longitude=0.0, latitude=0.0,
lst_format='fracday'):
"""
----------------------------------------------------------------------------
Read an input NPZ file containing closure phase data output from CASA and
save it to HDF5 format
Inputs:
npzfile [string] Input NPZ file including full path containing closure
phase data. It must have the following files/keys inside:
'closures' [numpy array] Closure phase (radians). It is of
shape (nlst,ndays,ntriads,nchan)
'triads' [numpy array] Array of triad tuples, of shape
(ntriads,3)
'flags' [numpy array] Array of flags (boolean), of shape
(nlst,ndays,ntriads,nchan)
'last' [numpy array] Array of LST for each day (CASA units
                           which is MJD+6713). Shape is (nlst,ndays)
'days' [numpy array] Array of days, shape is (ndays,)
'averaged_closures'
[numpy array] optional array of closure phases
averaged across days. Shape is (nlst,ntriads,nchan)
'std_dev_lst'
[numpy array] optional array of standard deviation
of closure phases across days. Shape is
(nlst,ntriads,nchan)
'std_dev_triads'
[numpy array] optional array of standard deviation
of closure phases across triads. Shape is
(nlst,ndays,nchan)
hdf5file [string] Output HDF5 file including full path.
latitude [scalar int or float] Latitude of site (in degrees).
Default=0.0 deg.
longitude [scalar int or float] Longitude of site (in degrees).
Default=0.0 deg.
lst_format [string] Specifies the format/units in which the 'last' key
is to be interpreted. If set to 'hourangle', the LST is in
units of hour angle. If set to 'fracday', the fractional
portion of the 'last' value is the LST in units of days.
----------------------------------------------------------------------------
"""
npzdata = NP.load(npzfile)
cpdata = npzdata['closures']
triadsdata = npzdata['triads']
flagsdata = npzdata['flags']
location = ('{0:.5f}d'.format(longitude), '{0:.5f}d'.format(latitude))
daydata = Time(npzdata['days'].astype(NP.float64), scale='utc', format='jd', location=location)
# lstdata = Time(npzdata['last'].astype(NP.float64) - 6713.0, scale='utc', format='mjd', location=('+21.4278d', '-30.7224d')).sidereal_time('apparent') # Subtract 6713 based on CASA convention to obtain MJD
if lst_format.lower() == 'hourangle':
lstHA = npzdata['last']
lstday = daydata.reshape(1,-1) + TimeDelta(NP.zeros(lstHA.shape[0]).reshape(-1,1)*U.s)
elif lst_format.lower() == 'fracday':
lstfrac, lstint = NP.modf(npzdata['last'])
lstday = Time(lstint.astype(NP.float64) - 6713.0, scale='utc', format='mjd', location=location) # Subtract 6713 based on CASA convention to obtain MJD
lstHA = lstfrac * 24.0 # in hours
else:
raise ValueError('Input lst_format invalid')
cp = cpdata.astype(NP.float64)
flags = flagsdata.astype(NP.bool)
if 'averaged_closures' in npzdata:
day_avg_cpdata = npzdata['averaged_closures']
cp_dayavg = day_avg_cpdata.astype(NP.float64)
if 'std_dev_triad' in npzdata:
std_triads_cpdata = npzdata['std_dev_triad']
cp_std_triads = std_triads_cpdata.astype(NP.float64)
if 'std_dev_lst' in npzdata:
std_lst_cpdata = npzdata['std_dev_lst']
cp_std_lst = std_lst_cpdata.astype(NP.float64)
with h5py.File(hdf5file, 'w') as fobj:
datapool = ['raw']
for dpool in datapool:
if dpool == 'raw':
qtys = ['cphase', 'triads', 'flags', 'lst', 'lst-day', 'days', 'dayavg', 'std_triads', 'std_lst']
for qty in qtys:
data = None
if qty == 'cphase':
data = NP.copy(cp)
elif qty == 'triads':
data = NP.copy(triadsdata)
elif qty == 'flags':
data = NP.copy(flags)
elif qty == 'lst':
data = NP.copy(lstHA)
elif qty == 'lst-day':
data = NP.copy(lstday.jd)
elif qty == 'days':
data = NP.copy(daydata.jd)
elif qty == 'dayavg':
if 'averaged_closures' in npzdata:
data = NP.copy(cp_dayavg)
elif qty == 'std_triads':
if 'std_dev_triad' in npzdata:
data = NP.copy(cp_std_triads)
elif qty == 'std_lst':
if 'std_dev_lst' in npzdata:
data = NP.copy(cp_std_lst)
if data is not None:
dset = fobj.create_dataset('{0}/{1}'.format(dpool, qty), data=data, compression='gzip', compression_opts=9)
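# Example sketch (hypothetical paths), converting a CASA-derived NPZ to HDF5:
#
#   npz2hdf5('/path/to/closures.npz', '/path/to/closures.hdf5',
#            longitude=21.4278, latitude=-30.7224, lst_format='fracday')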
################################################################################
def save_CPhase_cross_power_spectrum(xcpdps, outfile):
"""
----------------------------------------------------------------------------
Save cross-power spectrum information in a dictionary to a HDF5 file
Inputs:
xcpdps [dictionary] This dictionary is essentially an output of the
member function compute_power_spectrum() of class
ClosurePhaseDelaySpectrum. It has the following key-value
structure:
'triads' ((ntriads,3) array), 'triads_ind',
((ntriads,) array), 'lstXoffsets' ((ndlst_range,) array), 'lst'
((nlst,) array), 'dlst' ((nlst,) array), 'lst_ind' ((nlst,)
array), 'days' ((ndays,) array), 'day_ind' ((ndays,) array),
'dday' ((ndays,) array), 'oversampled' and 'resampled'
corresponding to whether resample was set to False or True in
call to member function FT(). Values under keys 'triads_ind'
and 'lst_ind' are numpy array corresponding to triad and time
indices used in selecting the data. Values under keys
'oversampled' and 'resampled' each contain a dictionary with
the following keys and values:
'z' [numpy array] Redshifts corresponding to the band
centers in 'freq_center'. It has shape=(nspw,)
'lags' [numpy array] Delays (in seconds). It has shape=(nlags,)
'kprll' [numpy array] k_parallel modes (in h/Mpc) corresponding
to 'lags'. It has shape=(nspw,nlags)
'freq_center'
[numpy array] contains the center frequencies (in Hz)
of the frequency subbands of the subband delay spectra.
It is of size n_win. It is roughly equivalent to
redshift(s)
'freq_wts'
[numpy array] Contains frequency weights applied on
each frequency sub-band during the subband delay
transform. It is of size n_win x nchan.
'bw_eff'
[numpy array] contains the effective bandwidths (in Hz)
of the subbands being delay transformed. It is of size
n_win. It is roughly equivalent to width in redshift or
along line-of-sight
'shape' [string] shape of the frequency window function applied.
Usual values are 'rect' (rectangular), 'bhw'
(Blackman-Harris), 'bnw' (Blackman-Nuttall).
'fftpow'
[scalar] the power to which the FFT of the window was
raised. The value is be a positive scalar with
default = 1.0
'lag_corr_length'
[numpy array] It is the correlation timescale (in
pixels) of the subband delay spectra. It is proportional
to inverse of effective bandwidth. It is of size n_win.
The unit size of a pixel is determined by the difference
between adjacent pixels in lags under key 'lags' which
in turn is effectively inverse of the effective
bandwidth of the subband specified in bw_eff
It further contains one or more of the following keys named
'whole', 'submodel', 'residual', and 'errinfo' each of which is
a dictionary. 'whole' contains power spectrum info about the
input closure phases. 'submodel' contains power spectrum info
about the model that will have been subtracted (as closure
phase) from the 'whole' model. 'residual' contains power
spectrum info about the closure phases obtained as a difference
between 'whole' and 'submodel'. It contains the following keys
and values:
'mean' [numpy array] Delay power spectrum incoherently
estimated over the axes specified in xinfo['axes']
using the 'mean' key in input cpds or attribute
cPhaseDS['processed']['dspec']. It has shape that
depends on the combination of input parameters. See
examples below. If both collapse_axes and avgcov are
not set, those axes will be replaced with square
covariance matrices. If collapse_axes is provided but
avgcov is False, those axes will be of shape 2*Naxis-1.
'median'
[numpy array] Delay power spectrum incoherently averaged
over the axes specified in incohax using the 'median'
key in input cpds or attribute
cPhaseDS['processed']['dspec']. It has shape that
depends on the combination of input parameters. See
examples below. If both collapse_axes and avgcov are not
set, those axes will be replaced with square covariance
matrices. If collapse_axes is provided bu avgcov is
False, those axes will be of shape 2*Naxis-1.
'diagoffsets'
[dictionary] Same keys corresponding to keys under
'collapse_axes' in input containing the diagonal
offsets for those axes. If 'avgcov' was set, those
entries will be removed from 'diagoffsets' since all the
leading diagonal elements have been collapsed (averaged)
further. Value under each key is a numpy array where
each element in the array corresponds to the index of
that leading diagonal. This should match the size of the
output along that axis in 'mean' or 'median' above.
'diagweights'
[dictionary] Each key is an axis specified in
collapse_axes and the value is a numpy array of weights
corresponding to the diagonal offsets in that axis.
'axesmap'
[dictionary] If covariance in cross-power is calculated
but is not collapsed, the number of dimensions in the
output will have changed. This parameter tracks where
the original axis is now placed. The keys are the
original axes that are involved in incoherent
cross-power, and the values are the new locations of
those original axes in the output.
'nsamples_incoh'
[integer] Number of incoherent samples in producing the
power spectrum
'nsamples_coh'
[integer] Number of coherent samples in producing the
power spectrum
outfile [string] Full path to the external HDF5 file where the cross-
power spectrum information provided in xcpdps will be saved
----------------------------------------------------------------------------
"""
if not isinstance(xcpdps, dict):
raise TypeError('Input xcpdps must be a dictionary')
with h5py.File(outfile, 'w') as fileobj:
hdrgrp = fileobj.create_group('header')
hdrkeys = ['triads', 'triads_ind', 'lst', 'lst_ind', 'dlst', 'days', 'day_ind', 'dday']
for key in hdrkeys:
dset = hdrgrp.create_dataset(key, data=xcpdps[key])
sampling = ['oversampled', 'resampled']
sampling_keys = ['z', 'kprll', 'lags', 'freq_center', 'bw_eff', 'shape', 'freq_wts', 'lag_corr_length']
dpool_keys = ['whole', 'submodel', 'residual', 'errinfo']
for smplng in sampling:
if smplng in xcpdps:
smplgrp = fileobj.create_group(smplng)
for key in sampling_keys:
dset = smplgrp.create_dataset(key, data=xcpdps[smplng][key])
for dpool in dpool_keys:
if dpool in xcpdps[smplng]:
dpoolgrp = smplgrp.create_group(dpool)
keys = ['diagoffsets', 'diagweights', 'axesmap', 'nsamples_incoh', 'nsamples_coh']
for key in keys:
if key in xcpdps[smplng][dpool]:
if isinstance(xcpdps[smplng][dpool][key], dict):
subgrp = dpoolgrp.create_group(key)
for subkey in xcpdps[smplng][dpool][key]:
dset = subgrp.create_dataset(str(subkey), data=xcpdps[smplng][dpool][key][subkey])
else:
dset = dpoolgrp.create_dataset(key, data=xcpdps[smplng][dpool][key])
for stat in ['mean', 'median']:
if stat in xcpdps[smplng][dpool]:
if isinstance(xcpdps[smplng][dpool][stat], list):
for ii in range(len(xcpdps[smplng][dpool][stat])):
dset = dpoolgrp.create_dataset(stat+'/diagcomb_{0}'.format(ii), data=xcpdps[smplng][dpool][stat][ii].si.value)
dset.attrs['units'] = str(xcpdps[smplng][dpool][stat][ii].si.unit)
else:
dset = dpoolgrp.create_dataset(stat, data=xcpdps[smplng][dpool][stat].si.value)
dset.attrs['units'] = str(xcpdps[smplng][dpool][stat].si.unit)
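# Illustrative usage sketch: the helper name below is hypothetical. It assumes
# `xcpdps` is the dictionary returned by
# ClosurePhaseDelaySpectrum.compute_power_spectrum() and that `outfile` is a
# writable HDF5 path.
def _example_save_xcpdps(xcpdps, outfile):
    # Write the cross power spectrum to disk and return the top-level HDF5
    # groups created ('header' plus any of 'oversampled' / 'resampled').
    save_CPhase_cross_power_spectrum(xcpdps, outfile)
    with h5py.File(outfile, 'r') as fileobj:
        return list(fileobj.keys())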
################################################################################
def read_CPhase_cross_power_spectrum(infile):
"""
----------------------------------------------------------------------------
Read information about cross power spectrum from an external HDF5 file into
a dictionary. This is the counterpart to save_CPhase_cross_power_spectrum()
Input:
infile [string] Full path to the external HDF5 file that contains info
about cross-power spectrum.
Output:
xcpdps [dictionary] This dictionary has structure the same as output
of the member function compute_power_spectrum() of class
ClosurePhaseDelaySpectrum. It has the following key-value
structure:
'triads' ((ntriads,3) array), 'triads_ind',
((ntriads,) array), 'lstXoffsets' ((ndlst_range,) array), 'lst'
((nlst,) array), 'dlst' ((nlst,) array), 'lst_ind' ((nlst,)
array), 'days' ((ndays,) array), 'day_ind' ((ndays,) array),
'dday' ((ndays,) array), 'oversampled' and 'resampled'
corresponding to whether resample was set to False or True in
call to member function FT(). Values under keys 'triads_ind'
and 'lst_ind' are numpy array corresponding to triad and time
indices used in selecting the data. Values under keys
'oversampled' and 'resampled' each contain a dictionary with
the following keys and values:
'z' [numpy array] Redshifts corresponding to the band
centers in 'freq_center'. It has shape=(nspw,)
'lags' [numpy array] Delays (in seconds). It has shape=(nlags,)
'kprll' [numpy array] k_parallel modes (in h/Mpc) corresponding
to 'lags'. It has shape=(nspw,nlags)
'freq_center'
[numpy array] contains the center frequencies (in Hz)
of the frequency subbands of the subband delay spectra.
It is of size n_win. It is roughly equivalent to
redshift(s)
'freq_wts'
[numpy array] Contains frequency weights applied on
each frequency sub-band during the subband delay
transform. It is of size n_win x nchan.
'bw_eff'
[numpy array] contains the effective bandwidths (in Hz)
of the subbands being delay transformed. It is of size
n_win. It is roughly equivalent to width in redshift or
along line-of-sight
'shape' [string] shape of the frequency window function applied.
Usual values are 'rect' (rectangular), 'bhw'
(Blackman-Harris), 'bnw' (Blackman-Nuttall).
'fftpow'
[scalar] the power to which the FFT of the window was
raised. The value must be a positive scalar with
default = 1.0
'lag_corr_length'
[numpy array] It is the correlation timescale (in
pixels) of the subband delay spectra. It is proportional
to inverse of effective bandwidth. It is of size n_win.
The unit size of a pixel is determined by the difference
between adjacent pixels in lags under key 'lags' which
in turn is effectively inverse of the effective
bandwidth of the subband specified in bw_eff
It further contains one or more of the following keys named
'whole', 'submodel', 'residual', and 'errinfo' each of which is
a dictionary. 'whole' contains power spectrum info about the
input closure phases. 'submodel' contains power spectrum info
about the model that will have been subtracted (as closure
phase) from the 'whole' model. 'residual' contains power
spectrum info about the closure phases obtained as a difference
between 'whole' and 'submodel'. It contains the following keys
and values:
'mean' [numpy array] Delay power spectrum incoherently
estimated over the axes specified in xinfo['axes']
using the 'mean' key in input cpds or attribute
cPhaseDS['processed']['dspec']. It has shape that
depends on the combination of input parameters. See
examples below. If both collapse_axes and avgcov are
not set, those axes will be replaced with square
covariance matrices. If collapse_axes is provided but
avgcov is False, those axes will be of shape 2*Naxis-1.
'median'
[numpy array] Delay power spectrum incoherently averaged
over the axes specified in incohax using the 'median'
key in input cpds or attribute
cPhaseDS['processed']['dspec']. It has shape that
depends on the combination of input parameters. See
examples below. If both collapse_axes and avgcov are not
set, those axes will be replaced with square covariance
matrices. If collapse_axes is provided but avgcov is
False, those axes will be of shape 2*Naxis-1.
'diagoffsets'
[dictionary] Same keys corresponding to keys under
'collapse_axes' in input containing the diagonal
offsets for those axes. If 'avgcov' was set, those
entries will be removed from 'diagoffsets' since all the
leading diagonal elements have been collapsed (averaged)
further. Value under each key is a numpy array where
each element in the array corresponds to the index of
that leading diagonal. This should match the size of the
output along that axis in 'mean' or 'median' above.
'diagweights'
[dictionary] Each key is an axis specified in
collapse_axes and the value is a numpy array of weights
corresponding to the diagonal offsets in that axis.
'axesmap'
[dictionary] If covariance in cross-power is calculated
but is not collapsed, the number of dimensions in the
output will have changed. This parameter tracks where
the original axis is now placed. The keys are the
original axes that are involved in incoherent
cross-power, and the values are the new locations of
those original axes in the output.
'nsamples_incoh'
[integer] Number of incoherent samples in producing the
power spectrum
'nsamples_coh'
[integer] Number of coherent samples in producing the
power spectrum
----------------------------------------------------------------------------
"""
if not isinstance(infile, str):
raise TypeError('Input infile must be a string')
xcpdps = {}
with h5py.File(infile, 'r') as fileobj:
hdrgrp = fileobj['header']
hdrkeys = ['triads', 'triads_ind', 'lst', 'lst_ind', 'dlst', 'days', 'day_ind', 'dday']
for key in hdrkeys:
xcpdps[key] = hdrgrp[key].value
sampling = ['oversampled', 'resampled']
sampling_keys = ['z', 'kprll', 'lags', 'freq_center', 'bw_eff', 'shape', 'freq_wts', 'lag_corr_length']
dpool_keys = ['whole', 'submodel', 'residual', 'errinfo']
for smplng in sampling:
if smplng in fileobj:
smplgrp = fileobj[smplng]
xcpdps[smplng] = {}
for key in sampling_keys:
xcpdps[smplng][key] = smplgrp[key].value
for dpool in dpool_keys:
if dpool in smplgrp:
xcpdps[smplng][dpool] = {}
dpoolgrp = smplgrp[dpool]
keys = ['diagoffsets', 'diagweights', 'axesmap', 'nsamples_incoh', 'nsamples_coh']
for key in keys:
if key in dpoolgrp:
if isinstance(dpoolgrp[key], h5py.Group):
xcpdps[smplng][dpool][key] = {}
for subkey in dpoolgrp[key]:
xcpdps[smplng][dpool][key][int(subkey)] = dpoolgrp[key][subkey].value
elif isinstance(dpoolgrp[key], h5py.Dataset):
xcpdps[smplng][dpool][key] = dpoolgrp[key].value
else:
raise TypeError('Invalid h5py data type encountered')
for stat in ['mean', 'median']:
if stat in dpoolgrp:
if isinstance(dpoolgrp[stat], h5py.Dataset):
valunits = dpoolgrp[stat].attrs['units']
xcpdps[smplng][dpool][stat] = dpoolgrp[stat].value * U.Unit(valunits)
elif isinstance(dpoolgrp[stat], h5py.Group):
xcpdps[smplng][dpool][stat] = []
for diagcomb_ind in range(len(dpoolgrp[stat].keys())):
if 'diagcomb_{0}'.format(diagcomb_ind) in dpoolgrp[stat]:
valunits = dpoolgrp[stat]['diagcomb_{0}'.format(diagcomb_ind)].attrs['units']
xcpdps[smplng][dpool][stat] += [dpoolgrp[stat]['diagcomb_{0}'.format(diagcomb_ind)].value * U.Unit(valunits)]
return xcpdps
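# Illustrative usage sketch (hypothetical helper name): read back a previously
# saved cross power spectrum and report which sampling and data-pool keys are
# present in the file.
def _example_read_xcpdps(infile):
    xcpdps = read_CPhase_cross_power_spectrum(infile)
    present = {}
    for smplng in ['oversampled', 'resampled']:
        if smplng in xcpdps:
            present[smplng] = [dpool for dpool in ['whole', 'submodel', 'residual', 'errinfo'] if dpool in xcpdps[smplng]]
    return xcpdps, present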
################################################################################
def incoherent_cross_power_spectrum_average(xcpdps, excpdps=None, diagoffsets=None):
"""
----------------------------------------------------------------------------
Perform incoherent averaging of cross power spectrum along specified axes
Inputs:
xcpdps [dictionary or list of dictionaries] If provided as a list of
dictionaries, each dictionary consists of cross power spectral
information coming possibly from different sources, and they
will be averaged incoherently. If a single
dictionary is provided instead of a list of dictionaries, the
said averaging does not take place. Each dictionary is
essentially an output of the member function
compute_power_spectrum() of class ClosurePhaseDelaySpectrum. It
has the following key-value structure:
'triads' ((ntriads,3) array), 'triads_ind',
((ntriads,) array), 'lstXoffsets' ((ndlst_range,) array), 'lst'
((nlst,) array), 'dlst' ((nlst,) array), 'lst_ind' ((nlst,)
array), 'days' ((ndays,) array), 'day_ind' ((ndays,) array),
'dday' ((ndays,) array), 'oversampled' and 'resampled'
corresponding to whether resample was set to False or True in
call to member function FT(). Values under keys 'triads_ind'
and 'lst_ind' are numpy array corresponding to triad and time
indices used in selecting the data. Values under keys
'oversampled' and 'resampled' each contain a dictionary with
the following keys and values:
'z' [numpy array] Redshifts corresponding to the band
centers in 'freq_center'. It has shape=(nspw,)
'lags' [numpy array] Delays (in seconds). It has shape=(nlags,)
'kprll' [numpy array] k_parallel modes (in h/Mpc) corresponding
to 'lags'. It has shape=(nspw,nlags)
'freq_center'
[numpy array] contains the center frequencies (in Hz)
of the frequency subbands of the subband delay spectra.
It is of size n_win. It is roughly equivalent to
redshift(s)
'freq_wts'
[numpy array] Contains frequency weights applied on
each frequency sub-band during the subband delay
transform. It is of size n_win x nchan.
'bw_eff'
[numpy array] contains the effective bandwidths (in Hz)
of the subbands being delay transformed. It is of size
n_win. It is roughly equivalent to width in redshift or
along line-of-sight
'shape' [string] shape of the frequency window function applied.
Usual values are 'rect' (rectangular), 'bhw'
(Blackman-Harris), 'bnw' (Blackman-Nuttall).
'fftpow'
[scalar] the power to which the FFT of the window was
raised. The value must be a positive scalar with
default = 1.0
'lag_corr_length'
[numpy array] It is the correlation timescale (in
pixels) of the subband delay spectra. It is proportional
to inverse of effective bandwidth. It is of size n_win.
The unit size of a pixel is determined by the difference
between adjacent pixels in lags under key 'lags' which
in turn is effectively inverse of the effective
bandwidth of the subband specified in bw_eff
It further contains 3 keys named 'whole', 'submodel', and
'residual' each of which is a dictionary. 'whole' contains power
spectrum info about the input closure phases. 'submodel'
contains power spectrum info about the model that will have been
subtracted (as closure phase) from the 'whole' model. 'residual'
contains power spectrum info about the closure phases obtained
as a difference between 'whole' and 'submodel'. It contains the
following keys and values:
'mean' [numpy array] Delay power spectrum incoherently
estimated over the axes specified in xinfo['axes']
using the 'mean' key in input cpds or attribute
cPhaseDS['processed']['dspec']. It has shape that
depends on the combination of input parameters. See
examples below. If both collapse_axes and avgcov are
not set, those axes will be replaced with square
covariance matrices. If collapse_axes is provided but
avgcov is False, those axes will be of shape 2*Naxis-1.
'median'
[numpy array] Delay power spectrum incoherently averaged
over the axes specified in incohax using the 'median'
key in input cpds or attribute
cPhaseDS['processed']['dspec']. It has shape that
depends on the combination of input parameters. See
examples below. If both collapse_axes and avgcov are not
set, those axes will be replaced with square covariance
matrices. If collapse_axes is provided but avgcov is
False, those axes will be of shape 2*Naxis-1.
'diagoffsets'
[dictionary] Same keys corresponding to keys under
'collapse_axes' in input containing the diagonal
offsets for those axes. If 'avgcov' was set, those
entries will be removed from 'diagoffsets' since all the
leading diagonal elements have been collapsed (averaged)
further. Value under each key is a numpy array where
each element in the array corresponds to the index of
that leading diagonal. This should match the size of the
output along that axis in 'mean' or 'median' above.
'diagweights'
[dictionary] Each key is an axis specified in
collapse_axes and the value is a numpy array of weights
corresponding to the diagonal offsets in that axis.
'axesmap'
[dictionary] If covariance in cross-power is calculated
but is not collapsed, the number of dimensions in the
output will have changed. This parameter tracks where
the original axis is now placed. The keys are the
original axes that are involved in incoherent
cross-power, and the values are the new locations of
those original axes in the output.
'nsamples_incoh'
[integer] Number of incoherent samples in producing the
power spectrum
'nsamples_coh'
[integer] Number of coherent samples in producing the
power spectrum
excpdps [dictionary or list of dictionaries] If provided as a list of
dictionaries, each dictionary consists of cross power spectral
information of subsample differences coming possibly from
different sources, and they will be averaged
incoherently. This is optional. If not set (default=None), no
incoherent averaging happens. If a single dictionary is provided
instead of a list of dictionaries, the said averaging does not
take place. Each dictionary is essentially an output of the
member function compute_power_spectrum_uncertainty() of class
ClosurePhaseDelaySpectrum. It has the following key-value
structure:
'triads' ((ntriads,3) array), 'triads_ind',
((ntriads,) array), 'lstXoffsets' ((ndlst_range,) array), 'lst'
((nlst,) array), 'dlst' ((nlst,) array), 'lst_ind' ((nlst,)
array), 'days' ((ndaycomb,) array), 'day_ind' ((ndaycomb,)
array), 'dday' ((ndaycomb,) array), 'oversampled' and
'resampled' corresponding to whether resample was set to False
or True in call to member function FT(). Values under keys
'triads_ind' and 'lst_ind' are numpy array corresponding to
triad and time indices used in selecting the data. Values under
keys 'oversampled' and 'resampled' each contain a dictionary
with the following keys and values:
'z' [numpy array] Redshifts corresponding to the band
centers in 'freq_center'. It has shape=(nspw,)
'lags' [numpy array] Delays (in seconds). It has shape=(nlags,)
'kprll' [numpy array] k_parallel modes (in h/Mpc) corresponding
to 'lags'. It has shape=(nspw,nlags)
'freq_center'
[numpy array] contains the center frequencies (in Hz) of
the frequency subbands of the subband delay spectra. It
is of size n_win. It is roughly equivalent to
redshift(s)
'freq_wts'
[numpy array] Contains frequency weights applied on each
frequency sub-band during the subband delay transform.
It is of size n_win x nchan.
'bw_eff'
[numpy array] contains the effective bandwidths (in Hz)
of the subbands being delay transformed. It is of size
n_win. It is roughly equivalent to width in redshift or
along line-of-sight
'shape' [string] shape of the frequency window function applied.
Usual values are 'rect' (rectangular), 'bhw'
(Blackman-Harris), 'bnw' (Blackman-Nuttall).
'fftpow'
[scalar] the power to which the FFT of the window was
raised. The value must be a positive scalar with
default = 1.0
'lag_corr_length'
[numpy array] It is the correlation timescale (in
pixels) of the subband delay spectra. It is proportional
to inverse of effective bandwidth. It is of size n_win.
The unit size of a pixel is determined by the difference
between adjacent pixels in lags under key 'lags' which
in turn is effectively inverse of the effective
bandwidth of the subband specified in bw_eff
It further contains a key named 'errinfo' which is a dictionary.
It contains information about power spectrum uncertainties
obtained from subsample differences. It contains the following
keys and values:
'mean' [numpy array] Delay power spectrum uncertainties
incoherently estimated over the axes specified in
xinfo['axes'] using the 'mean' key in input cpds or
attribute cPhaseDS['errinfo']['dspec']. It has shape
that depends on the combination of input parameters. See
examples below. If both collapse_axes and avgcov are not
set, those axes will be replaced with square covariance
matrices. If collapse_axes is provided but avgcov is
False, those axes will be of shape 2*Naxis-1.
'median'
[numpy array] Delay power spectrum uncertainties
incoherently averaged over the axes specified in incohax
using the 'median' key in input cpds or attribute
cPhaseDS['errinfo']['dspec']. It has shape that depends
on the combination of input parameters. See examples
below. If both collapse_axes and avgcov are not set,
those axes will be replaced with square covariance
matrices. If collapse_axes is provided but avgcov is
False, those axes will be of shape 2*Naxis-1.
'diagoffsets'
[dictionary] Same keys corresponding to keys under
'collapse_axes' in input containing the diagonal offsets
for those axes. If 'avgcov' was set, those entries will
be removed from 'diagoffsets' since all the leading
diagonal elements have been collapsed (averaged) further.
Value under each key is a numpy array where each element
in the array corresponds to the index of that leading
diagonal. This should match the size of the output along
that axis in 'mean' or 'median' above.
'diagweights'
[dictionary] Each key is an axis specified in
collapse_axes and the value is a numpy array of weights
corresponding to the diagonal offsets in that axis.
'axesmap'
[dictionary] If covariance in cross-power is calculated
but is not collapsed, the number of dimensions in the
output will have changed. This parameter tracks where
the original axis is now placed. The keys are the
original axes that are involved in incoherent
cross-power, and the values are the new locations of
those original axes in the output.
'nsamples_incoh'
[integer] Number of incoherent samples in producing the
power spectrum
'nsamples_coh'
[integer] Number of coherent samples in producing the
power spectrum
diagoffsets [NoneType or dictionary or list of dictionaries] This info is
used for incoherent averaging along specified diagonals along
specified axes. This incoherent averaging is performed after
incoherently averaging multiple cross-power spectra (if any).
If set to None, this incoherent averaging is not performed.
Many combinations of axes and diagonals can be specified as
individual dictionaries in a list. If only one dictionary is
specified, then it is assumed that only one combination of axes
and diagonals is requested. If a list of dictionaries is given,
each dictionary in the list specifies a different combination
for incoherent averaging. Each dictionary should have the
following key-value pairs. The key is the axis number (allowed
values are 1, 2, 3) that denote the axis type (1=LST, 2=Days,
3=Triads to be averaged), and the value under these keys is a
list or numpy array of diagonals to be averaged incoherently.
These axes-diagonal combinations apply to both the inputs
xcpdps and excpdps, except axis=2 does not apply to excpdps
(since it is made of subsample differences already) and will be
skipped.
Outputs:
A tuple consisting of two dictionaries. The first dictionary contains the
incoherent averaging of xcpdps as specified by the inputs, while the second
consists of incoherent averaging of excpdps as specified by the inputs. The
structure of these dictionaries is practically the same as that of the
dictionary inputs xcpdps and excpdps respectively. The only differences in
dictionary structure are:
* Under key ['oversampled'/'resampled']['whole'/'submodel'/'residual'
/'errinfo']['mean'/'median'] is a list of numpy arrays, where each
array in the list corresponds to the dictionary in the list in input
diagoffsets that defines the axes-diagonal combination.
----------------------------------------------------------------------------
"""
if isinstance(xcpdps, dict):
xcpdps = [xcpdps]
if not isinstance(xcpdps, list):
raise TypeError('Invalid data type provided for input xcpdps')
if excpdps is not None:
if isinstance(excpdps, dict):
excpdps = [excpdps]
if not isinstance(excpdps, list):
raise TypeError('Invalid data type provided for input excpdps')
if len(xcpdps) != len(excpdps):
raise ValueError('Inputs xcpdps and excpdps found to have unequal number of values')
out_xcpdps = {'triads': xcpdps[0]['triads'], 'triads_ind': xcpdps[0]['triads_ind'], 'lst': xcpdps[0]['lst'], 'lst_ind': xcpdps[0]['lst_ind'], 'dlst': xcpdps[0]['dlst'], 'days': xcpdps[0]['days'], 'day_ind': xcpdps[0]['day_ind'], 'dday': xcpdps[0]['dday']}
out_excpdps = None
if excpdps is not None:
out_excpdps = {'triads': excpdps[0]['triads'], 'triads_ind': excpdps[0]['triads_ind'], 'lst': excpdps[0]['lst'], 'lst_ind': excpdps[0]['lst_ind'], 'dlst': excpdps[0]['dlst'], 'days': excpdps[0]['days'], 'day_ind': excpdps[0]['day_ind'], 'dday': excpdps[0]['dday']}
for smplng in ['oversampled', 'resampled']:
if smplng in xcpdps[0]:
out_xcpdps[smplng] = {'z': xcpdps[0][smplng]['z'], 'kprll': xcpdps[0][smplng]['kprll'], 'lags': xcpdps[0][smplng]['lags'], 'freq_center': xcpdps[0][smplng]['freq_center'], 'bw_eff': xcpdps[0][smplng]['bw_eff'], 'shape': xcpdps[0][smplng]['shape'], 'freq_wts': xcpdps[0][smplng]['freq_wts'], 'lag_corr_length': xcpdps[0][smplng]['lag_corr_length']}
if excpdps is not None:
out_excpdps[smplng] = {'z': excpdps[0][smplng]['z'], 'kprll': excpdps[0][smplng]['kprll'], 'lags': excpdps[0][smplng]['lags'], 'freq_center': excpdps[0][smplng]['freq_center'], 'bw_eff': excpdps[0][smplng]['bw_eff'], 'shape': excpdps[0][smplng]['shape'], 'freq_wts': excpdps[0][smplng]['freq_wts'], 'lag_corr_length': excpdps[0][smplng]['lag_corr_length']}
for dpool in ['whole', 'submodel', 'residual']:
if dpool in xcpdps[0][smplng]:
out_xcpdps[smplng][dpool] = {'diagoffsets': xcpdps[0][smplng][dpool]['diagoffsets'], 'axesmap': xcpdps[0][smplng][dpool]['axesmap']}
for stat in ['mean', 'median']:
if stat in xcpdps[0][smplng][dpool]:
out_xcpdps[smplng][dpool][stat] = {}
arr = []
diagweights = []
for i in range(len(xcpdps)):
arr += [xcpdps[i][smplng][dpool][stat].si.value]
arr_units = xcpdps[i][smplng][dpool][stat].si.unit
if isinstance(xcpdps[i][smplng][dpool]['diagweights'], dict):
diagwts = 1.0
diagwts_shape = NP.ones(xcpdps[i][smplng][dpool][stat].ndim, dtype=NP.int)
for ax in xcpdps[i][smplng][dpool]['diagweights']:
tmp_shape = NP.copy(diagwts_shape)
tmp_shape[xcpdps[i][smplng][dpool]['axesmap'][ax]] = xcpdps[i][smplng][dpool]['diagweights'][ax].size
diagwts = diagwts * xcpdps[i][smplng][dpool]['diagweights'][ax].reshape(tuple(tmp_shape))
elif isinstance(xcpdps[i][smplng][dpool]['diagweights'], NP.ndarray):
diagwts = NP.copy(xcpdps[i][smplng][dpool]['diagweights'])
else:
raise TypeError('Diagonal weights in input must be a dictionary or a numpy array')
diagweights += [diagwts]
diagweights = NP.asarray(diagweights)
arr = NP.asarray(arr)
arr = NP.nansum(arr * diagweights, axis=0) / NP.nansum(diagweights, axis=0) * arr_units
diagweights = NP.nansum(diagweights, axis=0)
out_xcpdps[smplng][dpool][stat] = arr
out_xcpdps[smplng][dpool]['diagweights'] = diagweights
for dpool in ['errinfo']:
if dpool in excpdps[0][smplng]:
out_excpdps[smplng][dpool] = {'diagoffsets': excpdps[0][smplng][dpool]['diagoffsets'], 'axesmap': excpdps[0][smplng][dpool]['axesmap']}
for stat in ['mean', 'median']:
if stat in excpdps[0][smplng][dpool]:
out_excpdps[smplng][dpool][stat] = {}
arr = []
diagweights = []
for i in range(len(excpdps)):
arr += [excpdps[i][smplng][dpool][stat].si.value]
arr_units = excpdps[i][smplng][dpool][stat].si.unit
if isinstance(excpdps[i][smplng][dpool]['diagweights'], dict):
diagwts = 1.0
diagwts_shape = NP.ones(excpdps[i][smplng][dpool][stat].ndim, dtype=NP.int)
for ax in excpdps[i][smplng][dpool]['diagweights']:
tmp_shape = NP.copy(diagwts_shape)
tmp_shape[excpdps[i][smplng][dpool]['axesmap'][ax]] = excpdps[i][smplng][dpool]['diagweights'][ax].size
diagwts = diagwts * excpdps[i][smplng][dpool]['diagweights'][ax].reshape(tuple(tmp_shape))
elif isinstance(excpdps[i][smplng][dpool]['diagweights'], NP.ndarray):
diagwts = NP.copy(excpdps[i][smplng][dpool]['diagweights'])
else:
raise TypeError('Diagonal weights in input must be a dictionary or a numpy array')
diagweights += [diagwts]
diagweights = NP.asarray(diagweights)
arr = NP.asarray(arr)
arr = NP.nansum(arr * diagweights, axis=0) / NP.nansum(diagweights, axis=0) * arr_units
diagweights = NP.nansum(diagweights, axis=0)
out_excpdps[smplng][dpool][stat] = arr
out_excpdps[smplng][dpool]['diagweights'] = diagweights
if diagoffsets is not None:
if isinstance(diagoffsets, dict):
diagoffsets = [diagoffsets]
if not isinstance(diagoffsets, list):
raise TypeError('Input diagoffsets must be a list of dictionaries')
for ind in range(len(diagoffsets)):
for ax in diagoffsets[ind]:
if not isinstance(diagoffsets[ind][ax], (list, NP.ndarray)):
raise TypeError('Values in input dictionary diagoffsets must be a list or numpy array')
diagoffsets[ind][ax] = NP.asarray(diagoffsets[ind][ax])
for smplng in ['oversampled', 'resampled']:
if smplng in out_xcpdps:
for dpool in ['whole', 'submodel', 'residual']:
if dpool in out_xcpdps[smplng]:
masks = []
for ind in range(len(diagoffsets)):
mask_ones = NP.ones(out_xcpdps[smplng][dpool]['diagweights'].shape, dtype=NP.bool)
mask_agg = None
for ax in diagoffsets[ind]:
mltdim_slice = [slice(None)] * mask_ones.ndim
mltdim_slice[out_xcpdps[smplng][dpool]['axesmap'][ax].squeeze()] = NP.where(NP.isin(out_xcpdps[smplng][dpool]['diagoffsets'][ax], diagoffsets[ind][ax]))[0]
mask_tmp = NP.copy(mask_ones)
mask_tmp[tuple(mltdim_slice)] = False
if mask_agg is None:
mask_agg = NP.copy(mask_tmp)
else:
mask_agg = NP.logical_or(mask_agg, mask_tmp)
masks += [NP.copy(mask_agg)]
diagwts = NP.copy(out_xcpdps[smplng][dpool]['diagweights'])
out_xcpdps[smplng][dpool]['diagweights'] = []
for stat in ['mean', 'median']:
if stat in out_xcpdps[smplng][dpool]:
arr = NP.copy(out_xcpdps[smplng][dpool][stat].si.value)
arr_units = out_xcpdps[smplng][dpool][stat].si.unit
out_xcpdps[smplng][dpool][stat] = []
for ind in range(len(diagoffsets)):
masked_diagwts = MA.array(diagwts, mask=masks[ind])
axes_to_avg = tuple([out_xcpdps[smplng][dpool]['axesmap'][ax][0] for ax in diagoffsets[ind]])
out_xcpdps[smplng][dpool][stat] += [MA.sum(arr * masked_diagwts, axis=axes_to_avg, keepdims=True) / MA.sum(masked_diagwts, axis=axes_to_avg, keepdims=True) * arr_units]
if len(out_xcpdps[smplng][dpool]['diagweights']) < len(diagoffsets):
out_xcpdps[smplng][dpool]['diagweights'] += [MA.sum(masked_diagwts, axis=axes_to_avg, keepdims=True)]
if excpdps is not None:
for smplng in ['oversampled', 'resampled']:
if smplng in out_excpdps:
for dpool in ['errinfo']:
if dpool in out_excpdps[smplng]:
masks = []
for ind in range(len(diagoffsets)):
mask_ones = NP.ones(out_excpdps[smplng][dpool]['diagweights'].shape, dtype=NP.bool)
mask_agg = None
for ax in diagoffsets[ind]:
if ax != 2:
mltdim_slice = [slice(None)] * mask_ones.ndim
mltdim_slice[out_excpdps[smplng][dpool]['axesmap'][ax].squeeze()] = NP.where(NP.isin(out_excpdps[smplng][dpool]['diagoffsets'][ax], diagoffsets[ind][ax]))[0]
mask_tmp = NP.copy(mask_ones)
mask_tmp[tuple(mltdim_slice)] = False
if mask_agg is None:
mask_agg = NP.copy(mask_tmp)
else:
mask_agg = NP.logical_or(mask_agg, mask_tmp)
masks += [NP.copy(mask_agg)]
diagwts = NP.copy(out_excpdps[smplng][dpool]['diagweights'])
out_excpdps[smplng][dpool]['diagweights'] = []
for stat in ['mean', 'median']:
if stat in out_excpdps[smplng][dpool]:
arr = NP.copy(out_excpdps[smplng][dpool][stat].si.value)
arr_units = out_excpdps[smplng][dpool][stat].si.unit
out_excpdps[smplng][dpool][stat] = []
for ind in range(len(diagoffsets)):
masked_diagwts = MA.array(diagwts, mask=masks[ind])
axes_to_avg = tuple([out_excpdps[smplng][dpool]['axesmap'][ax][0] for ax in diagoffsets[ind] if ax!=2])
out_excpdps[smplng][dpool][stat] += [MA.sum(arr * masked_diagwts, axis=axes_to_avg, keepdims=True) / MA.sum(masked_diagwts, axis=axes_to_avg, keepdims=True) * arr_units]
if len(out_excpdps[smplng][dpool]['diagweights']) < len(diagoffsets):
out_excpdps[smplng][dpool]['diagweights'] += [MA.sum(masked_diagwts, axis=axes_to_avg, keepdims=True)]
return (out_xcpdps, out_excpdps)
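# Illustrative usage sketch (hypothetical helper name and diagonal choices):
# average a list of cross power spectra incoherently, restricting axis 1 (LST)
# to the main diagonal and axis 3 (triads) to the first three diagonals,
# following the diagoffsets convention described in the docstring above.
def _example_incoherent_average(xcpdps_list, excpdps_list=None):
    diagoffsets = [{1: NP.asarray([0]), 3: NP.arange(3)}]
    return incoherent_cross_power_spectrum_average(xcpdps_list, excpdps=excpdps_list, diagoffsets=diagoffsets)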
################################################################################
def incoherent_kbin_averaging(xcpdps, kbins=None, num_kbins=None, kbintype='log'):
"""
----------------------------------------------------------------------------
Averages the power spectrum incoherently by binning in bins of k. Returns
the power spectrum in units of both standard power spectrum and \Delta^2
Inputs:
xcpdps [dictionary] A dictionary that contains the incoherent averaged
power spectrum along LST and/or triads axes. This dictionary is
essentially the one(s) returned as the output of the function
incoherent_cross_power_spectrum_average()
kbins [NoneType, list or numpy array] Bins in k. If set to None
(default), it will be determined automatically based on the
inputs in num_kbins, and kbintype. If num_kbins is None and
kbintype='linear', the negative and positive values of k are
folded into a one-sided power spectrum. In this case, the
bins will approximately have the same resolution as the k-values
in the input power spectrum for all the spectral windows.
num_kbins [NoneType or integer] Number of k-bins. Used only if kbins is
set to None. If kbintype is set to 'linear', the negative and
positive values of k are folded into a one-sided power spectrum.
In this case, the bins will approximately have the same
resolution as the k-values in the input power spectrum for all
the spectral windows.
kbintype [string] Specifies the type of binning, used only if kbins is
set to None. Accepted values are 'linear' and 'log' for linear
and logarithmic bins respectively.
Outputs:
Dictionary containing the power spectrum information. At the top level, it
contains keys specifying the sampling to be 'oversampled' or 'resampled'.
Under each of these keys is another dictionary containing the following
keys:
'z' [numpy array] Redshifts corresponding to the band centers in
'freq_center'. It has shape=(nspw,)
'lags' [numpy array] Delays (in seconds). It has shape=(nlags,).
'freq_center'
[numpy array] contains the center frequencies (in Hz) of the
frequency subbands of the subband delay spectra. It is of size
n_win. It is roughly equivalent to redshift(s)
'freq_wts'
[numpy array] Contains frequency weights applied on each
frequency sub-band during the subband delay transform. It is
of size n_win x nchan.
'bw_eff'
[numpy array] contains the effective bandwidths (in Hz) of the
subbands being delay transformed. It is of size n_win. It is
roughly equivalent to width in redshift or along line-of-sight
'shape' [string] shape of the frequency window function applied. Usual
values are 'rect' (rectangular), 'bhw' (Blackman-Harris),
'bnw' (Blackman-Nuttall).
'fftpow'
[scalar] the power to which the FFT of the window was raised.
The value must be a positive scalar with default = 1.0
'lag_corr_length'
[numpy array] It is the correlation timescale (in pixels) of
the subband delay spectra. It is proportional to inverse of
effective bandwidth. It is of size n_win. The unit size of a
pixel is determined by the difference between adjacent pixels
in lags under key 'lags' which in turn is effectively inverse
of the effective bandwidth of the subband specified in bw_eff
It further contains 3 keys named 'whole', 'submodel', and 'residual'
or one key named 'errinfo' each of which is a dictionary. 'whole'
contains power spectrum info about the input closure phases. 'submodel'
contains power spectrum info about the model that will have been
subtracted (as closure phase) from the 'whole' model. 'residual'
contains power spectrum info about the closure phases obtained as a
difference between 'whole' and 'submodel'. 'errinfo' contains power
spectrum information about the subsample differences. There is also
another dictionary under key 'kbininfo' that contains information about
k-bins. These dictionaries contain the following keys and values:
'whole'/'submodel'/'residual'/'errinfo'
[dictionary] It contains the following keys and values:
'mean' [dictionary] Delay power spectrum information under the
'mean' statistic incoherently obtained by averaging the
input power spectrum in bins of k. It contains output power
spectrum expressed as two quantities each of which is a
dictionary with the following key-value pairs:
'PS' [list of numpy arrays] Standard power spectrum in
units of 'K2 Mpc3'. Each numpy array in the list
maps to a specific combination of axes and axis
diagonals chosen for incoherent averaging in
earlier processing such as in the function
incoherent_cross_power_spectrum_average(). The
numpy array has a shape similar to the input power
spectrum, but that last axis (k-axis) will have a
different size that depends on the k-bins that
were used in the incoherent averaging along that
axis.
'Del2' [list of numpy arrays] power spectrum in Delta^2
units of 'K2'. Each numpy array in the list
maps to a specific combination of axes and axis
diagonals chosen for incoherent averaging in
earlier processing such as in the function
incoherent_cross_power_spectrum_average(). The
numpy array has a shape similar to the input power
spectrum, but that last axis (k-axis) will have a
different size that depends on the k-bins that
were used in the incoherent averaging along that
axis.
'median'
[dictionary] Delay power spectrum information under the
'median' statistic incoherently obtained by averaging the
input power spectrum in bins of k. It contains output power
spectrum expressed as two quantities each of which is a
dictionary with the following key-value pairs:
'PS' [list of numpy arrays] Standard power spectrum in
units of 'K2 Mpc3'. Each numpy array in the list
maps to a specific combination of axes and axis
diagonals chosen for incoherent averaging in
earlier processing such as in the function
incoherent_cross_power_spectrum_average(). The
numpy array has a shape similar to the input power
spectrum, but that last axis (k-axis) will have a
different size that depends on the k-bins that
were used in the incoherent averaging along that
axis.
'Del2' [list of numpy arrays] power spectrum in Delta^2
units of 'K2'. Each numpy array in the list
maps to a specific combination of axes and axis
diagonals chosen for incoherent averaging in
earlier processing such as in the function
incoherent_cross_power_spectrum_average(). The
numpy array has a shape similar to the input power
spectrum, but that last axis (k-axis) will have a
different size that depends on the k-bins that
were used in the incoherent averaging along that
axis.
'kbininfo'
[dictionary] Contains the k-bin information. It contains the
following key-value pairs:
'counts'
[list] List of numpy arrays where each numpy array stores
the counts in the determined k-bins. Each numpy array in the
list corresponds to a spectral window (redshift subband). The
shape of each numpy array is (nkbins,)
'kbin_edges'
[list] List of numpy arrays where each numpy array contains the
k-bin edges. Each array in the list corresponds to a spectral
window (redshift subband). The shape of each array is
(nkbins+1,).
'kbinnum'
[list] List of numpy arrays containing the bin number under
which the k value falls. Each array in the list corresponds to
a spectral window (redshift subband). The shape of each array
is (nlags,).
'ri'
[list] List of numpy arrays containing the reverse indices for
each k-bin. Each array in the list corresponds to a spectral
window (redshift subband). The shape of each array is
(nlags+nkbins+1,).
'whole'/'submodel'/'residual' or 'errinfo' [dictionary] k-bin info
estimated for the different datapools under different stats
and PS definitions. It has the keys 'mean' and 'median' for the
mean and median statistics respectively. Each of them contains a
dictionary with the following key-value pairs:
'PS' [list] List of numpy arrays where each numpy array
contains a standard power spectrum typically in units of
'K2 Mpc3'. Its shape is the same as input power spectrum
except the k-axis which now has nkbins number of
elements.
'Del2' [list] List of numpy arrays where each numpy array
contains a Delta^2 power spectrum typically in units of
'K2'. Its shape is the same as input power spectrum
except the k-axis which now has nkbins number of
elements.
----------------------------------------------------------------------------
"""
if not isinstance(xcpdps, dict):
raise TypeError('Input xcpdps must be a dictionary')
if kbins is not None:
if not isinstance(kbins, (list,NP.ndarray)):
raise TypeError('Input kbins must be a list or numpy array')
else:
if not isinstance(kbintype, str):
raise TypeError('Input kbintype must be a string')
if kbintype.lower() not in ['linear', 'log']:
raise ValueError('Input kbintype must be set to "linear" or "log"')
if kbintype.lower() == 'log':
if num_kbins is None:
num_kbins = 10
psinfo = {}
keys = ['triads', 'triads_ind', 'lst', 'lst_ind', 'dlst', 'days', 'day_ind', 'dday']
for key in keys:
psinfo[key] = xcpdps[key]
sampling = ['oversampled', 'resampled']
sampling_keys = ['z', 'freq_center', 'bw_eff', 'shape', 'freq_wts', 'lag_corr_length']
dpool_keys = ['whole', 'submodel', 'residual', 'errinfo']
for smplng in sampling:
if smplng in xcpdps:
psinfo[smplng] = {}
for key in sampling_keys:
psinfo[smplng][key] = xcpdps[smplng][key]
kprll = xcpdps[smplng]['kprll']
lags = xcpdps[smplng]['lags']
eps = 1e-10
if kbins is None:
dkprll = NP.max(NP.mean(NP.diff(kprll, axis=-1), axis=-1))
if kbintype.lower() == 'linear':
bins_kprll = NP.linspace(eps, NP.abs(kprll).max()+eps, num=kprll.shape[1]//2+1, endpoint=True)
else:
bins_kprll = NP.geomspace(eps, NP.abs(kprll).max()+eps, num=num_kbins+1, endpoint=True)
bins_kprll = NP.insert(bins_kprll, 0, -eps)
else:
bins_kprll = NP.asarray(kbins)
num_kbins = bins_kprll.size - 1
psinfo[smplng]['kbininfo'] = {'counts': [], 'kbin_edges': [], 'kbinnum': [], 'ri': []}
for spw in range(kprll.shape[0]):
counts, kbin_edges, kbinnum, ri = OPS.binned_statistic(NP.abs(kprll[spw,:]), statistic='count', bins=bins_kprll)
counts = counts.astype(NP.int)
psinfo[smplng]['kbininfo']['counts'] += [NP.copy(counts)]
psinfo[smplng]['kbininfo']['kbin_edges'] += [kbin_edges / U.Mpc]
psinfo[smplng]['kbininfo']['kbinnum'] += [NP.copy(kbinnum)]
psinfo[smplng]['kbininfo']['ri'] += [NP.copy(ri)]
for dpool in dpool_keys:
if dpool in xcpdps[smplng]:
psinfo[smplng][dpool] = {}
psinfo[smplng]['kbininfo'][dpool] = {}
keys = ['diagoffsets', 'diagweights', 'axesmap']
for key in keys:
psinfo[smplng][dpool][key] = xcpdps[smplng][dpool][key]
for stat in ['mean', 'median']:
if stat in xcpdps[smplng][dpool]:
psinfo[smplng][dpool][stat] = {'PS': [], 'Del2': []}
psinfo[smplng]['kbininfo'][dpool][stat] = []
for combi in range(len(xcpdps[smplng][dpool][stat])):
outshape = NP.asarray(xcpdps[smplng][dpool][stat][combi].shape)
outshape[-1] = num_kbins
tmp_dps = NP.full(tuple(outshape), NP.nan, dtype=NP.complex) * U.Unit(xcpdps[smplng][dpool][stat][combi].unit)
tmp_Del2 = NP.full(tuple(outshape), NP.nan, dtype=NP.complex) * U.Unit(xcpdps[smplng][dpool][stat][combi].unit / U.Mpc**3)
tmp_kprll = NP.full(tuple(outshape), NP.nan, dtype=NP.float) / U.Mpc
for spw in range(kprll.shape[0]):
counts = NP.copy(psinfo[smplng]['kbininfo']['counts'][spw])
ri = NP.copy(psinfo[smplng]['kbininfo']['ri'][spw])
print('Processing datapool={0}, stat={1}, LST-Day-Triad combination={2:0d}, spw={3:0d}...'.format(dpool, stat, combi, spw))
progress = PGB.ProgressBar(widgets=[PGB.Percentage(), PGB.Bar(marker='-', left=' |', right='| '), PGB.Counter(), '/{0:0d} k-bins '.format(num_kbins), PGB.ETA()], maxval=num_kbins).start()
for binnum in range(num_kbins):
if counts[binnum] > 0:
ind_kbin = ri[ri[binnum]:ri[binnum+1]]
tmp_dps[spw,...,binnum] = NP.nanmean(NP.take(xcpdps[smplng][dpool][stat][combi][spw], ind_kbin, axis=-1), axis=-1)
k_shape = NP.ones(NP.take(xcpdps[smplng][dpool][stat][combi][spw], ind_kbin, axis=-1).ndim, dtype=NP.int)
k_shape[-1] = -1
tmp_Del2[spw,...,binnum] = NP.nanmean(NP.abs(kprll[spw,ind_kbin].reshape(tuple(k_shape))/U.Mpc)**3 * NP.take(xcpdps[smplng][dpool][stat][combi][spw], ind_kbin, axis=-1), axis=-1) / (2*NP.pi**2)
tmp_kprll[spw,...,binnum] = NP.nansum(NP.abs(kprll[spw,ind_kbin].reshape(tuple(k_shape))/U.Mpc) * NP.abs(NP.take(xcpdps[smplng][dpool][stat][combi][spw], ind_kbin, axis=-1)), axis=-1) / NP.nansum(NP.abs(NP.take(xcpdps[smplng][dpool][stat][combi][spw], ind_kbin, axis=-1)), axis=-1)
progress.update(binnum+1)
progress.finish()
psinfo[smplng][dpool][stat]['PS'] += [copy.deepcopy(tmp_dps)]
psinfo[smplng][dpool][stat]['Del2'] += [copy.deepcopy(tmp_Del2)]
psinfo[smplng]['kbininfo'][dpool][stat] += [copy.deepcopy(tmp_kprll)]
return psinfo
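# Illustrative usage sketch (hypothetical helper name): bin an incoherently
# averaged cross power spectrum into 10 logarithmically spaced k-bins, which
# matches the default behaviour when kbins is None and kbintype='log'.
def _example_kbin_average(avg_xcpdps):
    return incoherent_kbin_averaging(avg_xcpdps, kbins=None, num_kbins=10, kbintype='log')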
################################################################################
class ClosurePhase(object):
"""
----------------------------------------------------------------------------
Class to hold and operate on Closure Phase information.
It has the following attributes and member functions.
Attributes:
extfile [string] Full path to external file containing information
of ClosurePhase instance. The file is in HDF5 format
cpinfo [dictionary] Contains the following top level keys,
namely, 'raw', 'processed', and 'errinfo'
Under key 'raw' which holds a dictionary, the subkeys
include 'cphase' (nlst,ndays,ntriads,nchan),
'triads' (ntriads,3), 'lst' (nlst,ndays), and 'flags'
(nlst,ndays,ntriads,nchan).
Under the 'processed' key are more subkeys, namely,
'native', 'prelim', and optionally 'submodel' and 'residual'
each holding a dictionary.
Under 'native' dictionary, the subsubkeys for further
dictionaries are 'cphase' (masked array:
(nlst,ndays,ntriads,nchan)), 'eicp' (complex masked
array: (nlst,ndays,ntriads,nchan)), and 'wts' (masked
array: (nlst,ndays,ntriads,nchan)).
Under 'prelim' dictionary, the subsubkeys for further
dictionaries are 'tbins' (numpy array of tbin centers
after smoothing), 'dtbins' (numpy array of tbin
intervals), 'wts' (masked array:
(ntbins,ndays,ntriads,nchan)), 'eicp' and 'cphase'.
The dictionaries under 'eicp' are indexed by keys
'mean' (complex masked array:
(ntbins,ndays,ntriads,nchan)), and 'median' (complex
masked array: (ntbins,ndays,ntriads,nchan)).
The dictionaries under 'cphase' are indexed by keys
'mean' (masked array: (ntbins,ndays,ntriads,nchan)),
'median' (masked array: (ntbins,ndays,ntriads,nchan)),
'rms' (masked array: (ntbins,ndays,ntriads,nchan)), and
'mad' (masked array: (ntbins,ndays,ntriads,nchan)). The
last one denotes Median Absolute Deviation.
Under 'submodel' dictionary, the subsubkeys for further
dictionaries are 'cphase' (masked array:
(nlst,ndays,ntriads,nchan)), and 'eicp' (complex masked
array: (nlst,ndays,ntriads,nchan)).
Under 'residual' dictionary, the subsubkeys for further
dictionaries are 'cphase' and 'eicp'. These are
dictionaries too. The dictionaries under 'eicp' are
indexed by keys 'mean' (complex masked array:
(ntbins,ndays,ntriads,nchan)), and 'median' (complex
masked array: (ntbins,ndays,ntriads,nchan)).
The dictionaries under 'cphase' are indexed by keys
'mean' (masked array: (ntbins,ndays,ntriads,nchan)),
and 'median' (masked array:
(ntbins,ndays,ntriads,nchan)).
Under key 'errinfo', it contains the following keys and
values:
'list_of_pair_of_pairs'
List of pair of pairs for which differences of
complex exponentials have been computed, where the
elements are bins of days. The number of elements
in the list is ncomb. And each element is a smaller
(4-element) list of pair of pairs
'eicp_diff'
Difference of complex exponentials between pairs
of day bins. This will be used in evaluating noise
properties in power spectrum. It is a dictionary
with two keys '0' and '1' where each contains the
difference from a pair of subsamples. Each of these
keys contains a numpy array of shape
(nlstbins,ncomb,2,ntriads,nchan)
'wts' Weights in difference of complex exponentials
obtained by sum of squares of weights that are
associated with the pair that was used in the
differencing. It is a dictionary with two keys '0'
and '1' where each contains the associated weights.
It is of shape (nlstbins,ncomb,2,ntriads,nchan)
Member functions:
__init__() Initialize an instance of class ClosurePhase
expicp() Compute and return complex exponential of the closure phase
as a masked array
smooth_in_tbins()
Smooth the complex exponentials of closure phases in LST
bins. Both mean and median smoothing is produced.
subtract() Subtract complex exponential of the bispectrum phase
from the current instance and updates the cpinfo attribute
subsample_differencing()
Create subsamples and differences between subsamples to
evaluate noise properties from the data set.
save() Save contents of attribute cpinfo in external HDF5 file
----------------------------------------------------------------------------
"""
def __init__(self, infile, freqs, infmt='npz'):
"""
------------------------------------------------------------------------
Initialize an instance of class ClosurePhase
Inputs:
infile [string] Input file including full path. It could be a NPZ
with raw data, or a HDF5 file that could contain raw or
processed data. The input file format is specified in the
input infmt. If it is a NPZ file, it must contain the
following keys/files:
'closures' [numpy array] Closure phase (radians). It is of
shape (nlst,ndays,ntriads,nchan)
'triads' [numpy array] Array of triad tuples, of shape
(ntriads,3)
'flags' [numpy array] Array of flags (boolean), of shape
(nlst,ndays,ntriads,nchan)
'last' [numpy array] Array of LST for each day (CASA
units which is MJD+6713). Shape is (nlst,ndays)
'days' [numpy array] Array of days, shape is (ndays,)
'averaged_closures'
[numpy array] optional array of closure phases
averaged across days. Shape is
(nlst,ntriads,nchan)
'std_dev_lst'
[numpy array] optional array of standard
deviation of closure phases across days. Shape
is (nlst,ntriads,nchan)
'std_dev_triads'
[numpy array] optional array of standard
deviation of closure phases across triads.
Shape is (nlst,ndays,nchan)
freqs [numpy array] Frequencies (in Hz) in the input. Size is
nchan.
infmt [string] Input file format. Accepted values are 'npz'
(default) and 'hdf5'.
------------------------------------------------------------------------
"""
if not isinstance(infile, str):
raise TypeError('Input infile must be a string')
if not isinstance(freqs, NP.ndarray):
raise TypeError('Input freqs must be a numpy array')
freqs = freqs.ravel()
if not isinstance(infmt, str):
raise TypeError('Input infmt must be a string')
if infmt.lower() not in ['npz', 'hdf5']:
raise ValueError('Input infmt must be "npz" or "hdf5"')
if infmt.lower() == 'npz':
infilesplit = infile.split('.npz')
infile_noext = infilesplit[0]
self.cpinfo = loadnpz(infile)
# npz2hdf5(infile, infile_noext+'.hdf5')
self.extfile = infile_noext + '.hdf5'
else:
# if not isinstance(infile, h5py.File):
# raise TypeError('Input infile is not a valid HDF5 file')
self.extfile = infile
self.cpinfo = NMO.load_dict_from_hdf5(self.extfile)
if freqs.size != self.cpinfo['raw']['cphase'].shape[-1]:
raise ValueError('Input frequencies do not match with dimensions of the closure phase data')
self.f = freqs
self.df = freqs[1] - freqs[0]
force_expicp = False
if 'processed' not in self.cpinfo:
force_expicp = True
else:
if 'native' not in self.cpinfo['processed']:
force_expicp = True
self.expicp(force_action=force_expicp)
if 'prelim' not in self.cpinfo['processed']:
self.cpinfo['processed']['prelim'] = {}
self.cpinfo['errinfo'] = {}
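# Example instantiation (hypothetical NPZ path and frequency array):
#     freqs = NP.linspace(150e6, 167.5e6, 256, endpoint=False)
#     cpObj = ClosurePhase('/path/to/closure_phases.npz', freqs, infmt='npz')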
############################################################################
def expicp(self, force_action=False):
"""
------------------------------------------------------------------------
Compute the complex exponential of the closure phase as a masked array
Inputs:
force_action [boolean] If set to False (default), the complex
exponential is computed only if it has not been done so
already. Otherwise the computation is forced.
------------------------------------------------------------------------
"""
if 'processed' not in self.cpinfo:
self.cpinfo['processed'] = {}
force_action = True
if 'native' not in self.cpinfo['processed']:
self.cpinfo['processed']['native'] = {}
force_action = True
if 'cphase' not in self.cpinfo['processed']['native']:
self.cpinfo['processed']['native']['cphase'] = MA.array(self.cpinfo['raw']['cphase'].astype(NP.float64), mask=self.cpinfo['raw']['flags'])
force_action = True
if not force_action:
if 'eicp' not in self.cpinfo['processed']['native']:
self.cpinfo['processed']['native']['eicp'] = NP.exp(1j * self.cpinfo['processed']['native']['cphase'])
self.cpinfo['processed']['native']['wts'] = MA.array(NP.logical_not(self.cpinfo['raw']['flags']).astype(NP.float), mask=self.cpinfo['raw']['flags'])
else:
self.cpinfo['processed']['native']['eicp'] = NP.exp(1j * self.cpinfo['processed']['native']['cphase'])
self.cpinfo['processed']['native']['wts'] = MA.array(NP.logical_not(self.cpinfo['raw']['flags']).astype(NP.float), mask=self.cpinfo['raw']['flags'])
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 4 09:30:20 2019
E-mail: <EMAIL>
@author: xieydd
@description:
"""
import scipy.io as sio
import numpy as np
import time
import gc
from datetime import timedelta
import os
import sys
sys.path.append(os.path.abspath("../"))
from FullSpectrum import fv_hibert
def mat2pb(abspath,output):
"""
*Args: abspath - mat abspath xxx/xxx/xxx/1.mat
output - npy path xxx/xxx/xxx/
*output: null
"""
name = abspath.split('/')[-1].split('.')[0]
data = sio.loadmat(abspath)[name]
print(data.shape)
np.save(output+name,data)
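# Illustrative usage sketch (hypothetical paths): convert a single IMS .mat
# file into a .npy array written to the given output directory.
def _example_mat2pb():
    mat2pb('/data/IMS/E5.mat', '/data/IMS_npy/')  # writes /data/IMS_npy/E5.npy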
def get_time(start_time):
end_time = time.time()
return timedelta(seconds=int(round(end_time-start_time)))
# Read the bearing data
def readfile(path):
files = os.listdir(path)
E = []
nums = 0
for filename in files:
E.append(sio.loadmat(path+"/"+filename)[filename.split('.',-1)[0]])
nums += 1
print(filename.split('.',-1)[0])
return E[0],E[1],E[2],E[3],E[4],E[5]
def loadmat(abs_filename):
return sio.loadmat(abs_filename)[abs_filename.split('/')[-1].split('.')[0]]
def test(argv):
e3,e4,e5,e6,e7,e8 = readfile(argv[0])
del e3,e4,e7,e8
gc.collect()
shape = e5.shape
FvHibert = fv_hibert.FvHibert(shape[1],shape[1])
e5_1 = e5[0,:]
e6_1 = e6[0,:]
del e5,e6
gc.collect()
x_amplitude_envelope_inner,y_amplitude_envelope_inner = FvHibert.hibert(e5_1,e6_1)
#x_amplitude_envelope_roll,y_amplitude_envelope_roll = FvHibert.hibert(e7,e8)
#Plot = plot.Plot(shape[1],shape[1])
#Plot.plot_amplitude_envelope(e5_1,e6_1)
RL_inner = FvHibert.fv_hibert(x_amplitude_envelope_inner,y_amplitude_envelope_inner)
Plot = plot.Plot(RL_inner.shape[0],RL_inner.shape[0]*2)
Plot.plot_fv_hibert(RL_inner)
print(RL_inner.shape)
def mat2npy(argv):
"""
Get the full vector spectrum and compress the
memory usage by converting float64 to float32
"""
x = loadmat('J:/FullSpectrum_CRNN_Tensorflow/IMS_data/E5.mat')
y = loadmat('J:/FullSpectrum_CRNN_Tensorflow/IMS_data/E6.mat')
shape = x.shape
FvHibert = fv_hibert.FvHibert(shape[1],shape[1])
output = np.zeros((shape[0],int(shape[1]/2)),dtype=np.float32)
for i in range(shape[0]):
x_amplitude_envelope, y_amplitude_envelope = FvHibert.hibert(x[i,:],y[i,:])
RL = FvHibert.fv_hibert(x_amplitude_envelope, y_amplitude_envelope)
RL = RL.astype(np.float32)
output[i,:] = RL
outputs = output.reshape(shape[0]*10,-1)
print(outputs.shape)
np.save('J:/FullSpectrum_CRNN_Tensorflow/IMS_data/inner', outputs)
#!/usr/bin/env python
###############################################################################
# Copyright Kitware Inc. and Contributors
# Distributed under the Apache License, 2.0 (apache.org/licenses/LICENSE-2.0)
# See accompanying Copyright.txt and LICENSE files for details
###############################################################################
import re
import numpy as np
from shapely.geometry import Polygon, LineString
from shapely.ops import polygonize, unary_union
def list_intersect(a, b):
""" return the intersection of two lists """
return list(set(a) & set(b))
def list_union(a, b):
""" return the union of two lists """
return list(set(a) | set(b))
def ply_parser(fp):
'''
:param fp: PLY file path
:return: Surface coordinates and surface index
'''
tf = open(fp)
lines = tf.readlines()
flag = 0
for l in lines:
if re.search("\s*element\s*vertex\s*\d*", l) is not None:
vertex_num = int(re.findall("\d+\.?\d*", l)[0])
if re.search("\s*element\s*face\s*\d*", l) is not None:
face_num = int(re.findall("\d+\.?\d*", l)[0])
if re.search("end_header", l) is not None:
begin_num = flag + 1
flag += 1
x = [float(re.findall("-*\d+\.?\d*", l)[0]) for l in lines[begin_num:begin_num + vertex_num]]
y = [float(re.findall("-*\d+\.?\d*", l)[1]) for l in lines[begin_num:begin_num + vertex_num]]
z = [float(re.findall("-*\d+\.?\d*", l)[2]) for l in lines[begin_num:begin_num + vertex_num]]
cor = [[x[i], y[i], z[i]] for i in range(0, len(x))]
cor = np.asarray(cor)
f = [re.findall("\d+\.?\d*", l)
for l in lines[begin_num + vertex_num:begin_num + vertex_num + face_num]]
return cor, f
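# Illustrative sketch (hypothetical temporary path): write a minimal ASCII PLY
# file with three vertices and one triangular face, then parse it back.
def _example_ply_parser(tmp_path='/tmp/_example_tri.ply'):
    ply = ("ply\nformat ascii 1.0\n"
           "element vertex 3\nproperty float x\nproperty float y\nproperty float z\n"
           "element face 1\nproperty list uchar int vertex_indices\n"
           "end_header\n"
           "0 0 0\n1 0 0\n0 1 0\n"
           "3 0 1 2\n")
    with open(tmp_path, 'w') as f:
        f.write(ply)
    cor, faces = ply_parser(tmp_path)
    return cor, faces  # cor has shape (3, 3); faces == [['3', '0', '1', '2']]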
def check_relation(plane1, plane2):
'''
Checking spatial relationship between planes.
:param plane1:
:param plane2:
:return: spatial relationship tag
'''
p1 = Polygon(plane1)
p2 = Polygon(plane2)
try:
if p1.intersects(p2):
if p1.contains(p2):
flag = 1
else:
if p1.area >= p2.area:
flag = 2
else:
flag = 3
else:
flag = 4
return flag
except: # noqa: E722
return 4
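# Illustrative sketch: two axis-aligned squares where the first contains the
# second, which should return flag 1 per the convention above.
def _example_check_relation():
    outer = np.array([[0.0, 0.0], [10.0, 0.0], [10.0, 10.0], [0.0, 10.0]])
    inner = np.array([[2.0, 2.0], [4.0, 2.0], [4.0, 4.0], [2.0, 4.0]])
    return check_relation(outer, inner)  # expected: 1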
def get_height_from_dem(cor, dem_parameter):
'''
Get Z coordinate from DEM based on given XY coordinate.
r1-r4 represent the image boundaries for coordinates outside.
:param cor: XY coordinate
:param dem: DEM object
:return: Z coordinate
'''
xOrigin = dem_parameter[0]
yOrigin = dem_parameter[1]
pixelWidth = dem_parameter[2]
pixelHeight = dem_parameter[3]
data = dem_parameter[4]
r = dem_parameter[5]
base_height = []
for i in range(cor.shape[0]):
x = cor[i, 0]
y = cor[i, 1]
xOffset = int((x - xOrigin) / pixelWidth)
yOffset = int((y - yOrigin) / pixelHeight)
try:
value = data[yOffset][xOffset]
base_height.append(value)
except: # noqa: E722
dist_2 = np.sum((r - np.array([yOffset, xOffset])) ** 2, axis=1)
index = np.argmin(dist_2)
value = data[r[index, 0]][r[index, 1]]
base_height.append(value)
return np.array(base_height)
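# Illustrative sketch (hypothetical DEM values): look up heights on a flat 4x4
# DEM at 100 m with 1 m pixels. The dem_parameter tuple follows the order used
# above, (xOrigin, yOrigin, pixelWidth, pixelHeight, data, r), where `r` holds
# the valid (row, col) indices used as the nearest-pixel fallback.
def _example_height_from_dem():
    data = np.full((4, 4), 100.0)
    r = np.argwhere(np.isfinite(data))
    dem_parameter = (0.0, 4.0, 1.0, -1.0, data, r)
    cor = np.array([[1.5, 2.5], [3.5, 0.5]])
    return get_height_from_dem(cor, dem_parameter)  # -> array([100., 100.])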
def get_height_from_lower_surface(plane1, plane2):
'''
:param plane1: Higher surface
:param plane2: Lower surface
:return: Z coordinate on lower surface
'''
[a, b, c, d] = fit_plane(plane1)
def z(x):
return -(a * x[0] + b * x[1] + d) / c
return z([plane2[:, 0], plane2[:, 1]])
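# Illustrative sketch (hypothetical surfaces): fit a gently tilted upper
# surface and evaluate its height at the XY footprint of a flat lower surface.
def _example_height_from_lower_surface():
    xx, yy = np.meshgrid(np.arange(5.0), np.arange(5.0))
    upper = np.column_stack([xx.ravel(), yy.ravel(), 2.0 + 0.1 * xx.ravel()])
    lower = np.column_stack([xx.ravel(), yy.ravel(), np.zeros(xx.size)])
    return get_height_from_lower_surface(upper, lower)  # ~ 2.0 + 0.1 * x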
def get_difference_plane(plane1, plane2):
'''
Get difference and intersection part for two planes
:param plane1:
:param plane2:
:return:
'''
try:
p1 = Polygon(plane1)
p2 = Polygon(plane2)
pd = p2.difference(p1)
pi = p2.intersection(p1)
flag = True
p3 = np.array(pd.exterior.coords[:])
p4 = np.array(pi.exterior.coords[:])
return [flag, p3, p4]
except: # noqa: E722
flag = False
p3 = None
p4 = None
return [flag, p3, p4]
def fit_plane(point):
'''
Using normal vector and distance to origin to represent a plane.
:param point: Plane coordinates
:return: Plane parameters
'''
xyz_mean = np.array([point[:, 0].mean(), point[:, 1].mean(), point[:, 2].mean()])
xyz_m = np.array(
[point[:, 0] - xyz_mean[0], point[:, 1] - xyz_mean[1], point[:, 2] - xyz_mean[2]])
[U, S, V] = np.linalg.svd(xyz_m)
# The plane normal is the left singular vector of the centered 3 x N
# coordinate matrix associated with the smallest singular value.
a, b, c = U[:, 2]
d = -(a * xyz_mean[0] + b * xyz_mean[1] + c * xyz_mean[2])
return [a, b, c, d]
import numpy as np
import os ,pickle ,math ,time
from Methods.Iterations.iters import IterOneAsync
from Methods.Tools.Funcs import *
from Methods.Tools.initial import *
from Methods.Tools.Update import *
from Methods.Tools.Core import *
start_time = time.time()
''' This code is in Alpha phase. Don't trust this at all..... '''
def Dynamics(IniVector,inter_mat,steps,values,fixed_state,turn_state,networkx,plot_nodes,PlotNodes):
''' Updates the initial vector for given time steps and gives the steady state vectors '''
state_traj = []
state_vect = []
''' For numba functionality '''
inter_mat = inter_mat.astype('float64') #For better calculations
values = np.array(values) #For numba
prevVector = IniVector #Initial vector
state_vect = [prevVector] #Appending initial vector to state vectors
index = 0
for i in range(steps): #Time dynamics for given steps
if turn_state:
prevVector,turn_state,fixed_state = UpdateTurnState(prevVector, i,turn_state, fixed_state)
nextVector = IterOneAsync(inter_mat, prevVector,values)
if fixed_state:
nextVector = UpdateFixedState(nextVector, fixed_state)
if networkx:
state_vect.append([nextVector])
prevV = vect2num(prevVector)
nextV = vect2num(nextVector)
traj = (prevV,nextV)
state_traj.append(traj)
if i < steps-1:
prevVector = nextVector
#adding this trajectory to the state trajectory vector
if plot_nodes:
state_vect1 = np.transpose(state_vect)