"""TNQMetro: Tensor-network based package for efficient quantum metrology computations."""
# Table of Contents
#
# 1 Functions for finite size systems......................................29
# 1.1 High level functions...............................................37
# 1.2 Low level functions...............................................257
# 1.2.1 Problems with exact derivative.............................1207
# 1.2.2 Problems with discrete approximation of the derivative.....2411
# 2 Functions for infinite size systems..................................3808
# 2.1 High level functions.............................................3816
# 2.2 Low level functions..............................................4075
# 3 Auxiliary functions..................................................5048
import itertools
import math
import warnings
import numpy as np
from ncon import ncon
########################################
# #
# #
# 1 Functions for finite size systems. #
# #
# #
########################################
#############################
# #
# 1.1 High level functions. #
# #
#############################
def fin(N, so_before_list, h, so_after_list, BC='O', L_ini=None, psi0_ini=None, imprecision=10**-2, D_L_max=100, D_L_max_forced=False, L_herm=True, D_psi0_max=100, D_psi0_max_forced=False):
"""
Optimization of the QFI over operator L (in MPO representation) and wave function psi0 (in MPS representation) and check of convergence in their bond dimensions. Function for finite size systems.
User has to provide information about the dynamics by specifying the quantum channel. It is assumed that the quantum channel is translationally invariant and is built from layers of quantum operations.
User has to provide one defining operation for each layer as a local superoperator. These local superoperators have to be input in order of their action on the system.
Parameter encoding is a distinguished quantum operation: it is assumed to act only once and to be unitary, so the user has to provide only its generator h.
Generator h has to be diagonal in the computational basis; in other words, it is assumed that local superoperators are expressed in the eigenbasis of h.
Parameters:
N: integer
Number of sites in the chain of tensors (usually number of particles).
so_before_list: list of ndarrays of a shape (d**(2*k),d**(2*k)) where k is the number of sites on which a given local superoperator acts
List of local superoperators (in order) which act before unitary parameter encoding.
h: ndarray of a shape (d,d)
Generator of unitary parameter encoding. Dimension d is the dimension of local Hilbert space (dimension of physical index).
Generator h has to be diagonal in the computational basis, or in other words, it is assumed that local superoperators are expressed in the eigenbasis of h.
so_after_list: list of ndarrays of a shape (d**(2*k),d**(2*k)) where k is the number of sites on which a given local superoperator acts
List of local superoperators (in order) which act after unitary parameter encoding.
BC: 'O' or 'P', optional
Boundary conditions, 'O' for OBC, 'P' for PBC.
L_ini: list of length N of ndarrays of a shape (Dl_L,Dr_L,d,d) for OBC (Dl_L, Dr_L can vary between sites) or ndarray of a shape (D_L,D_L,d,d,N) for PBC, optional
Initial MPO for L.
psi0_ini: list of length N of ndarrays of a shape (Dl_psi0,Dr_psi0,d) for OBC (Dl_psi0, Dr_psi0 can vary between sites) or ndarray of a shape (D_psi0,D_psi0,d,N) for PBC, optional
Initial MPS for psi0.
imprecision: float, optional
Expected relative imprecision of the end results.
D_L_max: integer, optional
Maximal value of D_L (D_L is bond dimension for MPO representing L).
D_L_max_forced: bool, optional
True if D_L_max has to be reached, otherwise False.
L_herm: bool, optional
True if Hermitian gauge has to be imposed on MPO representing L, otherwise False.
D_psi0_max: integer, optional
Maximal value of D_psi0 (D_psi0 is bond dimension for MPS representing psi0).
D_psi0_max_forced: bool, optional
True if D_psi0_max has to be reached, otherwise False.
Returns:
result: float
Optimal value of figure of merit.
result_m: ndarray
Matrix describing the figure of merit as a function of the bond dimensions of L [rows] and psi0 [columns].
L: list of length N of ndarrays of a shape (Dl_L,Dr_L,d,d) for OBC (Dl_L, Dr_L can vary between sites) or ndarray of a shape (D_L,D_L,d,d,N) for PBC
Optimal L in MPO representation.
psi0: list of length N of ndarrays of a shape (Dl_psi0,Dr_psi0,d) for OBC (Dl_psi0, Dr_psi0 can vary between sites) or ndarray of a shape (D_psi0,D_psi0,d,N) for PBC
Optimal psi0 in MPS representation.
"""
if np.linalg.norm(h - np.diag(np.diag(h))) > 10**-10:
warnings.warn('Generator h has to be diagonal in the computational basis; in other words, it is assumed that local superoperators are expressed in the eigenbasis of h.')
d = np.shape(h)[0]
ch = fin_create_channel(N, d, BC, so_before_list + so_after_list)
ch2 = fin_create_channel_derivative(N, d, BC, so_before_list, h, so_after_list)
result, result_m, L, psi0 = fin_gen(N, d, BC, ch, ch2, None, L_ini, psi0_ini, imprecision, D_L_max, D_L_max_forced, L_herm, D_psi0_max, D_psi0_max_forced)
return result, result_m, L, psi0
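# Hedged usage sketch (illustration only, not part of the package API): the
# dephasing model and the values of N and p below are assumed. The single-site
# dephasing superoperator is diagonal, consistent with the generator h = Z/2.
def _example_fin():
    N, p = 4, 0.1
    Z = np.diag([1.0, -1.0])
    h = Z/2  # generator of the phase encoding, diagonal as required
    deph = (1 - p)*np.eye(4, dtype=complex) + p*np.kron(Z, Z)  # rho -> (1-p) rho + p Z rho Z, vectorized
    result, result_m, L, psi0 = fin(N, [], h, [deph])  # dephasing acts after the encoding
    return result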
def fin_gen(N, d, BC, ch, ch2, epsilon=None, L_ini=None, psi0_ini=None, imprecision=10**-2, D_L_max=100, D_L_max_forced=False, L_herm=True, D_psi0_max=100, D_psi0_max_forced=False):
"""
Optimization of the figure of merit (usually interpreted as the QFI) over operator L (in MPO representation) and wave function psi0 (in MPS representation) and check of convergence when increasing their bond dimensions. Function for finite size systems.
User has to provide information about the dynamics by specifying a quantum channel ch and its derivative ch2 (or two channels separated by small parameter epsilon) as superoperators in MPO representation.
There are no constraints on the structure of the channel but the complexity of calculations highly depends on the channel's bond dimension.
Parameters:
N: integer
Number of sites in the chain of tensors (usually number of particles).
d: integer
Dimension of local Hilbert space (dimension of physical index).
BC: 'O' or 'P'
Boundary conditions, 'O' for OBC, 'P' for PBC.
ch: list of length N of ndarrays of a shape (Dl_ch,Dr_ch,d**2,d**2) for OBC (Dl_ch, Dr_ch can vary between sites) or ndarray of a shape (D_ch,D_ch,d**2,d**2,N) for PBC
Quantum channel as a superoperator in MPO representation.
ch2: list of length N of ndarrays of a shape (Dl_ch2,Dr_ch2,d**2,d**2) for OBC (Dl_ch2, Dr_ch2 can vary between sites) or ndarray of a shape (D_ch2,D_ch2,d**2,d**2,N) for PBC
Interpretation depends on whether epsilon is specified (2) or not (1, the default approach):
1) the derivative of the quantum channel as a superoperator in the MPO representation,
2) the quantum channel as a superoperator in the MPO representation for the value of the estimated parameter shifted by epsilon in relation to ch.
epsilon: float, optional
If specified, it is interpreted as the separation between the values of the estimated parameter encoded in ch and ch2.
L_ini: list of length N of ndarrays of a shape (Dl_L,Dr_L,d,d) for OBC (Dl_L, Dr_L can vary between sites) or ndarray of a shape (D_L,D_L,d,d,N) for PBC, optional
Initial MPO for L.
psi0_ini: list of length N of ndarrays of a shape (Dl_psi0,Dr_psi0,d) for OBC (Dl_psi0, Dr_psi0 can vary between sites) or ndarray of a shape (D_psi0,D_psi0,d,N) for PBC, optional
Initial MPS for psi0.
imprecision: float, optional
Expected relative imprecision of the end results.
D_L_max: integer, optional
Maximal value of D_L (D_L is bond dimension for MPO representing L).
D_L_max_forced: bool, optional
True if D_L_max has to be reached, otherwise False.
L_herm: bool, optional
True if the Hermitian gauge has to be imposed on MPO representing L, otherwise False.
D_psi0_max: integer, optional
Maximal value of D_psi0 (D_psi0 is bond dimension for MPS representing psi0).
D_psi0_max_forced: bool, optional
True if D_psi0_max has to be reached, otherwise False.
Returns:
result: float
Optimal value of the figure of merit.
result_m: ndarray
Matrix describing the figure of merit as a function of the bond dimensions of L [rows] and psi0 [columns].
L: list of length N of ndarrays of a shape (Dl_L,Dr_L,d,d) for OBC (Dl_L, Dr_L can vary between sites) or ndarray of a shape (D_L,D_L,d,d,N) for PBC
Optimal L in MPO representation.
psi0: list of length N of ndarrays of a shape (Dl_psi0,Dr_psi0,d) for OBC (Dl_psi0, Dr_psi0 can vary between sites) or ndarray of a shape (D_psi0,D_psi0,d,N) for PBC
Optimal psi0 in MPS representation.
"""
if epsilon is None:
result, result_m, L, psi0 = fin_FoM_FoMD_optbd(N, d, BC, ch, ch2, L_ini, psi0_ini, imprecision, D_L_max, D_L_max_forced, L_herm, D_psi0_max, D_psi0_max_forced)
else:
result, result_m, L, psi0 = fin2_FoM_FoMD_optbd(N, d, BC, ch, ch2, epsilon, L_ini, psi0_ini, imprecision, D_L_max, D_L_max_forced, L_herm, D_psi0_max, D_psi0_max_forced)
return result, result_m, L, psi0
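# Hedged sketch (assumed model): the finite-difference interface of fin_gen(),
# here used to estimate the dephasing strength p itself; the values of p and
# epsilon are illustrative.
def _example_fin_gen():
    N, d, p, eps = 4, 2, 0.1, 10**-6
    Z = np.diag([1.0, -1.0])
    def deph(q):  # single-site dephasing superoperator for strength q
        return (1 - q)*np.eye(d**2, dtype=complex) + q*np.kron(Z, Z)
    ch = fin_create_channel(N, d, 'O', [deph(p)])
    ch2 = fin_create_channel(N, d, 'O', [deph(p + eps)])  # parameter shifted by eps
    return fin_gen(N, d, 'O', ch, ch2, epsilon=eps)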
def fin_state(N, so_before_list, h, so_after_list, rho0, BC='O', L_ini=None, imprecision=10**-2, D_L_max=100, D_L_max_forced=False, L_herm=True):
"""
Optimization of the QFI over operator L (in MPO representation) and check of convergence when increasing its bond dimension. Function for finite size systems and fixed state of the system.
User has to provide information about the dynamics by specifying a quantum channel. It is assumed that the quantum channel is translationally invariant and is built from layers of quantum operations.
User has to provide one defining operation for each layer as a local superoperator. These local superoperators have to be input in order of their action on the system.
Parameter encoding is a distinguished quantum operation: it is assumed to act only once and to be unitary, so the user has to provide only its generator h.
Generator h has to be diagonal in the computational basis, or in other words it is assumed that local superoperators are expressed in the eigenbasis of h.
Parameters:
N: integer
Number of sites in the chain of tensors (usually number of particles).
so_before_list: list of ndarrays of a shape (d**(2*k),d**(2*k)) where k is the number of sites on which a given local superoperator acts
List of local superoperators (in order) which act before unitary parameter encoding.
h: ndarray of a shape (d,d)
Generator of unitary parameter encoding. Dimension d is the dimension of local Hilbert space (dimension of physical index).
Generator h has to be diagonal in the computational basis; in other words, it is assumed that local superoperators are expressed in the eigenbasis of h.
so_after_list: list of ndarrays of a shape (d**(2*k),d**(2*k)) where k is the number of sites on which a given local superoperator acts
List of local superoperators (in order) which act after unitary parameter encoding.
rho0: list of length N of ndarrays of a shape (Dl_rho0,Dr_rho0,d,d) for OBC (Dl_rho0, Dr_rho0 can vary between sites) or ndarray of a shape (D_rho0,D_rho0,d,d,N) for PBC
Density matrix describing initial state of the system in MPO representation.
BC: 'O' or 'P', optional
Boundary conditions, 'O' for OBC, 'P' for PBC.
L_ini: list of length N of ndarrays of a shape (Dl_L,Dr_L,d,d) for OBC (Dl_L, Dr_L can vary between sites) or ndarray of a shape (D_L,D_L,d,d,N) for PBC, optional
Initial MPO for L.
imprecision: float, optional
Expected relative imprecision of the end results.
D_L_max: integer, optional
Maximal value of D_L (D_L is bond dimension for MPO representing L).
D_L_max_forced: bool, optional
True if D_L_max has to be reached, otherwise False.
L_herm: bool, optional
True if Hermitian gauge has to be imposed on MPO representing L, otherwise False.
Returns:
result: float
Optimal value of figure of merit.
result_v: ndarray
Vector describing the figure of merit as a function of the bond dimension of L.
L: list of length N of ndarrays of a shape (Dl_L,Dr_L,d,d) for OBC (Dl_L, Dr_L can vary between sites) or ndarray of a shape (D_L,D_L,d,d,N) for PBC
Optimal L in the MPO representation.
"""
if np.linalg.norm(h - np.diag(np.diag(h))) > 10**-10:
warnings.warn('Generator h has to be diagonal in the computational basis; in other words, it is assumed that local superoperators are expressed in the eigenbasis of h.')
d = np.shape(h)[0]
ch = fin_create_channel(N, d, BC, so_before_list + so_after_list)
ch2 = fin_create_channel_derivative(N, d, BC, so_before_list, h, so_after_list)
rho = channel_acting_on_operator(ch, rho0)
rho2 = channel_acting_on_operator(ch2, rho0)
result, result_v, L = fin_state_gen(N, d, BC, rho, rho2, None, L_ini, imprecision, D_L_max, D_L_max_forced, L_herm)
return result, result_v, L
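# Hedged sketch: fixed-input-state variant, evaluating the figure of merit for
# a product state |+>^N under the same assumed dephasing model as above.
def _example_fin_state():
    N, p = 4, 0.1
    Z = np.diag([1.0, -1.0])
    h = Z/2
    deph = (1 - p)*np.eye(4, dtype=complex) + p*np.kron(Z, Z)
    plus = np.full((2, 2), 0.5, dtype=complex)  # |+><+| density matrix
    rho0 = [plus[np.newaxis, np.newaxis, :, :]]*N  # bond-dimension-1 MPO
    result, result_v, L = fin_state(N, [], h, [deph], rho0)
    return result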
def fin_state_gen(N, d, BC, rho, rho2, epsilon=None, L_ini=None, imprecision=10**-2, D_L_max=100, D_L_max_forced=False, L_herm=True):
"""
Optimization of the figure of merit (usually interpreted as the QFI) over operator L (in MPO representation) and check of convergence when increasing its bond dimension. Function for finite size systems and a fixed state of the system.
User has to provide information about the dynamics by specifying a quantum channel ch and its derivative ch2 (or two channels separated by small parameter epsilon) as superoperators in the MPO representation.
There are no constraints on the structure of the channel, but the complexity of calculations highly depends on the channel's bond dimension.
Parameters:
N: integer
Number of sites in the chain of tensors (usually number of particles).
d: integer
Dimension of local Hilbert space (dimension of physical index).
BC: 'O' or 'P'
Boundary conditions, 'O' for OBC, 'P' for PBC.
rho: list of length N of ndarrays of a shape (Dl_rho,Dr_rho,d,d) for OBC (Dl_rho, Dr_rho can vary between sites) or ndarray of a shape (D_rho,D_rho,d,d,N) for PBC
Density matrix at the output of the quantum channel in the MPO representation.
rho2: list of length N of ndarrays of a shape (Dl_rho2,Dr_rho2,d,d) for OBC (Dl_rho2, Dr_rho2 can vary between sites) or ndarray of a shape (D_rho2,D_rho2,d,d,N) for PBC
Interpretation depends on whether epsilon is specified (2) or not (1, the default approach):
1) the derivative of the density matrix at the output of the quantum channel in the MPO representation,
2) the density matrix at the output of the quantum channel in the MPO representation for the value of the estimated parameter shifted by epsilon in relation to rho.
epsilon: float, optional
If specified, it is interpreted as the separation between the values of the estimated parameter encoded in rho and rho2.
L_ini: list of length N of ndarrays of a shape (Dl_L,Dr_L,d,d) for OBC (Dl_L, Dr_L can vary between sites) or ndarray of a shape (D_L,D_L,d,d,N) for PBC, optional
Initial MPO for L.
imprecision: float, optional
Expected relative imprecision of the end results.
D_L_max: integer, optional
Maximal value of D_L (D_L is bond dimension for MPO representing L).
D_L_max_forced: bool, optional
True if D_L_max has to be reached, otherwise False.
L_herm: bool, optional
True if Hermitian gauge has to be imposed on MPO representing L, otherwise False.
Returns:
result: float
Optimal value of figure of merit.
result_v: ndarray
Vector describing the figure of merit as a function of the bond dimension of L.
L: list of length N of ndarrays of a shape (Dl_L,Dr_L,d,d) for OBC (Dl_L, Dr_L can vary between sites) or ndarray of a shape (D_L,D_L,d,d,N) for PBC
Optimal L in MPO representation.
"""
if epsilon is None:
result, result_v, L = fin_FoM_optbd(N, d, BC, rho, rho2, L_ini, imprecision, D_L_max, D_L_max_forced, L_herm)
else:
result, result_v, L = fin2_FoM_optbd(N, d, BC, rho, rho2, epsilon, L_ini, imprecision, D_L_max, D_L_max_forced, L_herm)
return result, result_v, L
############################
# #
# 1.2 Low level functions. #
# #
############################
def fin_create_channel(N, d, BC, so_list, tol=10**-10):
"""
Creates MPO for a superoperator describing translationally invariant quantum channel from list of local superoperators. Function for finite size systems.
For OBC, the tensor-network length N has to be at least 2k-1, where k is the correlation length (the number of sites on which the largest local superoperator acts).
Local superoperators acting on more than 4 neighbouring sites are not currently supported.
Parameters:
N: integer
Number of sites in the chain of tensors (usually number of particles).
For OBC, the tensor-network length N has to be at least 2k-1, where k is the correlation length (the number of sites on which the largest local superoperator acts).
d: integer
Dimension of local Hilbert space (dimension of physical index).
BC: 'O' or 'P'
Boundary conditions, 'O' for OBC, 'P' for PBC.
so_list: list of ndarrays of a shape (d**(2*k),d**(2*k)) where k is the number of sites on which a given local superoperator acts
List of local superoperators in order of their action on the system.
Local superoperators acting on more than 4 neighbouring sites are not currently supported.
tol: float, optional
Factor which, multiplied by the largest singular value, gives the cutoff below which singular values are treated as zero.
Returns:
ch: list of length N of ndarrays of shape (Dl_ch,Dr_ch,d**2,d**2) for OBC (Dl_ch, Dr_ch can vary between sites) or ndarray of shape (D_ch,D_ch,d**2,d**2,N) for PBC
Quantum channel as a superoperator in the MPO representation.
"""
if so_list == []:
if BC == 'O':
ch = np.eye(d**2,dtype=complex)
ch = ch[np.newaxis,np.newaxis,:,:]
ch = [ch]*N
elif BC == 'P':
ch = np.eye(d**2,dtype=complex)
ch = ch[np.newaxis,np.newaxis,:,:,np.newaxis]
ch = np.tile(ch,(1,1,1,1,N))
return ch
if BC == 'O':
ch = [0]*N
kmax = max([int(math.log(np.shape(so_list[i])[0],d**2)) for i in range(len(so_list))])
if N < 2*kmax-1:
warnings.warn('For OBC, the tensor-network length N has to be at least 2k-1, where k is the correlation length (the number of sites on which the largest local superoperator acts).')
for x in range(N):
if x >= kmax and N-x >= kmax:
ch[x] = ch[x-1]
continue
for i in range(len(so_list)):
so = so_list[i]
k = int(math.log(np.shape(so)[0],d**2))
if np.linalg.norm(so-np.diag(np.diag(so))) < 10**-10:
so = np.diag(so)
if k == 1:
bdchil = 1
bdchir = 1
chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
for nx in range(d**2):
chi[:,:,nx,nx] = so[nx]
elif k == 2:
so = np.reshape(so,(d**2,d**2),order='F')
u,s,vh = np.linalg.svd(so)
s = s[s > s[0]*tol]
bdchi = np.shape(s)[0]
u = u[:,:bdchi]
vh = vh[:bdchi,:]
us = u @ np.diag(np.sqrt(s))
sv = np.diag(np.sqrt(s)) @ vh
if x == 0:
bdchil = 1
bdchir = bdchi
chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
for nx in range(d**2):
tensors = [us[nx,:]]
legs = [[-1]]
chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
elif x > 0 and x < N-1:
bdchil = bdchi
bdchir = bdchi
chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
for nx in range(d**2):
tensors = [sv[:,nx],us[nx,:]]
legs = [[-1],[-2]]
chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
elif x == N-1:
bdchil = bdchi
bdchir = 1
chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
for nx in range(d**2):
tensors = [sv[:,nx]]
legs = [[-1]]
chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
elif k == 3:
so = np.reshape(so,(d**2,d**4),order='F')
u1,s1,vh1 = np.linalg.svd(so,full_matrices=False)
s1 = s1[s1 > s1[0]*tol]
bdchi1 = np.shape(s1)[0]
u1 = u1[:,:bdchi1]
vh1 = vh1[:bdchi1,:]
us1 = u1 @ np.diag(np.sqrt(s1))
sv1 = np.diag(np.sqrt(s1)) @ vh1
sv1 = np.reshape(sv1,(bdchi1*d**2,d**2),order='F')
u2,s2,vh2 = np.linalg.svd(sv1,full_matrices=False)
s2 = s2[s2 > s2[0]*tol]
bdchi2 = np.shape(s2)[0]
u2 = u2[:,:bdchi2]
vh2 = vh2[:bdchi2,:]
us2 = u2 @ np.diag(np.sqrt(s2))
us2 = np.reshape(us2,(bdchi1,d**2,bdchi2),order='F')
sv2 = np.diag(np.sqrt(s2)) @ vh2
if x == 0:
bdchil = 1
bdchir = bdchi1
chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
for nx in range(d**2):
tensors = [us1[nx,:]]
legs = [[-1]]
chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
elif x == 1:
bdchil = bdchi1
bdchir = bdchi2*bdchi1
chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
for nx in range(d**2):
tensors = [us2[:,nx,:],us1[nx,:]]
legs = [[-1,-2],[-3]]
chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
elif x > 1 and x < N-2:
bdchil = bdchi2*bdchi1
bdchir = bdchi2*bdchi1
chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
for nx in range(d**2):
tensors = [sv2[:,nx],us2[:,nx,:],us1[nx,:]]
legs = [[-1],[-2,-3],[-4]]
chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
elif x == N-2:
bdchil = bdchi2*bdchi1
bdchir = bdchi2
chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
for nx in range(d**2):
tensors = [sv2[:,nx],us2[:,nx,:]]
legs = [[-1],[-2,-3]]
chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
elif x == N-1:
bdchil = bdchi2
bdchir = 1
chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
for nx in range(d**2):
tensors = [sv2[:,nx]]
legs = [[-1]]
chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
elif k == 4:
so = np.reshape(so,(d**2,d**6),order='F')
u1,s1,vh1 = np.linalg.svd(so,full_matrices=False)
s1 = s1[s1 > s1[0]*tol]
bdchi1 = np.shape(s1)[0]
u1 = u1[:,:bdchi1]
vh1 = vh1[:bdchi1,:]
us1 = u1 @ np.diag(np.sqrt(s1))
sv1 = np.diag(np.sqrt(s1)) @ vh1
sv1 = np.reshape(sv1,(bdchi1*d**2,d**4),order='F')
u2,s2,vh2 = np.linalg.svd(sv1,full_matrices=False)
s2 = s2[s2 > s2[0]*tol]
bdchi2 = np.shape(s2)[0]
u2 = u2[:,:bdchi2]
vh2 = vh2[:bdchi2,:]
us2 = u2 @ np.diag(np.sqrt(s2))
us2 = np.reshape(us2,(bdchi1,d**2,bdchi2),order='F')
sv2 = np.diag(np.sqrt(s2)) @ vh2
sv2 = np.reshape(sv2,(bdchi2*d**2,d**2),order='F')
u3,s3,vh3 = np.linalg.svd(sv2,full_matrices=False)
s3 = s3[s3 > s3[0]*tol]
bdchi3 = np.shape(s3)[0]
u3 = u3[:,:bdchi3]
vh3 = vh3[:bdchi3,:]
us3 = u3 @ np.diag(np.sqrt(s3))
us3 = np.reshape(us3,(bdchi2,d**2,bdchi3),order='F')
sv3 = np.diag(np.sqrt(s3)) @ vh3
if x == 0:
bdchil = 1
bdchir = bdchi1
chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
for nx in range(d**2):
tensors = [us1[nx,:]]
legs = [[-1]]
chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
elif x == 1:
bdchil = bdchi1
bdchir = bdchi2*bdchi1
chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
for nx in range(d**2):
tensors = [us2[:,nx,:],us1[nx,:]]
legs = [[-1,-2],[-3]]
chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
elif x == 2:
bdchil = bdchi2*bdchi1
bdchir = bdchi3*bdchi2*bdchi1
chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
for nx in range(d**2):
tensors = [us3[:,nx,:],us2[:,nx,:],us1[nx,:]]
legs = [[-1,-3],[-2,-4],[-5]]
chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
elif x > 2 and x < N-3:
bdchil = bdchi3*bdchi2*bdchi1
bdchir = bdchi3*bdchi2*bdchi1
chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
for nx in range(d**2):
tensors = [sv3[:,nx],us3[:,nx,:],us2[:,nx,:],us1[nx,:]]
legs = [[-1],[-2,-4],[-3,-5],[-6]]
chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
elif x == N-3:
bdchil = bdchi3*bdchi2*bdchi1
bdchir = bdchi3*bdchi2
chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
for nx in range(d**2):
tensors = [sv3[:,nx],us3[:,nx,:],us2[:,nx,:]]
legs = [[-1],[-2,-4],[-3,-5]]
chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
elif x == N-2:
bdchil = bdchi3*bdchi2
bdchir = bdchi3
chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
for nx in range(d**2):
tensors = [sv3[:,nx],us3[:,nx,:]]
legs = [[-1],[-2,-3]]
chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
elif x == N-1:
bdchil = bdchi3
bdchir = 1
chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
for nx in range(d**2):
tensors = [sv3[:,nx]]
legs = [[-1]]
chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
else:
warnings.warn('Local superoperators acting on more than 4 neighbouring sites are not currently supported.')
else:
if k == 1:
bdchil = 1
bdchir = 1
chi = so[np.newaxis,np.newaxis,:,:]
elif k == 2:
u,s,vh = np.linalg.svd(so)
s = s[s > s[0]*tol]
bdchi = np.shape(s)[0]
u = u[:,:bdchi]
vh = vh[:bdchi,:]
us = u @ np.diag(np.sqrt(s))
sv = np.diag(np.sqrt(s)) @ vh
us = np.reshape(us,(d**2,d**2,bdchi),order='F')
sv = np.reshape(sv,(bdchi,d**2,d**2),order='F')
tensors = [sv,us]
legs = [[-1,-3,1],[1,-4,-2]]
chi = ncon(tensors,legs)
if x == 0:
tensors = [us]
legs = [[-2,-3,-1]]
chi = ncon(tensors,legs)
bdchil = 1
bdchir = bdchi
elif x > 0 and x < N-1:
tensors = [sv,us]
legs = [[-1,-3,1],[1,-4,-2]]
chi = ncon(tensors,legs)
bdchil = bdchi
bdchir = bdchi
elif x == N-1:
tensors = [sv]
legs = [[-1,-2,-3]]
chi = ncon(tensors,legs)
bdchil = bdchi
bdchir = 1
chi = np.reshape(chi,(bdchil,bdchir,d**2,d**2),order='F')
elif k == 3:
so = np.reshape(so,(d**4,d**8),order='F')
u1,s1,vh1 = np.linalg.svd(so,full_matrices=False)
s1 = s1[s1 > s1[0]*tol]
bdchi1 = np.shape(s1)[0]
u1 = u1[:,:bdchi1]
vh1 = vh1[:bdchi1,:]
us1 = u1 @ np.diag(np.sqrt(s1))
sv1 = np.diag(np.sqrt(s1)) @ vh1
us1 = np.reshape(us1,(d**2,d**2,bdchi1),order='F')
sv1 = np.reshape(sv1,(bdchi1*d**4,d**4),order='F')
u2,s2,vh2 = np.linalg.svd(sv1,full_matrices=False)
s2 = s2[s2 > s2[0]*tol]
bdchi2 = np.shape(s2)[0]
u2 = u2[:,:bdchi2]
vh2 = vh2[:bdchi2,:]
us2 = u2 @ np.diag(np.sqrt(s2))
us2 = np.reshape(us2,(bdchi1,d**2,d**2,bdchi2),order='F')
sv2 = np.diag(np.sqrt(s2)) @ vh2
sv2 = np.reshape(sv2,(bdchi2,d**2,d**2),order='F')
if x == 0:
tensors = [us1]
legs = [[-2,-3,-1]]
chi = ncon(tensors,legs)
bdchil = 1
bdchir = bdchi1
elif x == 1:
tensors = [us2,us1]
legs = [[-1,-5,1,-2],[1,-6,-3]]
chi = ncon(tensors,legs)
bdchil = bdchi1
bdchir = bdchi2*bdchi1
elif x > 1 and x < N-2:
tensors = [sv2,us2,us1]
legs = [[-1,-5,1],[-2,1,2,-3],[2,-6,-4]]
chi = ncon(tensors,legs)
bdchil = bdchi2*bdchi1
bdchir = bdchi2*bdchi1
elif x == N-2:
tensors = [sv2,us2]
legs = [[-1,-4,1],[-2,1,-5,-3]]
chi = ncon(tensors,legs)
bdchil = bdchi2*bdchi1
bdchir = bdchi2
elif x == N-1:
tensors = [sv2]
legs = [[-1,-2,-3]]
chi = ncon(tensors,legs)
bdchil = bdchi2
bdchir = 1
chi = np.reshape(chi,(bdchil,bdchir,d**2,d**2),order='F')
elif k == 4:
so = np.reshape(so,(d**4,d**12),order='F')
u1,s1,vh1 = np.linalg.svd(so,full_matrices=False)
s1 = s1[s1 > s1[0]*tol]
bdchi1 = np.shape(s1)[0]
u1 = u1[:,:bdchi1]
vh1 = vh1[:bdchi1,:]
us1 = u1 @ np.diag(np.sqrt(s1))
sv1 = np.diag(np.sqrt(s1)) @ vh1
us1 = np.reshape(us1,(d**2,d**2,bdchi1),order='F')
sv1 = np.reshape(sv1,(bdchi1*d**4,d**8),order='F')
u2,s2,vh2 = np.linalg.svd(sv1,full_matrices=False)
s2 = s2[s2 > s2[0]*tol]
bdchi2 = np.shape(s2)[0]
u2 = u2[:,:bdchi2]
vh2 = vh2[:bdchi2,:]
us2 = u2 @ np.diag(np.sqrt(s2))
us2 = np.reshape(us2,(bdchi1,d**2,d**2,bdchi2),order='F')
sv2 = np.diag(np.sqrt(s2)) @ vh2
sv2 = np.reshape(sv2,(bdchi2*d**4,d**4),order='F')
u3,s3,vh3 = np.linalg.svd(sv2,full_matrices=False)
s3 = s3[s3 > s3[0]*tol]
bdchi3 = np.shape(s3)[0]
u3 = u3[:,:bdchi3]
vh3 = vh3[:bdchi3,:]
us3 = u3 @ np.diag(np.sqrt(s3))
us3 = np.reshape(us3,(bdchi2,d**2,d**2,bdchi3),order='F')
sv3 = np.diag(np.sqrt(s3)) @ vh3
sv3 = np.reshape(sv3,(bdchi3,d**2,d**2),order='F')
if x == 0:
tensors = [us1]
legs = [[-2,-3,-1]]
chi = ncon(tensors,legs)
bdchil = 1
bdchir = bdchi1
elif x == 1:
tensors = [us2,us1]
legs = [[-1,-4,1,-2],[1,-5,-3]]
chi = ncon(tensors,legs)
bdchil = bdchi1
bdchir = bdchi2*bdchi1
elif x == 2:
tensors = [us3,us2,us1]
legs = [[-1,-6,1,-3],[-2,1,2,-4],[2,-7,-5]]
chi = ncon(tensors,legs)
bdchil = bdchi2*bdchi1
bdchir = bdchi3*bdchi2*bdchi1
elif x > 2 and x < N-3:
tensors = [sv3,us3,us2,us1]
legs = [[-1,-7,1],[-2,1,2,-4],[-3,2,3,-5],[3,-8,-6]]
chi = ncon(tensors,legs)
bdchil = bdchi3*bdchi2*bdchi1
bdchir = bdchi3*bdchi2*bdchi1
elif x == N-3:
tensors = [sv3,us3,us2]
legs = [[-1,-6,1],[-2,1,2,-4],[-3,2,-7,-5]]
chi = ncon(tensors,legs)
bdchil = bdchi3*bdchi2*bdchi1
bdchir = bdchi3*bdchi2
elif x == N-2:
tensors = [sv3,us3]
legs = [[-1,-4,1],[-2,1,-5,-3]]
chi = ncon(tensors,legs)
bdchil = bdchi3*bdchi2
bdchir = bdchi3
elif x == N-1:
tensors = [sv3]
legs = [[-1,-2,-3]]
chi = ncon(tensors,legs)
bdchil = bdchi3
bdchir = 1
chi = np.reshape(chi,(bdchil,bdchir,d**2,d**2),order='F')
else:
warnings.warn('Local superoperators acting on more than 4 neighbouring sites are not currently supported.')
if i == 0:
bdchl = bdchil
bdchr = bdchir
ch[x] = chi
else:
bdchl = bdchil*bdchl
bdchr = bdchir*bdchr
tensors = [chi,ch[x]]
legs = [[-1,-3,-5,1],[-2,-4,1,-6]]
ch[x] = ncon(tensors,legs)
ch[x] = np.reshape(ch[x],(bdchl,bdchr,d**2,d**2),order='F')
elif BC == 'P':
for i in range(len(so_list)):
so = so_list[i]
k = int(math.log(np.shape(so)[0],d**2))
if np.linalg.norm(so-np.diag(np.diag(so))) < 10**-10:
so = np.diag(so)
if k == 1:
bdchi = 1
chi = np.zeros((bdchi,bdchi,d**2,d**2),dtype=complex)
for nx in range(d**2):
chi[:,:,nx,nx] = so[nx]
elif k == 2:
so = np.reshape(so,(d**2,d**2),order='F')
u,s,vh = np.linalg.svd(so)
s = s[s > s[0]*tol]
bdchi = np.shape(s)[0]
u = u[:,:bdchi]
vh = vh[:bdchi,:]
us = u @ np.diag(np.sqrt(s))
sv = np.diag(np.sqrt(s)) @ vh
chi = np.zeros((bdchi,bdchi,d**2,d**2),dtype=complex)
for nx in range(d**2):
chi[:,:,nx,nx] = np.outer(sv[:,nx],us[nx,:])
elif k == 3:
so = np.reshape(so,(d**2,d**4),order='F')
u1,s1,vh1 = np.linalg.svd(so,full_matrices=False)
s1 = s1[s1 > s1[0]*tol]
bdchi1 = np.shape(s1)[0]
u1 = u1[:,:bdchi1]
vh1 = vh1[:bdchi1,:]
us1 = u1 @ np.diag(np.sqrt(s1))
sv1 = np.diag(np.sqrt(s1)) @ vh1
sv1 = np.reshape(sv1,(bdchi1*d**2,d**2),order='F')
u2,s2,vh2 = np.linalg.svd(sv1,full_matrices=False)
s2 = s2[s2 > s2[0]*tol]
bdchi2 = np.shape(s2)[0]
u2 = u2[:,:bdchi2]
vh2 = vh2[:bdchi2,:]
us2 = u2 @ np.diag(np.sqrt(s2))
us2 = np.reshape(us2,(bdchi1,d**2,bdchi2),order='F')
sv2 = np.diag(np.sqrt(s2)) @ vh2
bdchi = bdchi2*bdchi1
chi = np.zeros((bdchi,bdchi,d**2,d**2),dtype=complex)
for nx in range(d**2):
tensors = [sv2[:,nx],us2[:,nx,:],us1[nx,:]]
legs = [[-1],[-2,-3],[-4]]
chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchi,bdchi),order='F')
elif k == 4:
so = np.reshape(so,(d**2,d**6),order='F')
u1,s1,vh1 = np.linalg.svd(so,full_matrices=False)
s1 = s1[s1 > s1[0]*tol]
bdchi1 = np.shape(s1)[0]
u1 = u1[:,:bdchi1]
vh1 = vh1[:bdchi1,:]
us1 = u1 @ np.diag(np.sqrt(s1))
sv1 = np.diag(np.sqrt(s1)) @ vh1
sv1 = np.reshape(sv1,(bdchi1*d**2,d**4),order='F')
u2,s2,vh2 = np.linalg.svd(sv1,full_matrices=False)
s2 = s2[s2 > s2[0]*tol]
bdchi2 = np.shape(s2)[0]
u2 = u2[:,:bdchi2]
vh2 = vh2[:bdchi2,:]
us2 = u2 @ np.diag(np.sqrt(s2))
us2 = np.reshape(us2,(bdchi1,d**2,bdchi2),order='F')
sv2 = np.diag(np.sqrt(s2)) @ vh2
sv2 = np.reshape(sv2,(bdchi2*d**2,d**2),order='F')
u3,s3,vh3 = np.linalg.svd(sv2,full_matrices=False)
s3 = s3[s3 > s3[0]*tol]
bdchi3 = np.shape(s3)[0]
u3 = u3[:,:bdchi3]
vh3 = vh3[:bdchi3,:]
us3 = u3 @ np.diag(np.sqrt(s3))
us3 = np.reshape(us3,(bdchi2,d**2,bdchi3),order='F')
sv3 = np.diag(np.sqrt(s3)) @ vh3
bdchi = bdchi3*bdchi2*bdchi1
chi = np.zeros((bdchi,bdchi,d**2,d**2),dtype=complex)
for nx in range(d**2):
tensors = [sv3[:,nx],us3[:,nx,:],us2[:,nx,:],us1[nx,:]]
legs = [[-1],[-2,-4],[-3,-5],[-6]]
chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchi,bdchi),order='F')
else:
warnings.warn('Local superoperators acting on more than 4 neighbouring sites are not currently supported.')
else:
if k == 1:
bdchi = 1
chi = so[np.newaxis,np.newaxis,:,:]
elif k == 2:
u,s,vh = np.linalg.svd(so)
s = s[s > s[0]*tol]
bdchi = np.shape(s)[0]
u = u[:,:bdchi]
vh = vh[:bdchi,:]
us = u @ np.diag(np.sqrt(s))
sv = np.diag(np.sqrt(s)) @ vh
us = np.reshape(us,(d**2,d**2,bdchi),order='F')
sv = np.reshape(sv,(bdchi,d**2,d**2),order='F')
tensors = [sv,us]
legs = [[-1,-3,1],[1,-4,-2]]
chi = ncon(tensors,legs)
elif k == 3:
so = np.reshape(so,(d**4,d**8),order='F')
u1,s1,vh1 = np.linalg.svd(so,full_matrices=False)
s1 = s1[s1 > s1[0]*tol]
bdchi1 = np.shape(s1)[0]
u1 = u1[:,:bdchi1]
vh1 = vh1[:bdchi1,:]
us1 = u1 @ np.diag(np.sqrt(s1))
sv1 = np.diag(np.sqrt(s1)) @ vh1
us1 = np.reshape(us1,(d**2,d**2,bdchi1),order='F')
sv1 = np.reshape(sv1,(bdchi1*d**4,d**4),order='F')
u2,s2,vh2 = np.linalg.svd(sv1,full_matrices=False)
s2 = s2[s2 > s2[0]*tol]
bdchi2 = np.shape(s2)[0]
u2 = u2[:,:bdchi2]
vh2 = vh2[:bdchi2,:]
us2 = u2 @ np.diag(np.sqrt(s2))
us2 = np.reshape(us2,(bdchi1,d**2,d**2,bdchi2),order='F')
sv2 = np.diag(np.sqrt(s2)) @ vh2
sv2 = np.reshape(sv2,(bdchi2,d**2,d**2),order='F')
tensors = [sv2,us2,us1]
legs = [[-1,-5,1],[-2,1,2,-3],[2,-6,-4]]
chi = ncon(tensors,legs)
bdchi = bdchi2*bdchi1
chi = np.reshape(chi,(bdchi,bdchi,d**2,d**2),order='F')
elif k == 4:
so = np.reshape(so,(d**4,d**12),order='F')
u1,s1,vh1 = np.linalg.svd(so,full_matrices=False)
s1 = s1[s1 > s1[0]*tol]
bdchi1 = np.shape(s1)[0]
u1 = u1[:,:bdchi1]
vh1 = vh1[:bdchi1,:]
us1 = u1 @ np.diag(np.sqrt(s1))
sv1 = np.diag(np.sqrt(s1)) @ vh1
us1 = np.reshape(us1,(d**2,d**2,bdchi1),order='F')
sv1 = np.reshape(sv1,(bdchi1*d**4,d**8),order='F')
u2,s2,vh2 = np.linalg.svd(sv1,full_matrices=False)
s2 = s2[s2 > s2[0]*tol]
bdchi2 = np.shape(s2)[0]
u2 = u2[:,:bdchi2]
vh2 = vh2[:bdchi2,:]
us2 = u2 @ np.diag(np.sqrt(s2))
us2 = np.reshape(us2,(bdchi1,d**2,d**2,bdchi2),order='F')
sv2 = np.diag(np.sqrt(s2)) @ vh2
sv2 = np.reshape(sv2,(bdchi2*d**4,d**4),order='F')
u3,s3,vh3 = np.linalg.svd(sv2,full_matrices=False)
s3 = s3[s3 > s3[0]*tol]
bdchi3 = np.shape(s3)[0]
u3 = u3[:,:bdchi3]
vh3 = vh3[:bdchi3,:]
us3 = u3 @ np.diag(np.sqrt(s3))
us3 = np.reshape(us3,(bdchi2,d**2,d**2,bdchi3),order='F')
sv3 = np.diag(np.sqrt(s3)) @ vh3
sv3 = np.reshape(sv3,(bdchi3,d**2,d**2),order='F')
tensors = [sv3,us3,us2,us1]
legs = [[-1,-7,1],[-2,1,2,-4],[-3,2,3,-5],[3,-8,-6]]
chi = ncon(tensors,legs)
bdchi = bdchi3*bdchi2*bdchi1
chi = np.reshape(chi,(bdchi,bdchi,d**2,d**2),order='F')
else:
warnings.warn('Local superoperators acting on more than 4 neighbouring sites are not currently supported.')
if i == 0:
bdch = bdchi
ch = chi
else:
bdch = bdchi*bdch
tensors = [chi,ch]
legs = [[-1,-3,-5,1],[-2,-4,1,-6]]
ch = ncon(tensors,legs)
ch = np.reshape(ch,(bdch,bdch,d**2,d**2),order='F')
ch = ch[:,:,:,:,np.newaxis]
ch = np.tile(ch,(1,1,1,1,N))
return ch
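# Hedged illustration (assumed model): bond dimensions of the channel MPO
# produced for a two-site (k = 2) correlated-dephasing superoperator,
# rho -> (1-p) rho + p (Z x Z) rho (Z x Z).
def _example_fin_create_channel():
    N, d, p = 4, 2, 0.1
    zz = np.kron(np.diag([1.0, -1.0]), np.diag([1.0, -1.0]))
    so = (1 - p)*np.eye(d**4, dtype=complex) + p*np.kron(zz, zz)  # shape (16,16)
    ch = fin_create_channel(N, d, 'O', [so])
    # expected shapes: [(1,2,4,4), (2,2,4,4), (2,2,4,4), (2,1,4,4)]
    return [np.shape(t) for t in ch]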
def fin_create_channel_derivative(N, d, BC, so_before_list, h, so_after_list):
"""
Creates an MPO for the derivative (over the estimated parameter) of the superoperator describing the quantum channel. Function for finite size systems.
Function for translationally invariant channels with unitary parameter encoding generated by h.
Generator h has to be diagonal in the computational basis, or in other words it is assumed that local superoperators are expressed in the eigenbasis of h.
Parameters:
N: integer
Number of sites in the chain of tensors (usually number of particles).
d: integer
Dimension of local Hilbert space (dimension of physical index).
BC: 'O' or 'P'
Boundary conditions, 'O' for OBC, 'P' for PBC.
so_before_list: list of ndarrays of a shape (d**(2*k),d**(2*k)) where k is the number of sites on which a given local superoperator acts
List of local superoperators (in order) which act before unitary parameter encoding.
h: ndarray of a shape (d,d)
Generator of unitary parameter encoding.
Generator h has to be diagonal in the computational basis; in other words, it is assumed that local superoperators are expressed in the eigenbasis of h.
so_after_list: list of ndarrays of a shape (d**(2*k),d**(2*k)) where k is the number of sites on which a given local superoperator acts
List of local superoperators (in order) which act after unitary parameter encoding.
Returns:
chd: list of length N of ndarrays of a shape (Dl_chd,Dr_chd,d**2,d**2) for OBC (Dl_chd, Dr_chd can vary between sites) or ndarray of a shape (D_chd,D_chd,d**2,d**2,N) for PBC
Derivative of the superoperator describing the quantum channel in the MPO representation.
"""
if np.linalg.norm(h-np.diag(np.diag(h))) > 10**-10:
warnings.warn('Generator h has to be diagonal in the computational basis; in other words, it is assumed that local superoperators are expressed in the eigenbasis of h.')
if len(so_before_list) == 0:
if BC == 'O':
ch1 = np.eye(d**2,dtype=complex)
ch1 = ch1[np.newaxis,np.newaxis,:,:]
ch1 = [ch1]*N
elif BC == 'P':
ch1 = np.eye(d**2,dtype=complex)
ch1 = ch1[np.newaxis,np.newaxis,:,:,np.newaxis]
ch1 = np.tile(ch1,(1,1,1,1,N))
ch1d = fin_commutator(N,d,BC,ch1,h,1j)
ch2 = fin_create_channel(N,d,BC,so_after_list)
if BC == 'O':
chd = [0]*N
for x in range(N):
bdch1dl = np.shape(ch1d[x])[0]
bdch1dr = np.shape(ch1d[x])[1]
bdch2l = np.shape(ch2[x])[0]
bdch2r = np.shape(ch2[x])[1]
tensors = [ch2[x],ch1d[x]]
legs = [[-1,-3,-5,1],[-2,-4,1,-6]]
chd[x] = np.reshape(ncon(tensors,legs),(bdch1dl*bdch2l,bdch1dr*bdch2r,d**2,d**2),order='F')
elif BC == 'P':
bdch1d = np.shape(ch1d)[0]
bdch2 = np.shape(ch2)[0]
chd = np.zeros((bdch1d*bdch2,bdch1d*bdch2,d**2,d**2,N),dtype=complex)
for x in range(N):
tensors = [ch2[:,:,:,:,x],ch1d[:,:,:,:,x]]
legs = [[-1,-3,-5,1],[-2,-4,1,-6]]
chd[:,:,:,:,x] = np.reshape(ncon(tensors,legs),(bdch1d*bdch2,bdch1d*bdch2,d**2,d**2),order='F')
elif len(so_after_list) == 0:
ch1 = fin_create_channel(N,d,BC,so_before_list)
chd = fin_commutator(N,d,BC,ch1,h,1j)
else:
ch1 = fin_create_channel(N,d,BC,so_before_list)
ch1d = fin_commutator(N,d,BC,ch1,h,1j)
ch2 = fin_create_channel(N,d,BC,so_after_list)
if BC == 'O':
chd = [0]*N
for x in range(N):
bdch1dl = np.shape(ch1d[x])[0]
bdch1dr = np.shape(ch1d[x])[1]
bdch2l = np.shape(ch2[x])[0]
bdch2r = np.shape(ch2[x])[1]
tensors = [ch2[x],ch1d[x]]
legs = [[-1,-3,-5,1],[-2,-4,1,-6]]
chd[x] = np.reshape(ncon(tensors,legs),(bdch1dl*bdch2l,bdch1dr*bdch2r,d**2,d**2),order='F')
elif BC == 'P':
bdch1d = np.shape(ch1d)[0]
bdch2 = np.shape(ch2)[0]
chd = np.zeros((bdch1d*bdch2,bdch1d*bdch2,d**2,d**2,N),dtype=complex)
for x in range(N):
tensors = [ch2[:,:,:,:,x],ch1d[:,:,:,:,x]]
legs = [[-1,-3,-5,1],[-2,-4,1,-6]]
chd[:,:,:,:,x] = np.reshape(ncon(tensors,legs),(bdch1d*bdch2,bdch1d*bdch2,d**2,d**2),order='F')
return chd
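# Hedged consistency check (assumed model and vectorization conventions): for
# N = 2 qubits, the derivative MPO contracted to a dense matrix should match
# the product rule applied to (dephasing) o (phase encoding) at zero angle.
def _example_fin_create_channel_derivative():
    N, d, p = 2, 2, 0.1
    Z = np.diag([1.0, -1.0])
    h = Z/2
    deph = (1 - p)*np.eye(4, dtype=complex) + p*np.kron(Z, Z)
    chd = fin_create_channel_derivative(N, d, 'O', [], h, [deph])
    tensors = [chd[0], chd[1]]
    legs = [[-5, 1, -1, -3], [1, -6, -2, -4]]
    D = np.reshape(ncon(tensors, legs)[:, :, :, :, 0, 0], (d**4, d**4), order='F')
    enc_d = 1j*(np.kron(h, np.eye(d)) - np.kron(np.eye(d), h))  # derivative of the encoding superoperator
    ref = np.kron(deph @ enc_d, deph) + np.kron(deph, deph @ enc_d)  # product rule over the two sites
    return np.allclose(D, ref)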
def fin_commutator(N, d, BC, a, h, c):
"""
Calculates the MPO for the commutator b = [a, c*sum{h}] of MPO a with the sum of local generators h, scaled by an arbitrary multiplicative factor c.
Generator h has to be diagonal in the computational basis; in other words, it is assumed that a is expressed in the eigenbasis of h.
Parameters:
N: integer
Number of sites in the chain of tensors (usually number of particles).
d: integer
Dimension of local Hilbert space (dimension of physical index).
BC: 'O' or 'P'
Boundary conditions, 'O' for OBC, 'P' for PBC.
a: list of length N of ndarrays of a shape (Dl_a,Dr_a,d,d) for OBC (Dl_a, Dr_a can vary between sites) or ndarray of a shape (D_a,D_a,d,d,N) for PBC
MPO.
h: ndarray of a shape (d,d)
Generator of unitary parameter encoding.
Generator h has to be diagonal in the computational basis; in other words, it is assumed that a is expressed in the eigenbasis of h.
c: complex
Scalar factor which multiplies the sum of local generators.
Returns:
b: list of length N of ndarrays of a shape (Dl_b,Dr_b,d,d) for OBC (Dl_b, Dr_b can vary between sites) or ndarray of a shape (D_b,D_b,d,d,N) for PBC
Commutator [a, c*sum{h}] in MPO representation.
"""
if np.linalg.norm(h-np.diag(np.diag(h))) > 10**-10:
warnings.warn('Generator h has to be diagonal in the computational basis; in other words, it is assumed that a is expressed in the eigenbasis of h.')
if BC == 'O':
bh = [0]*N
b = [0]*N
for x in range(N):
da = np.shape(a[x])[2]
bda1 = np.shape(a[x])[0]
bda2 = np.shape(a[x])[1]
if x == 0:
bdbh1 = 1
bdbh2 = 2
bh[x] = np.zeros((bdbh1,bdbh2,d,d),dtype=complex)
for nx in range(d):
for nxp in range(d):
bh[x][:,:,nx,nxp] = np.array([[c*(h[nxp,nxp]-h[nx,nx]),1]])
elif x > 0 and x < N-1:
bdbh1 = 2
bdbh2 = 2
bh[x] = np.zeros((bdbh1,bdbh2,d,d),dtype=complex)
for nx in range(d):
for nxp in range(d):
bh[x][:,:,nx,nxp] = np.array([[1,0],[c*(h[nxp,nxp]-h[nx,nx]),1]])
elif x == N-1:
bdbh1 = 2
bdbh2 = 1
bh[x] = np.zeros((bdbh1,bdbh2,d,d),dtype=complex)
for nx in range(d):
for nxp in range(d):
bh[x][:,:,nx,nxp] = np.array([[1],[c*(h[nxp,nxp]-h[nx,nx])]])
if da == d:
# a is operator
b[x] = np.zeros((bdbh1*bda1,bdbh2*bda2,d,d),dtype=complex)
for nx in range(d):
for nxp in range(d):
b[x][:,:,nx,nxp] = np.kron(bh[x][:,:,nx,nxp],a[x][:,:,nx,nxp])
elif da == d**2:
# a is superoperator (vectorized channel)
bh[x] = np.reshape(bh[x],(bdbh1,bdbh2,d**2),order='F')
b[x] = np.zeros((bdbh1*bda1,bdbh2*bda2,d**2,d**2),dtype=complex)
for nx in range(d**2):
for nxp in range(d**2):
b[x][:,:,nx,nxp] = np.kron(bh[x][:,:,nx],a[x][:,:,nx,nxp])
elif BC == 'P':
da = np.shape(a)[2]
bda = np.shape(a)[0]
if N == 1:
bdbh = 1
else:
bdbh = 2
bh = np.zeros((bdbh,bdbh,d,d,N),dtype=complex)
for nx in range(d):
for nxp in range(d):
if N == 1:
bh[:,:,nx,nxp,0] = c*(h[nxp,nxp]-h[nx,nx])
else:
bh[:,:,nx,nxp,0] = np.array([[c*(h[nxp,nxp]-h[nx,nx]),1],[0,0]])
for x in range(1,N-1):
bh[:,:,nx,nxp,x] = np.array([[1,0],[c*(h[nxp,nxp]-h[nx,nx]),1]])
bh[:,:,nx,nxp,N-1] = np.array([[1,0],[c*(h[nxp,nxp]-h[nx,nx]),0]])
if da == d:
# a is operator
b = np.zeros((bdbh*bda,bdbh*bda,d,d,N),dtype=complex)
for nx in range(d):
for nxp in range(d):
for x in range(N):
b[:,:,nx,nxp,x] = np.kron(bh[:,:,nx,nxp,x],a[:,:,nx,nxp,x])
elif da == d**2:
# a is superoperator (vectorized channel)
bh = np.reshape(bh,(bdbh,bdbh,d**2,N),order='F')
b = np.zeros((bdbh*bda,bdbh*bda,d**2,d**2,N),dtype=complex)
for nx in range(d**2):
for nxp in range(d**2):
for x in range(N):
b[:,:,nx,nxp,x] = np.kron(bh[:,:,nx,x],a[:,:,nx,nxp,x])
return b
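# Hedged sanity check: for N = 2 and OBC, fin_commutator() should reproduce
# the dense commutator [sx x sx, c*(h x I + I x h)]; the operators are assumed.
def _example_fin_commutator():
    d, c = 2, 1.0
    h = np.diag([0.5, -0.5])
    sx = np.array([[0, 1], [1, 0]], dtype=complex)
    a = [sx[np.newaxis, np.newaxis, :, :]]*2  # product MPO with bond dimension 1
    b = fin_commutator(2, d, 'O', a, h, c)
    tensors = [b[0], b[1]]
    legs = [[-5, 1, -1, -3], [1, -6, -2, -4]]
    B = np.reshape(ncon(tensors, legs)[:, :, :, :, 0, 0], (d**2, d**2), order='F')
    A = np.kron(sx, sx)
    H = np.kron(h, np.eye(d)) + np.kron(np.eye(d), h)
    return np.allclose(B, c*(A @ H - H @ A))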
def fin_enlarge_bdl(cold,factor):
"""
Enlarge bond dimension of SLD MPO. Function for finite size systems.
Parameters:
cold: SLD MPO, expected list of length n of ndarrays of a shape (bd,bd,d,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,d,n) for PBC
factor: factor which determines the average magnitude of the newly added values of the SLD MPO relative to the old ones
Returns:
c: SLD MPO with bd += 1
"""
rng = np.random.default_rng()
if type(cold) is list:
n = len(cold)
if n == 1:
warnings.warn('Tensor networks with OBC and length one have to have bond dimension equal to one.')
else:
c = [0]*n
x = 0
d = np.shape(cold[x])[2]
bdl1 = 1
bdl2 = np.shape(cold[x])[1]+1
c[x] = np.zeros((bdl1,bdl2,d,d),dtype=complex)
for nx in range(d):
for nxp in range(d):
meanrecold = np.sum(np.abs(np.real(cold[x][:,:,nx,nxp])))/(bdl2-1)
meanimcold = np.sum(np.abs(np.imag(cold[x][:,:,nx,nxp])))/(bdl2-1)
c[x][:,:,nx,nxp] = (meanrecold*rng.random((bdl1,bdl2))+1j*meanimcold*rng.random((bdl1,bdl2)))*factor
c[x] = (c[x] + np.conj(np.moveaxis(c[x],2,3)))/2
c[x][0:bdl1,0:bdl2-1,:,:] = cold[x]
for x in range(1,n-1):
d = np.shape(cold[x])[2]
bdl1 = np.shape(cold[x])[0]+1
bdl2 = np.shape(cold[x])[1]+1
c[x] = np.zeros((bdl1,bdl2,d,d),dtype=complex)
for nx in range(d):
for nxp in range(d):
meanrecold = np.sum(np.abs(np.real(cold[x][:,:,nx,nxp])))/((bdl1-1)*(bdl2-1))
meanimcold = np.sum(np.abs(np.imag(cold[x][:,:,nx,nxp])))/((bdl1-1)*(bdl2-1))
c[x][:,:,nx,nxp] = (meanrecold*rng.random((bdl1,bdl2))+1j*meanimcold*rng.random((bdl1,bdl2)))*factor
c[x] = (c[x] + np.conj(np.moveaxis(c[x],2,3)))/2
c[x][0:bdl1-1,0:bdl2-1,:,:] = cold[x]
x = n-1
d = np.shape(cold[x])[2]
bdl1 = np.shape(cold[x])[0]+1
bdl2 = 1
c[x] = np.zeros((bdl1,bdl2,d,d),dtype=complex)
for nx in range(d):
for nxp in range(d):
meanrecold = np.sum(np.abs(np.real(cold[x][:,:,nx,nxp])))/(bdl1-1)
meanimcold = np.sum(np.abs(np.imag(cold[x][:,:,nx,nxp])))/(bdl1-1)
c[x][:,:,nx,nxp] = (meanrecold*rng.random((bdl1,bdl2))+1j*meanimcold*rng.random((bdl1,bdl2)))*factor
c[x] = (c[x] + np.conj(np.moveaxis(c[x],2,3)))/2
c[x][0:bdl1-1,0:bdl2,:,:] = cold[x]
elif type(cold) is np.ndarray:
n = np.shape(cold)[4]
d = np.shape(cold)[2]
bdl = np.shape(cold)[0]+1
c = np.zeros((bdl,bdl,d,d,n),dtype=complex)
for nx in range(d):
for nxp in range(d):
for x in range(n):
meanrecold = np.sum(np.abs(np.real(cold[:,:,nx,nxp,x])))/(bdl-1)**2
meanimcold = np.sum(np.abs(np.imag(cold[:,:,nx,nxp,x])))/(bdl-1)**2
c[:,:,nx,nxp,x] = (meanrecold*rng.random((bdl,bdl))+1j*meanimcold*rng.random((bdl,bdl)))*factor
c = (c + np.conj(np.moveaxis(c,2,3)))/2
c[0:bdl-1,0:bdl-1,:,:,:] = cold
return c
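# Hedged sketch: fin_enlarge_bdl() grows every internal bond by one while
# embedding the old tensors; the random input MPO below is assumed.
def _example_fin_enlarge_bdl():
    rng = np.random.default_rng(0)
    bd, d = 2, 2
    cold = [rng.random((1, bd, d, d)).astype(complex),
            rng.random((bd, bd, d, d)).astype(complex),
            rng.random((bd, 1, d, d)).astype(complex)]
    c = fin_enlarge_bdl(cold, 0.1)
    # expected shapes: [(1,3,2,2), (3,3,2,2), (3,1,2,2)]
    return [np.shape(t) for t in c]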
def fin_enlarge_bdpsi(a0old,factor):
"""
Enlarge bond dimension of wave function MPS. Function for finite size systems.
Parameters:
a0old: wave function MPS, expected list of length n of ndarrays of a shape (bd,bd,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,n) for PBC
factor: factor which determines the average magnitude of the newly added values of the wave function MPS relative to the old ones
Returns:
a0: wave function MPS with bd += 1
"""
rng = np.random.default_rng()
if type(a0old) is list:
n = len(a0old)
if n == 1:
warnings.warn('Tensor networks with OBC and length one have to have bond dimension equal to one.')
else:
a0 = [0]*n
x = 0
d = np.shape(a0old[x])[2]
bdpsi1 = 1
bdpsi2 = np.shape(a0old[x])[1]+1
a0[x] = np.zeros((bdpsi1,bdpsi2,d),dtype=complex)
for nx in range(d):
meanrea0old = np.sum(np.abs(np.real(a0old[x][:,:,nx])))/(bdpsi2-1)
meanima0old = np.sum(np.abs(np.imag(a0old[x][:,:,nx])))/(bdpsi2-1)
a0[x][:,:,nx] = (meanrea0old*rng.random((bdpsi1,bdpsi2))+1j*meanima0old*rng.random((bdpsi1,bdpsi2)))*factor
a0[x][0:bdpsi1,0:bdpsi2-1,:] = a0old[x]
for x in range(1,n-1):
d = np.shape(a0old[x])[2]
bdpsi1 = np.shape(a0old[x])[0]+1
bdpsi2 = np.shape(a0old[x])[1]+1
a0[x] = np.zeros((bdpsi1,bdpsi2,d),dtype=complex)
for nx in range(d):
meanrea0old = np.sum(np.abs(np.real(a0old[x][:,:,nx])))/((bdpsi1-1)*(bdpsi2-1))
meanima0old = np.sum(np.abs(np.imag(a0old[x][:,:,nx])))/((bdpsi1-1)*(bdpsi2-1))
a0[x][:,:,nx] = (meanrea0old*rng.random((bdpsi1,bdpsi2))+1j*meanima0old*rng.random((bdpsi1,bdpsi2)))*factor
a0[x][0:bdpsi1-1,0:bdpsi2-1,:] = a0old[x]
x = n-1
d = np.shape(a0old[x])[2]
bdpsi1 = np.shape(a0old[x])[0]+1
bdpsi2 = 1
a0[x] = np.zeros((bdpsi1,bdpsi2,d),dtype=complex)
for nx in range(d):
meanrea0old = np.sum(np.abs(np.real(a0old[x][:,:,nx])))/(bdpsi1-1)
meanima0old = np.sum(np.abs(np.imag(a0old[x][:,:,nx])))/(bdpsi1-1)
a0[x][:,:,nx] = (meanrea0old*rng.random((bdpsi1,bdpsi2))+1j*meanima0old*rng.random((bdpsi1,bdpsi2)))*factor
a0[x][0:bdpsi1-1,0:bdpsi2,:] = a0old[x]
tensors = [np.conj(a0[n-1]),a0[n-1]]
legs = [[-1,-3,1],[-2,-4,1]]
r1 = ncon(tensors,legs)
a0[n-1] = a0[n-1]/np.sqrt(np.linalg.norm(np.reshape(r1,-1,order='F')))
tensors = [np.conj(a0[n-1]),a0[n-1]]
legs = [[-1,-3,1],[-2,-4,1]]
r2 = ncon(tensors,legs)
for x in range(n-2,0,-1):
tensors = [np.conj(a0[x]),a0[x],r2]
legs = [[-1,2,1],[-2,3,1],[2,3,-3,-4]]
r1 = ncon(tensors,legs)
a0[x] = a0[x]/np.sqrt(np.linalg.norm(np.reshape(r1,-1,order='F')))
tensors = [np.conj(a0[x]),a0[x],r2]
legs = [[-1,2,1],[-2,3,1],[2,3,-3,-4]]
r2 = ncon(tensors,legs)
tensors = [np.conj(a0[0]),a0[0],r2]
legs = [[4,2,1],[5,3,1],[2,3,4,5]]
r1 = ncon(tensors,legs)
a0[0] = a0[0]/np.sqrt(np.abs(r1))
elif type(a0old) is np.ndarray:
n = np.shape(a0old)[3]
d = np.shape(a0old)[2]
bdpsi = np.shape(a0old)[0]+1
a0 = np.zeros((bdpsi,bdpsi,d,n),dtype=complex)
for nx in range(d):
for x in range(n):
meanrea0old = np.sum(np.abs(np.real(a0old[:,:,nx,x])))/(bdpsi-1)**2
meanima0old = np.sum(np.abs(np.imag(a0old[:,:,nx,x])))/(bdpsi-1)**2
a0[:,:,nx,x] = (meanrea0old*rng.random((bdpsi,bdpsi))+1j*meanima0old*rng.random((bdpsi,bdpsi)))*factor
a0[0:bdpsi-1,0:bdpsi-1,:,:] = a0old
if n == 1:
tensors = [np.conj(a0[:,:,:,0]),a0[:,:,:,0]]
legs = [[2,2,1],[3,3,1]]
r1 = ncon(tensors,legs)
a0[:,:,:,0] = a0[:,:,:,0]/np.sqrt(np.abs(r1))
else:
tensors = [np.conj(a0[:,:,:,n-1]),a0[:,:,:,n-1]]
legs = [[-1,-3,1],[-2,-4,1]]
r1 = ncon(tensors,legs)
a0[:,:,:,n-1] = a0[:,:,:,n-1]/np.sqrt(np.linalg.norm(np.reshape(r1,-1,order='F')))
tensors = [np.conj(a0[:,:,:,n-1]),a0[:,:,:,n-1]]
legs = [[-1,-3,1],[-2,-4,1]]
r2 = ncon(tensors,legs)
for x in range(n-2,0,-1):
tensors = [np.conj(a0[:,:,:,x]),a0[:,:,:,x],r2]
legs = [[-1,2,1],[-2,3,1],[2,3,-3,-4]]
r1 = ncon(tensors,legs)
a0[:,:,:,x] = a0[:,:,:,x]/np.sqrt(np.linalg.norm(np.reshape(r1,-1,order='F')))
tensors = [np.conj(a0[:,:,:,x]),a0[:,:,:,x],r2]
legs = [[-1,2,1],[-2,3,1],[2,3,-3,-4]]
r2 = ncon(tensors,legs)
tensors = [np.conj(a0[:,:,:,0]),a0[:,:,:,0],r2]
legs = [[4,2,1],[5,3,1],[2,3,4,5]]
r1 = ncon(tensors,legs)
a0[:,:,:,0] = a0[:,:,:,0]/np.sqrt(np.abs(r1))
return a0
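# Hedged sketch: after enlarging, the wave function MPS is renormalized, so
# its squared norm should come back as 1; the two-site product input is assumed.
def _example_fin_enlarge_bdpsi():
    site = np.array([1.0, 0.0], dtype=complex)[np.newaxis, np.newaxis, :]  # |0>
    a0 = fin_enlarge_bdpsi([site, site.copy()], 0.1)
    tensors = [np.conj(a0[0]), a0[0], np.conj(a0[1]), a0[1]]
    legs = [[3, 1, 2], [3, 4, 2], [1, 6, 5], [4, 6, 5]]
    norm2 = ncon(tensors, legs)  # squared norm of the enlarged MPS
    return np.isclose(norm2, 1.0)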
#########################################
# 1.2.1 Problems with exact derivative. #
#########################################
def fin_FoM_FoMD_optbd(n,d,bc,ch,chp,cini=None,a0ini=None,imprecision=10**-2,bdlmax=100,alwaysbdlmax=False,lherm=True,bdpsimax=100,alwaysbdpsimax=False):
"""
Iterative optimization of FoM/FoMD over SLD MPO and initial wave function MPS, together with a check of convergence in the bond dimensions. Function for finite size systems.
Parameters:
n: number of sites in TN
d: dimension of local Hilbert space (dimension of physical index)
bc: boundary conditions, 'O' for OBC, 'P' for PBC
ch: MPO for quantum channel, expected list of length n of ndarrays of a shape (bd,bd,d**2,d**2) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d**2,d**2,n) for PBC
chp: MPO for generalized derivative of quantum channel, expected list of length n of ndarrays of a shape (bd,bd,d**2,d**2) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d**2,d**2,n) for PBC
cini: initial MPO for SLD, expected list of length n of ndarrays of a shape (bd,bd,d,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,d,n) for PBC
a0ini: initial MPS for initial wave function, expected list of length n of ndarrays of a shape (bd,bd,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,n) for PBC
imprecision: expected imprecision of the end results, default value is 10**-2
bdlmax: maximal value of bd for SLD MPO, default value is 100
alwaysbdlmax: boolean value, True if the maximal value of bd for SLD MPO has to be reached, otherwise False (default value)
lherm: boolean value, True (default value) when Hermitian gauge is imposed on SLD MPO, otherwise False
bdpsimax: maximal value of bd for initial wave function MPS, default value is 100
alwaysbdpsimax: boolean value, True if the maximal value of bd for initial wave function MPS has to be reached, otherwise False (default value)
Returns:
result: optimal value of FoM/FoMD
resultm: matrix describing FoM/FoMD as a function of bd of the SLD MPO [rows] and the initial wave function MPS [columns]
c: optimal MPO for SLD
a0: optimal MPS for initial wave function
"""
while True:
if a0ini is None:
bdpsi = 1
a0 = np.zeros(d,dtype=complex)
for i in range(d):
a0[i] = np.sqrt(math.comb(d-1,i))*2**(-(d-1)/2) # prod
# a0[i] = np.sqrt(2/(d+1))*np.sin((1+i)*np.pi/(d+1)) # sine
if bc == 'O':
a0 = a0[np.newaxis,np.newaxis,:]
a0 = [a0]*n
elif bc == 'P':
a0 = a0[np.newaxis,np.newaxis,:,np.newaxis]
a0 = np.tile(a0,(1,1,1,n))
else:
a0 = a0ini
if bc == 'O':
bdpsi = max([np.shape(a0[i])[0] for i in range(n)])
a0 = [a0[i].astype(complex) for i in range(n)]
elif bc == 'P':
bdpsi = np.shape(a0)[0]
a0 = a0.astype(complex)
if cini is None:
bdl = 1
rng = np.random.default_rng()
if bc == 'O':
c = [0]*n
c[0] = (rng.random((1,bdl,d,d)) + 1j*rng.random((1,bdl,d,d)))/bdl
c[0] = (c[0] + np.conj(np.moveaxis(c[0],2,3)))/2
for x in range(1,n-1):
c[x] = (rng.random((bdl,bdl,d,d)) + 1j*rng.random((bdl,bdl,d,d)))/bdl
c[x] = (c[x] + np.conj(np.moveaxis(c[x],2,3)))/2
c[n-1] = (rng.random((bdl,1,d,d)) + 1j*rng.random((bdl,1,d,d)))/bdl
c[n-1] = (c[n-1] + np.conj(np.moveaxis(c[n-1],2,3)))/2
elif bc == 'P':
c = (rng.random((bdl,bdl,d,d,n)) + 1j*rng.random((bdl,bdl,d,d,n)))/bdl
c = (c + np.conj(np.moveaxis(c,2,3)))/2
else:
c = cini
if bc == 'O':
bdl = max([np.shape(c[i])[0] for i in range(n)])
c = [c[i].astype(complex) for i in range(n)]
elif bc == 'P':
bdl = np.shape(c)[0]
c = c.astype(complex)
resultm = np.zeros((bdlmax,bdpsimax),dtype=float)
resultm[bdl-1,bdpsi-1],c,a0 = fin_FoM_FoMD_optm(n,d,bc,c,a0,ch,chp,imprecision,lherm)
if bc == 'O' and n == 1:
resultm = resultm[0:bdl,0:bdpsi]
result = resultm[bdl-1,bdpsi-1]
return result,resultm,c,a0
factorv = np.array([0.5,0.25,0.1,1,0.01])
problem = False
while True:
while True:
if bdpsi == bdpsimax:
break
else:
a0old = a0
bdpsi += 1
i = 0
while True:
a0 = fin_enlarge_bdpsi(a0,factorv[i])
resultm[bdl-1,bdpsi-1],cnew,a0new = fin_FoM_FoMD_optm(n,d,bc,c,a0,ch,chp,imprecision,lherm)
if resultm[bdl-1,bdpsi-1] >= resultm[bdl-1,bdpsi-2]:
break
i += 1
if i == np.size(factorv):
problem = True
break
if problem:
break
if not(alwaysbdpsimax) and resultm[bdl-1,bdpsi-1] < (1+imprecision)*resultm[bdl-1,bdpsi-2]:
bdpsi += -1
a0 = a0old
a0copy = a0new
ccopy = cnew
break
else:
a0 = a0new
c = cnew
if problem:
break
if bdl == bdlmax:
if bdpsi == bdpsimax:
resultm = resultm[0:bdl,0:bdpsi]
result = resultm[bdl-1,bdpsi-1]
else:
a0 = a0copy
c = ccopy
resultm = resultm[0:bdl,0:bdpsi+1]
result = resultm[bdl-1,bdpsi]
break
else:
bdl += 1
i = 0
while True:
c = fin_enlarge_bdl(c,factorv[i])
resultm[bdl-1,bdpsi-1],cnew,a0new = fin_FoM_FoMD_optm(n,d,bc,c,a0,ch,chp,imprecision,lherm)
if resultm[bdl-1,bdpsi-1] >= resultm[bdl-2,bdpsi-1]:
a0 = a0new
c = cnew
break
i += 1
if i == np.size(factorv):
problem = True
break
if problem:
break
if not(alwaysbdlmax) and resultm[bdl-1,bdpsi-1] < (1+imprecision)*resultm[bdl-2,bdpsi-1]:
if bdpsi == bdpsimax:
resultm = resultm[0:bdl,0:bdpsi]
result = resultm[bdl-1,bdpsi-1]
else:
if resultm[bdl-1,bdpsi-1] < resultm[bdl-2,bdpsi]:
a0 = a0copy
c = ccopy
resultm = resultm[0:bdl,0:bdpsi+1]
bdl += -1
bdpsi += 1
result = resultm[bdl-1,bdpsi-1]
else:
resultm = resultm[0:bdl,0:bdpsi+1]
result = resultm[bdl-1,bdpsi-1]
break
if not(problem):
break
return result,resultm,c,a0
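# Hedged sketch (assumed model): driving the low-level optimizer directly with
# channel MPOs built by the helpers above; this mirrors what fin() does.
def _example_fin_FoM_FoMD_optbd():
    n, d, p = 3, 2, 0.1
    Z = np.diag([1.0, -1.0])
    h = Z/2
    deph = (1 - p)*np.eye(4, dtype=complex) + p*np.kron(Z, Z)
    ch = fin_create_channel(n, d, 'O', [deph])
    chp = fin_create_channel_derivative(n, d, 'O', [], h, [deph])
    result, resultm, c, a0 = fin_FoM_FoMD_optbd(n, d, 'O', ch, chp)
    return result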
def fin_FoM_optbd(n,d,bc,a,b,cini=None,imprecision=10**-2,bdlmax=100,alwaysbdlmax=False,lherm=True):
"""
Optimization of FoM over SLD MPO, together with a check of convergence in the bond dimension. Function for finite size systems.
Parameters:
n: number of sites in TN
d: dimension of local Hilbert space (dimension of physical index)
bc: boundary conditions, 'O' for OBC, 'P' for PBC
a: MPO for density matrix, expected list of length n of ndarrays of a shape (bd,bd,d,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,d,n) for PBC
b: MPO for generalized derivative of density matrix, expected list of length n of ndarrays of a shape (bd,bd,d,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,d,n) for PBC
cini: initial MPO for SLD, expected list of length n of ndarrays of a shape (bd,bd,d,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,d,n) for PBC
imprecision: expected imprecision of the end results, default value is 10**-2
bdlmax: maximal value of bd for SLD MPO, default value is 100
alwaysbdlmax: boolean value, True if the maximal value of bd for SLD MPO has to be reached, otherwise False (default value)
lherm: boolean value, True (default value) when Hermitian gauge is imposed on SLD MPO, otherwise False
Returns:
result: optimal value of FoM
resultv: vector describing FoM as a function of bd of the SLD MPO
c: optimal MPO for SLD
"""
while True:
if cini is None:
bdl = 1
rng = np.random.default_rng()
if bc == 'O':
c = [0]*n
c[0] = (rng.random((1,bdl,d,d)) + 1j*rng.random((1,bdl,d,d)))/bdl
c[0] = (c[0] + np.conj(np.moveaxis(c[0],2,3)))/2
for x in range(1,n-1):
c[x] = (rng.random((bdl,bdl,d,d)) + 1j*rng.random((bdl,bdl,d,d)))/bdl
c[x] = (c[x] + np.conj(np.moveaxis(c[x],2,3)))/2
c[n-1] = (rng.random((bdl,1,d,d)) + 1j*rng.random((bdl,1,d,d)))/bdl
c[n-1] = (c[n-1] + np.conj(np.moveaxis(c[n-1],2,3)))/2
elif bc == 'P':
c = (rng.random((bdl,bdl,d,d,n)) + 1j*rng.random((bdl,bdl,d,d,n)))/bdl
c = (c + np.conj(np.moveaxis(c,2,3)))/2
else:
c = cini
if bc == 'O':
bdl = max([np.shape(c[i])[0] for i in range(n)])
c = [c[i].astype(complex) for i in range(n)]
elif bc == 'P':
bdl = np.shape(c)[0]
c = c.astype(complex)
resultv = np.zeros(bdlmax,dtype=float)
if bc == 'O':
resultv[bdl-1],c = fin_FoM_OBC_optm(a,b,c,imprecision,lherm)
if n == 1:
resultv = resultv[0:bdl]
result = resultv[bdl-1]
return result,resultv,c
elif bc == 'P':
resultv[bdl-1],c = fin_FoM_PBC_optm(a,b,c,imprecision,lherm)
factorv = np.array([0.5,0.25,0.1,1,0.01])
problem = False
while True:
if bdl == bdlmax:
resultv = resultv[0:bdl]
result = resultv[bdl-1]
break
else:
bdl += 1
i = 0
while True:
c = fin_enlarge_bdl(c,factorv[i])
if bc == 'O':
resultv[bdl-1],cnew = fin_FoM_OBC_optm(a,b,c,imprecision,lherm)
elif bc == 'P':
resultv[bdl-1],cnew = fin_FoM_PBC_optm(a,b,c,imprecision,lherm)
if resultv[bdl-1] >= resultv[bdl-2]:
c = cnew
break
i += 1
if i == np.size(factorv):
problem = True
break
if problem:
break
if not(alwaysbdlmax) and resultv[bdl-1] < (1+imprecision)*resultv[bdl-2]:
resultv = resultv[0:bdl]
result = resultv[bdl-1]
break
if not(problem):
break
return result,resultv,c
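# Hedged test case (assumed): a single qubit in |+> under phase encoding with
# h = Z/2 has QFI 4*Var(h) = 1, so the optimized FoM should come out near 1.
def _example_fin_FoM_optbd():
    d = 2
    h = np.diag([0.5, -0.5])
    rho = np.full((d, d), 0.5, dtype=complex)  # |+><+|
    drho = -1j*(h @ rho - rho @ h)  # derivative of rho(phi) at phi = 0
    a = [rho[np.newaxis, np.newaxis, :, :]]
    b = [drho[np.newaxis, np.newaxis, :, :]]
    result, resultv, c = fin_FoM_optbd(1, d, 'O', a, b)
    return result  # expected ~1.0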
def fin_FoMD_optbd(n,d,bc,c2d,cpd,a0ini=None,imprecision=10**-2,bdpsimax=100,alwaysbdpsimax=False):
"""
Optimization of FoMD over initial wave function MPS, together with a check of convergence in the bond dimension. Function for finite size systems.
Parameters:
n: number of sites in TN
d: dimension of local Hilbert space (dimension of physical index)
bc: boundary conditions, 'O' for OBC, 'P' for PBC
c2d: MPO for square of dual of SLD, expected list of length n of ndarrays of a shape (bd,bd,d,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,d,n) for PBC
cpd: MPO for dual of generalized derivative of SLD, expected list of length n of ndarrays of a shape (bd,bd,d,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,d,n) for PBC
a0ini: initial MPS for initial wave function, expected list of length n of ndarrays of a shape (bd,bd,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,n) for PBC
imprecision: expected imprecision of the end results, default value is 10**-2
bdpsimax: maximal value of bd for initial wave function MPS, default value is 100
alwaysbdpsimax: boolean value, True if the maximal value of bd for the initial wave function MPS has to be reached, otherwise False (default value)
Returns:
result: optimal value of FoMD
resultv: vector describing FoMD as a function of bd of the initial wave function MPS
a0: optimal MPS for initial wave function
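Example:
A minimal sketch for two qubits with OBC; c2d is an identity placeholder and cpd an assumed bond dimension 2 MPO encoding sx x sx + sz x sz, whose maximal eigenvector is entangled, so the optimization benefits from a bond dimension larger than one. In real use both come from acting with dual channels on the squared SLD and on the SLD (cf. fin_FoM_FoMD_optm).
    sx = np.array([[0,1],[1,0]],dtype=complex)
    sz = np.diag([1,-1]).astype(complex)
    c2d = [np.eye(2,dtype=complex)[np.newaxis,np.newaxis,:,:]]*2 # placeholder
    cpd = [np.zeros((1,2,2,2),dtype=complex),np.zeros((2,1,2,2),dtype=complex)]
    cpd[0][0,0],cpd[0][0,1] = sx,sz
    cpd[1][0,0],cpd[1][1,0] = sx,sz # cpd encodes sx x sx + sz x sz
    result,resultv,a0 = fin_FoMD_optbd(2,2,'O',c2d,cpd)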
"""
while True:
if a0ini is None:
bdpsi = 1
a0 = np.zeros(d,dtype=complex)
for i in range(d):
a0[i] = np.sqrt(math.comb(d-1,i))*2**(-(d-1)/2) # prod
# a0[i] = np.sqrt(2/(d+1))*np.sin((1+i)*np.pi/(d+1)) # sine
if bc == 'O':
a0 = a0[np.newaxis,np.newaxis,:]
a0 = [a0]*n
elif bc == 'P':
a0 = a0[np.newaxis,np.newaxis,:,np.newaxis]
a0 = np.tile(a0,(1,1,1,n))
else:
a0 = a0ini
if bc == 'O':
bdpsi = max([np.shape(a0[i])[0] for i in range(n)])
a0 = [a0[i].astype(complex) for i in range(n)]
elif bc == 'P':
bdpsi = np.shape(a0)[0]
a0 = a0.astype(complex)
resultv = np.zeros(bdpsimax,dtype=float)
if bc == 'O':
resultv[bdpsi-1],a0 = fin_FoMD_OBC_optm(c2d,cpd,a0,imprecision)
if n == 1:
resultv = resultv[0:bdpsi]
result = resultv[bdpsi-1]
return result,resultv,a0
elif bc == 'P':
resultv[bdpsi-1],a0 = fin_FoMD_PBC_optm(c2d,cpd,a0,imprecision)
factorv = np.array([0.5,0.25,0.1,1,0.01])
problem = False
while True:
if bdpsi == bdpsimax:
resultv = resultv[0:bdpsi]
result = resultv[bdpsi-1]
break
else:
bdpsi += 1
i = 0
while True:
a0 = fin_enlarge_bdpsi(a0,factorv[i])
if bc == 'O':
resultv[bdpsi-1],a0new = fin_FoMD_OBC_optm(c2d,cpd,a0,imprecision)
elif bc == 'P':
resultv[bdpsi-1],a0new = fin_FoMD_PBC_optm(c2d,cpd,a0,imprecision)
if resultv[bdpsi-1] >= resultv[bdpsi-2]:
a0 = a0new
break
i += 1
if i == np.size(factorv):
problem = True
break
if problem:
break
if not(alwaysbdpsimax) and resultv[bdpsi-1] < (1+imprecision)*resultv[bdpsi-2]:
resultv = resultv[0:bdpsi]
result = resultv[bdpsi-1]
break
if not(problem):
break
return result,resultv,a0
def fin_FoM_FoMD_optm(n,d,bc,c,a0,ch,chp,imprecision=10**-2,lherm=True):
"""
Iterative optimization of FoM/FoMD over SLD MPO and initial wave function MPS. Function for finite size systems.
Parameters:
n: number of sites in TN
d: dimension of local Hilbert space (dimension of physical index)
bc: boundary conditions, 'O' for OBC, 'P' for PBC
c: MPO for SLD, expected list of length n of ndarrays of a shape (bd,bd,d,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,d,n) for PBC
a0: MPS for initial wave function, expected list of length n of ndarrays of a shape (bd,bd,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,n) for PBC
ch: MPO for quantum channel, expected list of length n of ndarrays of a shape (bd,bd,d**2,d**2) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d**2,d**2,n) for PBC
chp: MPO for generalized derivative of quantum channel, expected list of length n of ndarrays of a shape (bd,bd,d**2,d**2) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d**2,d**2,n) for PBC
imprecision: expected imprecision of the end results, default value is 10**-2
lherm: boolean value, True (default value) when Hermitian gauge is imposed on SLD MPO, otherwise False
Returns:
fval: optimal value of FoM/FoMD
c: optimal MPO for SLD
a0: optimal MPS for initial wave function
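Example:
A minimal sketch for one qubit with OBC; ch is the identity superoperator (noiseless channel at phi=phi_0) and chp an assumed derivative superoperator for the generator sz/2 in a column-major vectorization convention. In real use both come from the package's channel-construction routines.
    d = 2
    sz = np.diag([1,-1]).astype(complex)
    ch = [np.eye(d**2,dtype=complex)[np.newaxis,np.newaxis,:,:]]
    gen = -0.5j*(np.kron(np.eye(d),sz)-np.kron(sz.T,np.eye(d))) # assumed convention
    chp = [gen[np.newaxis,np.newaxis,:,:]]
    c = [sz[np.newaxis,np.newaxis,:,:]]
    a0 = [(np.array([1,1],dtype=complex)/np.sqrt(2))[np.newaxis,np.newaxis,:]]
    fval,c,a0 = fin_FoM_FoMD_optm(1,d,'O',c,a0,ch,chp)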
"""
relunc_f = 0.1*imprecision
if bc == 'O':
chd = [0]*n
chpd = [0]*n
for x in range(n):
chd[x] = np.conj(np.moveaxis(ch[x],2,3))
chpd[x] = np.conj(np.moveaxis(chp[x],2,3))
elif bc == 'P':
chd = np.conj(np.moveaxis(ch,2,3))
chpd = np.conj(np.moveaxis(chp,2,3))
f = np.array([])
iter_f = 0
while True:
a0_dm = wave_function_to_density_matrix(a0)
a = channel_acting_on_operator(ch,a0_dm)
b = channel_acting_on_operator(chp,a0_dm)
if bc == 'O':
fom,c = fin_FoM_OBC_optm(a,b,c,imprecision,lherm)
elif bc == 'P':
fom,c = fin_FoM_PBC_optm(a,b,c,imprecision,lherm)
f = np.append(f,fom)
if iter_f >= 2 and np.std(f[-4:])/np.mean(f[-4:]) <= relunc_f:
break
if bc == 'O':
c2 = [0]*n
for x in range(n):
bdl1 = np.shape(c[x])[0]
bdl2 = np.shape(c[x])[1]
c2[x] = np.zeros((bdl1**2,bdl2**2,d,d),dtype=complex)
for nx in range(d):
for nxp in range(d):
for nxpp in range(d):
c2[x][:,:,nx,nxp] = c2[x][:,:,nx,nxp]+np.kron(c[x][:,:,nx,nxpp],c[x][:,:,nxpp,nxp])
elif bc == 'P':
bdl = np.shape(c)[0]
c2 = np.zeros((bdl**2,bdl**2,d,d,n),dtype=complex)
for nx in range(d):
for nxp in range(d):
for nxpp in range(d):
for x in range(n):
c2[:,:,nx,nxp,x] = c2[:,:,nx,nxp,x]+np.kron(c[:,:,nx,nxpp,x],c[:,:,nxpp,nxp,x])
c2d = channel_acting_on_operator(chd,c2)
cpd = channel_acting_on_operator(chpd,c)
if bc == 'O':
fomd,a0 = fin_FoMD_OBC_optm(c2d,cpd,a0,imprecision)
elif bc == 'P':
fomd,a0 = fin_FoMD_PBC_optm(c2d,cpd,a0,imprecision)
f = np.append(f,fomd)
iter_f += 1
fval = f[-1]
return fval,c,a0
def fin_FoM_OBC_optm(a,b,c,imprecision=10**-2,lherm=True):
"""
Optimization of FoM over MPO for SLD. Function for finite size systems with OBC.
Parameters:
a: MPO for density matrix, expected list of length n of ndarrays of a shape (bd,bd,d,d) (bd can vary between sites)
b: MPO for generalized derivative of density matrix, expected list of length n of ndarrays of a shape (bd,bd,d,d) (bd can vary between sites)
c: MPO for SLD, expected list of length n of ndarrays of a shape (bd,bd,d,d) (bd can vary between sites)
imprecision: expected imprecision of the end results, default value is 10**-2
lherm: boolean value, True (default value) when Hermitian gauge is imposed on SLD MPO, otherwise False
Returns:
fomval: optimal value of FoM
c: optimal MPO for SLD
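Example:
A minimal sketch for a single qubit; a, b and the initial c are bond dimension 1 MPOs, with drho an assumed stand-in for the derivative of rho over the estimated parameter.
    rho = np.array([[0.5,0.25],[0.25,0.5]],dtype=complex)
    drho = np.array([[0,0.5j],[-0.5j,0]],dtype=complex) # assumed derivative
    a = [rho[np.newaxis,np.newaxis,:,:]]
    b = [drho[np.newaxis,np.newaxis,:,:]]
    c = [np.eye(2,dtype=complex)[np.newaxis,np.newaxis,:,:]]
    fomval,c = fin_FoM_OBC_optm(a,b,c)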
"""
n = len(c)
tol_fom = 0.1*imprecision/n**2
if n == 1:
if np.shape(a[0])[0] == 1 and np.shape(b[0])[0] == 1 and np.shape(c[0])[0] == 1:
d = np.shape(c[0])[2]
tensors = [b[0][0,0,:,:]]
legs = [[-2,-1]]
l1 = ncon(tensors,legs)
l1 = np.reshape(l1,-1,order='F')
tensors = [a[0][0,0,:,:],np.eye(d)]
legs = [[-2,-3],[-4,-1]]
l2 = ncon(tensors,legs)
l2 = np.reshape(l2,(d*d,d*d),order='F')
dl2 = l2+l2.T
dl1 = 2*l1
dl2pinv = np.linalg.pinv(dl2,tol_fom)
dl2pinv = (dl2pinv+dl2pinv.T)/2
cv = dl2pinv @ dl1
c[0][0,0,:,:] = np.reshape(cv,(d,d),order='F')
if lherm:
c[0] = (c[0]+np.conj(np.moveaxis(c[0],2,3)))/2
cv = np.reshape(c[0],-1,order='F')
fomval = np.real(2*cv @ l1 - cv @ l2 @ cv)
else:
warnings.warn('Tensor networks with OBC and length one have to have bond dimension equal to one.')
else:
relunc_fom = 0.1*imprecision
l1f = [0]*n
l2f = [0]*n
fom = np.array([])
iter_fom = 0
while True:
tensors = [c[n-1],b[n-1]]
legs = [[-1,-3,1,2],[-2,-4,2,1]]
l1f[n-2] = ncon(tensors,legs)
l1f[n-2] = l1f[n-2][:,:,0,0]
tensors = [c[n-1],a[n-1],c[n-1]]
legs = [[-1,-4,1,2],[-2,-5,2,3],[-3,-6,3,1]]
l2f[n-2] = ncon(tensors,legs)
l2f[n-2] = l2f[n-2][:,:,:,0,0,0]
for x in range(n-2,0,-1):
tensors = [c[x],b[x],l1f[x]]
legs = [[-1,3,1,2],[-2,4,2,1],[3,4]]
l1f[x-1] = ncon(tensors,legs)
tensors = [c[x],a[x],c[x],l2f[x]]
legs = [[-1,4,1,2],[-2,5,2,3],[-3,6,3,1],[4,5,6]]
l2f[x-1] = ncon(tensors,legs)
bdl1,bdl2,d,d = np.shape(c[0])
tensors = [b[0],l1f[0]]
legs = [[-5,1,-4,-3],[-2,1]]
l1 = ncon(tensors,legs)
l1 = np.reshape(l1,-1,order='F')
tensors = [a[0],np.eye(d),l2f[0]]
legs = [[-9,1,-4,-7],[-8,-3],[-2,1,-6]]
l2 = ncon(tensors,legs)
l2 = np.reshape(l2,(bdl1*bdl2*d*d,bdl1*bdl2*d*d),order='F')
dl2 = l2+l2.T
dl1 = 2*l1
dl2pinv = np.linalg.pinv(dl2,tol_fom)
dl2pinv = (dl2pinv+dl2pinv.T)/2
cv = dl2pinv @ dl1
c[0] = np.reshape(cv,(bdl1,bdl2,d,d),order='F')
if lherm:
c[0] = (c[0]+np.conj(np.moveaxis(c[0],2,3)))/2
cv = np.reshape(c[0],-1,order='F')
fom = np.append(fom,np.real(2*cv @ l1 - cv @ l2 @ cv))
tensors = [c[0],b[0]]
legs = [[-3,-1,1,2],[-4,-2,2,1]]
l1c = ncon(tensors,legs)
l1c = l1c[:,:,0,0]
tensors = [c[0],a[0],c[0]]
legs = [[-4,-1,1,2],[-5,-2,2,3],[-6,-3,3,1]]
l2c = ncon(tensors,legs)
l2c = l2c[:,:,:,0,0,0]
for x in range(1,n-1):
bdl1,bdl2,d,d = np.shape(c[x])
tensors = [l1c,b[x],l1f[x]]
legs = [[-1,1],[1,2,-4,-3],[-2,2]]
l1 = ncon(tensors,legs)
l1 = np.reshape(l1,-1,order='F')
tensors = [l2c,a[x],np.eye(d),l2f[x]]
legs = [[-1,1,-5],[1,2,-4,-7],[-8,-3],[-2,2,-6]]
l2 = ncon(tensors,legs)
l2 = np.reshape(l2,(bdl1*bdl2*d*d,bdl1*bdl2*d*d),order='F')
dl2 = l2+l2.T
dl1 = 2*l1
dl2pinv = np.linalg.pinv(dl2,tol_fom)
dl2pinv = (dl2pinv+dl2pinv.T)/2
cv = dl2pinv @ dl1
c[x] = np.reshape(cv,(bdl1,bdl2,d,d),order='F')
if lherm:
c[x] = (c[x]+np.conj(np.moveaxis(c[x],2,3)))/2
cv = np.reshape(c[x],-1,order='F')
fom = np.append(fom,np.real(2*cv @ l1 - cv @ l2 @ cv))
tensors = [l1c,c[x],b[x]]
legs = [[3,4],[3,-1,1,2],[4,-2,2,1]]
l1c = ncon(tensors,legs)
tensors = [l2c,c[x],a[x],c[x]]
legs = [[4,5,6],[4,-1,1,2],[5,-2,2,3],[6,-3,3,1]]
l2c = ncon(tensors,legs)
bdl1,bdl2,d,d = np.shape(c[n-1])
tensors = [l1c,b[n-1]]
legs = [[-1,1],[1,-5,-4,-3]]
l1 = ncon(tensors,legs)
l1 = np.reshape(l1,-1,order='F')
tensors = [l2c,a[n-1],np.eye(d)]
legs = [[-1,1,-5],[1,-9,-4,-7],[-8,-3]]
l2 = ncon(tensors,legs)
l2 = np.reshape(l2,(bdl1*bdl2*d*d,bdl1*bdl2*d*d),order='F')
dl2 = l2+l2.T
dl1 = 2*l1
dl2pinv = np.linalg.pinv(dl2,tol_fom)
dl2pinv = (dl2pinv+dl2pinv.T)/2
cv = dl2pinv @ dl1
c[n-1] = np.reshape(cv,(bdl1,bdl2,d,d),order='F')
if lherm:
c[n-1] = (c[n-1]+np.conj(np.moveaxis(c[n-1],2,3)))/2
cv = np.reshape(c[n-1],-1,order='F')
fom = np.append(fom,np.real(2*cv @ l1 - cv @ l2 @ cv))
iter_fom += 1
if iter_fom >= 2 and all(fom[-2*n:] > 0) and np.std(fom[-2*n:])/np.mean(fom[-2*n:]) <= relunc_fom:
break
fomval = fom[-1]
return fomval,c
def fin_FoM_PBC_optm(a,b,c,imprecision=10**-2,lherm=True):
"""
Optimization of FoM over MPO for SLD. Function for finite size systems with PBC.
Parameters:
a: MPO for density matrix, expected ndarray of a shape (bd,bd,d,d,n)
b: MPO for generalized derivative of density matrix, expected ndarray of a shape (bd,bd,d,d,n)
c: MPO for SLD, expected ndarray of a shape (bd,bd,d,d,n)
imprecision: expected imprecision of the end results, default value is 10**-2
lherm: boolean value, True (default value) when Hermitian gauge is imposed on SLD MPO, otherwise False
Returns:
fomval: optimal value of FoM
c: optimal MPO for SLD
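Example:
A minimal sketch for two qubits with PBC; a, b and the initial c are bond dimension 1 MPOs stored as ndarrays, with drho an assumed stand-in of valid shape for the generalized derivative.
    rho = np.array([[0.5,0.25],[0.25,0.5]],dtype=complex)
    drho = np.array([[0,0.5j],[-0.5j,0]],dtype=complex) # illustrative only
    a = np.tile(rho[np.newaxis,np.newaxis,:,:,np.newaxis],(1,1,1,1,2))
    b = np.tile(drho[np.newaxis,np.newaxis,:,:,np.newaxis],(1,1,1,1,2))
    c = np.tile(np.eye(2,dtype=complex)[np.newaxis,np.newaxis,:,:,np.newaxis],(1,1,1,1,2))
    fomval,c = fin_FoM_PBC_optm(a,b,c)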
"""
n = np.shape(a)[4]
d = np.shape(a)[2]
bdr = np.shape(a)[0]
bdrp = np.shape(b)[0]
bdl = np.shape(c)[0]
tol_fom = 0.1*imprecision/n**2
if n == 1:
tensors = [b[:,:,:,:,0],np.eye(bdl)]
legs = [[1,1,-4,-3],[-2,-1]]
l1 = ncon(tensors,legs)
l1 = np.reshape(l1,-1,order='F')
tensors = [a[:,:,:,:,0],np.eye(d),np.eye(bdl),np.eye(bdl)]
legs = [[1,1,-4,-7],[-8,-3],[-2,-1],[-6,-5]]
l2 = ncon(tensors,legs)
l2 = np.reshape(l2,(bdl*bdl*d*d,bdl*bdl*d*d),order='F')
dl2 = l2+l2.T
dl1 = 2*l1
dl2pinv = np.linalg.pinv(dl2,tol_fom)
dl2pinv = (dl2pinv+dl2pinv.T)/2
cv = dl2pinv @ dl1
c[:,:,:,:,0] = np.reshape(cv,(bdl,bdl,d,d),order='F')
if lherm:
c[:,:,:,:,0] = (c[:,:,:,:,0]+np.conj(np.moveaxis(c[:,:,:,:,0],2,3)))/2
cv = np.reshape(c[:,:,:,:,0],-1,order='F')
fomval = np.real(2*cv @ l1 - cv @ l2 @ cv)
else:
relunc_fom = 0.1*imprecision
l1f = np.zeros((bdl,bdrp,bdl,bdrp,n-1),dtype=complex)
l2f = np.zeros((bdl,bdr,bdl,bdl,bdr,bdl,n-1),dtype=complex)
fom = np.array([])
iter_fom = 0
while True:
tensors = [c[:,:,:,:,n-1],b[:,:,:,:,n-1]]
legs = [[-1,-3,1,2],[-2,-4,2,1]]
l1f[:,:,:,:,n-2] = ncon(tensors,legs)
tensors = [c[:,:,:,:,n-1],a[:,:,:,:,n-1],c[:,:,:,:,n-1]]
legs = [[-1,-4,1,2],[-2,-5,2,3],[-3,-6,3,1]]
l2f[:,:,:,:,:,:,n-2] = ncon(tensors,legs)
for x in range(n-2,0,-1):
tensors = [c[:,:,:,:,x],b[:,:,:,:,x],l1f[:,:,:,:,x]]
legs = [[-1,3,1,2],[-2,4,2,1],[3,4,-3,-4]]
l1f[:,:,:,:,x-1] = ncon(tensors,legs)
tensors = [c[:,:,:,:,x],a[:,:,:,:,x],c[:,:,:,:,x],l2f[:,:,:,:,:,:,x]]
legs = [[-1,4,1,2],[-2,5,2,3],[-3,6,3,1],[4,5,6,-4,-5,-6]]
l2f[:,:,:,:,:,:,x-1] = ncon(tensors,legs)
tensors = [b[:,:,:,:,0],l1f[:,:,:,:,0]]
legs = [[2,1,-4,-3],[-2,1,-1,2]]
l1 = ncon(tensors,legs)
l1 = np.reshape(l1,-1,order='F')
tensors = [a[:,:,:,:,0],np.eye(d),l2f[:,:,:,:,:,:,0]]
legs = [[2,1,-4,-7],[-8,-3],[-2,1,-6,-1,2,-5]]
l2 = ncon(tensors,legs)
l2 = np.reshape(l2,(bdl*bdl*d*d,bdl*bdl*d*d),order='F')
dl2 = l2+l2.T
dl1 = 2*l1
dl2pinv = np.linalg.pinv(dl2,tol_fom)
dl2pinv = (dl2pinv+dl2pinv.T)/2
cv = dl2pinv @ dl1
c[:,:,:,:,0] = np.reshape(cv,(bdl,bdl,d,d),order='F')
if lherm:
c[:,:,:,:,0] = (c[:,:,:,:,0]+np.conj(np.moveaxis(c[:,:,:,:,0],2,3)))/2
cv = np.reshape(c[:,:,:,:,0],-1,order='F')
fom = np.append(fom,np.real(2*cv @ l1 - cv @ l2 @ cv))
tensors = [c[:,:,:,:,0],b[:,:,:,:,0]]
legs = [[-1,-3,1,2],[-2,-4,2,1]]
l1c = ncon(tensors,legs)
tensors = [c[:,:,:,:,0],a[:,:,:,:,0],c[:,:,:,:,0]]
legs = [[-1,-4,1,2],[-2,-5,2,3],[-3,-6,3,1]]
l2c = ncon(tensors,legs)
for x in range(1,n-1):
tensors = [l1c,b[:,:,:,:,x],l1f[:,:,:,:,x]]
legs = [[3,4,-1,1],[1,2,-4,-3],[-2,2,3,4]]
l1 = ncon(tensors,legs)
l1 = np.reshape(l1,-1,order='F')
tensors = [l2c,a[:,:,:,:,x],np.eye(d),l2f[:,:,:,:,:,:,x]]
legs = [[3,4,5,-1,1,-5],[1,2,-4,-7],[-8,-3],[-2,2,-6,3,4,5]]
l2 = ncon(tensors,legs)
l2 = np.reshape(l2,(bdl*bdl*d*d,bdl*bdl*d*d),order='F')
dl2 = l2+l2.T
dl1 = 2*l1
dl2pinv = np.linalg.pinv(dl2,tol_fom)
dl2pinv = (dl2pinv+dl2pinv.T)/2
cv = dl2pinv @ dl1
c[:,:,:,:,x] = np.reshape(cv,(bdl,bdl,d,d),order='F')
if lherm:
c[:,:,:,:,x] = (c[:,:,:,:,x]+np.conj(np.moveaxis(c[:,:,:,:,x],2,3)))/2
cv = np.reshape(c[:,:,:,:,x],-1,order='F')
fom = np.append(fom,np.real(2*cv @ l1 - cv @ l2 @ cv))
tensors = [l1c,c[:,:,:,:,x],b[:,:,:,:,x]]
legs = [[-1,-2,3,4],[3,-3,1,2],[4,-4,2,1]]
l1c = ncon(tensors,legs)
tensors = [l2c,c[:,:,:,:,x],a[:,:,:,:,x],c[:,:,:,:,x]]
legs = [[-1,-2,-3,4,5,6],[4,-4,1,2],[5,-5,2,3],[6,-6,3,1]]
l2c = ncon(tensors,legs)
tensors = [l1c,b[:,:,:,:,n-1]]
legs = [[-2,2,-1,1],[1,2,-4,-3]]
l1 = ncon(tensors,legs)
l1 = np.reshape(l1,-1,order='F')
tensors = [l2c,a[:,:,:,:,n-1],np.eye(d)]
legs = [[-2,2,-6,-1,1,-5],[1,2,-4,-7],[-8,-3]]
l2 = ncon(tensors,legs)
l2 = np.reshape(l2,(bdl*bdl*d*d,bdl*bdl*d*d),order='F')
dl2 = l2+l2.T
dl1 = 2*l1
dl2pinv = np.linalg.pinv(dl2,tol_fom)
dl2pinv = (dl2pinv+dl2pinv.T)/2
cv = dl2pinv @ dl1
c[:,:,:,:,n-1] = np.reshape(cv,(bdl,bdl,d,d),order='F')
if lherm:
c[:,:,:,:,n-1] = (c[:,:,:,:,n-1]+np.conj(np.moveaxis(c[:,:,:,:,n-1],2,3)))/2
cv = np.reshape(c[:,:,:,:,n-1],-1,order='F')
fom = np.append(fom,np.real(2*cv @ l1 - cv @ l2 @ cv))
iter_fom += 1
if iter_fom >= 2 and all(fom[-2*n:] > 0) and np.std(fom[-2*n:])/np.mean(fom[-2*n:]) <= relunc_fom:
break
fomval = fom[-1]
return fomval,c
def fin_FoMD_OBC_optm(c2d,cpd,a0,imprecision=10**-2):
"""
Optimization of FoMD over MPS for initial wave function. Function for finite size systems with OBC.
Parameters:
c2d: MPO for square of dual of SLD, expected list of length n of ndarrays of a shape (bd,bd,d,d) (bd can vary between sites)
cpd: MPO for dual of generalized derivative of SLD, expected list of length n of ndarrays of a shape (bd,bd,d,d) (bd can vary between sites)
a0: MPS for initial wave function, expected list of length n of ndarrays of a shape (bd,bd,d) (bd can vary between sites)
imprecision: expected imprecision of the end results, default value is 10**-2
Returns:
fomdval: optimal value of FoMD
a0: optimal MPS for initial wave function
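Example:
A minimal sketch for two qubits; c2d and cpd are bond dimension 1 placeholder MPOs (identity and sz at each site) and a0 is a product-state MPS.
    sz = np.diag([1,-1]).astype(complex)
    c2d = [np.eye(2,dtype=complex)[np.newaxis,np.newaxis,:,:]]*2 # placeholder
    cpd = [sz[np.newaxis,np.newaxis,:,:]]*2 # placeholder
    a0 = [np.array([1,0],dtype=complex)[np.newaxis,np.newaxis,:]]*2
    fomdval,a0 = fin_FoMD_OBC_optm(c2d,cpd,a0)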
"""
n = len(a0)
if n == 1:
if np.shape(c2d[0])[0] == 1 and np.shape(cpd[0])[0] == 1 and np.shape(a0[0])[0] == 1:
d = np.shape(a0[0])[2]
tensors = [c2d[0][0,0,:,:]]
legs = [[-1,-2]]
l2d = ncon(tensors,legs)
l2d = np.reshape(l2d,(d,d),order='F')
tensors = [cpd[0][0,0,:,:]]
legs = [[-1,-2]]
lpd = ncon(tensors,legs)
lpd = np.reshape(lpd,(d,d),order='F')
eiginput = 2*lpd-l2d
eiginput = (eiginput+np.conj(eiginput).T)/2
fomdval,a0v = np.linalg.eig(eiginput)
position = np.argmax(np.real(fomdval))
a0v = np.reshape(a0v[:,position],-1,order='F')
a0v = a0v/np.sqrt(np.abs(np.conj(a0v) @ a0v))
a0[0][0,0,:] = np.reshape(a0v,(d),order='F')
fomdval = np.real(fomdval[position])
else:
warnings.warn('Tensor networks with OBC and length one have to have bond dimension equal to one.')
else:
relunc_fomd = 0.1*imprecision
l2df = [0]*n
lpdf = [0]*n
fomd = np.array([])
iter_fomd = 0
while True:
tensors = [np.conj(a0[n-1]),c2d[n-1],a0[n-1]]
legs = [[-1,-4,1],[-2,-5,1,2],[-3,-6,2]]
l2df[n-2] = ncon(tensors,legs)
l2df[n-2] = l2df[n-2][:,:,:,0,0,0]
tensors = [np.conj(a0[n-1]),cpd[n-1],a0[n-1]]
legs = [[-1,-4,1],[-2,-5,1,2],[-3,-6,2]]
lpdf[n-2] = ncon(tensors,legs)
lpdf[n-2] = lpdf[n-2][:,:,:,0,0,0]
for x in range(n-2,0,-1):
tensors = [np.conj(a0[x]),c2d[x],a0[x],l2df[x]]
legs = [[-1,3,1],[-2,4,1,2],[-3,5,2],[3,4,5]]
l2df[x-1] = ncon(tensors,legs)
tensors = [np.conj(a0[x]),cpd[x],a0[x],lpdf[x]]
legs = [[-1,3,1],[-2,4,1,2],[-3,5,2],[3,4,5]]
lpdf[x-1] = ncon(tensors,legs)
bdpsi1,bdpsi2,d = np.shape(a0[0])
tensors = [c2d[0],l2df[0]]
legs = [[-7,1,-3,-6],[-2,1,-5]]
l2d = ncon(tensors,legs)
l2d = np.reshape(l2d,(bdpsi1*bdpsi2*d,bdpsi1*bdpsi2*d),order='F')
tensors = [cpd[0],lpdf[0]]
legs = [[-7,1,-3,-6],[-2,1,-5]]
lpd = ncon(tensors,legs)
lpd = np.reshape(lpd,(bdpsi1*bdpsi2*d,bdpsi1*bdpsi2*d),order='F')
eiginput = 2*lpd-l2d
eiginput = (eiginput+np.conj(eiginput).T)/2
fomdval,a0v = np.linalg.eig(eiginput)
position = np.argmax(np.real(fomdval))
a0v = np.reshape(a0v[:,position],-1,order='F')
a0v = a0v/np.sqrt(np.abs(np.conj(a0v) @ a0v))
a0[0] = np.reshape(a0v,(bdpsi1,bdpsi2,d),order='F')
fomd = np.append(fomd,np.real(fomdval[position]))
a0[0] = np.moveaxis(a0[0],2,0)
a0[0] = np.reshape(a0[0],(d*bdpsi1,bdpsi2),order='F')
u,s,vh = np.linalg.svd(a0[0],full_matrices=False)
a0[0] = np.reshape(u,(d,bdpsi1,np.shape(s)[0]),order='F')
a0[0] = np.moveaxis(a0[0],0,2)
tensors = [np.diag(s) @ vh,a0[1]]
legs = [[-1,1],[1,-2,-3]]
a0[1] = ncon(tensors,legs)
tensors = [np.conj(a0[0]),c2d[0],a0[0]]
legs = [[-4,-1,1],[-5,-2,1,2],[-6,-3,2]]
l2dc = ncon(tensors,legs)
l2dc = l2dc[:,:,:,0,0,0]
tensors = [np.conj(a0[0]),cpd[0],a0[0]]
legs = [[-4,-1,1],[-5,-2,1,2],[-6,-3,2]]
lpdc = ncon(tensors,legs)
lpdc = lpdc[:,:,:,0,0,0]
for x in range(1,n-1):
bdpsi1,bdpsi2,d = np.shape(a0[x])
tensors = [l2dc,c2d[x],l2df[x]]
legs = [[-1,1,-4],[1,2,-3,-6],[-2,2,-5]]
l2d = ncon(tensors,legs)
l2d = np.reshape(l2d,(bdpsi1*bdpsi2*d,bdpsi1*bdpsi2*d),order='F')
tensors = [lpdc,cpd[x],lpdf[x]]
legs = [[-1,1,-4],[1,2,-3,-6],[-2,2,-5]]
lpd = ncon(tensors,legs)
lpd = np.reshape(lpd,(bdpsi1*bdpsi2*d,bdpsi1*bdpsi2*d),order='F')
eiginput = 2*lpd-l2d
eiginput = (eiginput+np.conj(eiginput).T)/2
fomdval,a0v = np.linalg.eig(eiginput)
position = np.argmax(np.real(fomdval))
a0v = np.reshape(a0v[:,position],-1,order='F')
a0v = a0v/np.sqrt(np.abs(np.conj(a0v) @ a0v))
a0[x] = np.reshape(a0v,(bdpsi1,bdpsi2,d),order='F')
fomd = np.append(fomd,np.real(fomdval[position]))
a0[x] = np.moveaxis(a0[x],2,0)
a0[x] = np.reshape(a0[x],(d*bdpsi1,bdpsi2),order='F')
u,s,vh = np.linalg.svd(a0[x],full_matrices=False)
a0[x] = np.reshape(u,(d,bdpsi1,np.shape(s)[0]),order='F')
a0[x] = np.moveaxis(a0[x],0,2)
tensors = [np.diag(s) @ vh,a0[x+1]]
legs = [[-1,1],[1,-2,-3]]
a0[x+1] = ncon(tensors,legs)
tensors = [l2dc,np.conj(a0[x]),c2d[x],a0[x]]
legs = [[3,4,5],[3,-1,1],[4,-2,1,2],[5,-3,2]]
l2dc = ncon(tensors,legs)
tensors = [lpdc,np.conj(a0[x]),cpd[x],a0[x]]
legs = [[3,4,5],[3,-1,1],[4,-2,1,2],[5,-3,2]]
lpdc = ncon(tensors,legs)
bdpsi1,bdpsi2,d = np.shape(a0[n-1])
tensors = [l2dc,c2d[n-1]]
legs = [[-1,1,-4],[1,-7,-3,-6]]
l2d = ncon(tensors,legs)
l2d = np.reshape(l2d,(bdpsi1*bdpsi2*d,bdpsi1*bdpsi2*d),order='F')
tensors = [lpdc,cpd[n-1]]
legs = [[-1,1,-4],[1,-7,-3,-6]]
lpd = ncon(tensors,legs)
lpd = np.reshape(lpd,(bdpsi1*bdpsi2*d,bdpsi1*bdpsi2*d),order='F')
eiginput = 2*lpd-l2d
eiginput = (eiginput+np.conj(eiginput).T)/2
fomdval,a0v = np.linalg.eig(eiginput)
position = np.argmax(np.real(fomdval))
a0v = np.reshape(a0v[:,position],-1,order='F')
a0v = a0v/np.sqrt(np.abs(np.conj(a0v) @ a0v))
a0[n-1] = np.reshape(a0v,(bdpsi1,bdpsi2,d),order='F')
fomd = np.append(fomd,np.real(fomdval[position]))
iter_fomd += 1
for x in range(n-1,0,-1):
bdpsi1,bdpsi2,d = np.shape(a0[x])
a0[x] = np.moveaxis(a0[x],2,1)
a0[x] = np.reshape(a0[x],(bdpsi1,d*bdpsi2),order='F')
u,s,vh = np.linalg.svd(a0[x],full_matrices=False)
a0[x] = np.reshape(vh,(np.shape(s)[0],d,bdpsi2),order='F')
a0[x] = np.moveaxis(a0[x],1,2)
tensors = [a0[x-1],u @ np.diag(s)]
legs = [[-1,1,-3],[1,-2]]
a0[x-1] = ncon(tensors,legs)
if iter_fomd >= 2 and all(fomd[-2*n:] > 0) and np.std(fomd[-2*n:])/np.mean(fomd[-2*n:]) <= relunc_fomd:
break
fomdval = fomd[-1]
return fomdval,a0
def fin_FoMD_PBC_optm(c2d,cpd,a0,imprecision=10**-2):
"""
Optimization of FoMD over MPS for initial wave function. Function for finite size systems with PBC.
Parameters:
c2d: MPO for square of dual of SLD, expected ndarray of a shape (bd,bd,d,d,n)
cpd: MPO for dual of generalized derivative of SLD, expected ndarray of a shape (bd,bd,d,d,n)
a0: MPS for initial wave function, expected ndarray of a shape (bd,bd,d,n)
imprecision: expected imprecision of the end results, default value is 10**-2
Returns:
fomdval: optimal value of FoMD
a0: optimal MPS for initial wave function
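Example:
A minimal sketch for two qubits with PBC; c2d and cpd are bond dimension 1 placeholder MPOs stored as ndarrays and a0 is a product-state MPS.
    sz = np.diag([1,-1]).astype(complex)
    c2d = np.tile(np.eye(2,dtype=complex)[np.newaxis,np.newaxis,:,:,np.newaxis],(1,1,1,1,2)) # placeholder
    cpd = np.tile(sz[np.newaxis,np.newaxis,:,:,np.newaxis],(1,1,1,1,2)) # placeholder
    a0 = np.zeros((1,1,2,2),dtype=complex)
    a0[0,0,0,:] = 1 # the product state |00>
    fomdval,a0 = fin_FoMD_PBC_optm(c2d,cpd,a0)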
"""
n = np.shape(c2d)[4]
d = np.shape(c2d)[2]
bdl2d = np.shape(c2d)[0]
bdlpd = np.shape(cpd)[0]
bdpsi = np.shape(a0)[0]
tol_fomd = 0.1*imprecision/n**2
if n == 1:
tensors = [c2d[:,:,:,:,0],np.eye(bdpsi),np.eye(bdpsi)]
legs = [[1,1,-3,-6],[-2,-1],[-5,-4]]
l2d = ncon(tensors,legs)
l2d = np.reshape(l2d,(bdpsi*bdpsi*d,bdpsi*bdpsi*d),order='F')
tensors = [cpd[:,:,:,:,0],np.eye(bdpsi),np.eye(bdpsi)]
legs = [[1,1,-3,-6],[-2,-1],[-5,-4]]
lpd = ncon(tensors,legs)
lpd = np.reshape(lpd,(bdpsi*bdpsi*d,bdpsi*bdpsi*d),order='F')
tensors = [np.eye(bdpsi),np.eye(bdpsi)]
legs = [[-2,-1],[-4,-3]]
psinorm = ncon(tensors,legs)
psinorm = np.reshape(psinorm,(bdpsi*bdpsi,bdpsi*bdpsi),order='F')
psinorm = (psinorm+np.conj(psinorm).T)/2
psinormpinv = np.linalg.pinv(psinorm,tol_fomd,hermitian=True)
psinormpinv = (psinormpinv+np.conj(psinormpinv).T)/2
psinormpinv = np.kron(np.eye(d),psinormpinv)
eiginput = 2*lpd-l2d
eiginput = (eiginput+np.conj(eiginput).T)/2
eiginput = psinormpinv @ eiginput
fomdval,a0v = np.linalg.eig(eiginput)
position = np.argmax(np.real(fomdval))
a0v = np.reshape(a0v[:,position],-1,order='F')
a0v = a0v/np.sqrt(np.abs(np.conj(a0v) @ np.kron(np.eye(d),psinorm) @ a0v))
a0[:,:,:,0] = np.reshape(a0v,(bdpsi,bdpsi,d),order='F')
fomdval = np.real(fomdval[position])
else:
relunc_fomd = 0.1*imprecision
l2df = np.zeros((bdpsi,bdl2d,bdpsi,bdpsi,bdl2d,bdpsi,n-1),dtype=complex)
lpdf = np.zeros((bdpsi,bdlpd,bdpsi,bdpsi,bdlpd,bdpsi,n-1),dtype=complex)
psinormf = np.zeros((bdpsi,bdpsi,bdpsi,bdpsi,n-1),dtype=complex)
fomd = np.array([])
iter_fomd = 0
while True:
tensors = [np.conj(a0[:,:,:,n-1]),c2d[:,:,:,:,n-1],a0[:,:,:,n-1]]
legs = [[-1,-4,1],[-2,-5,1,2],[-3,-6,2]]
l2df[:,:,:,:,:,:,n-2] = ncon(tensors,legs)
tensors = [np.conj(a0[:,:,:,n-1]),cpd[:,:,:,:,n-1],a0[:,:,:,n-1]]
legs = [[-1,-4,1],[-2,-5,1,2],[-3,-6,2]]
lpdf[:,:,:,:,:,:,n-2] = ncon(tensors,legs)
tensors = [np.conj(a0[:,:,:,n-1]),a0[:,:,:,n-1]]
legs = [[-1,-3,1],[-2,-4,1]]
psinormf[:,:,:,:,n-2] = ncon(tensors,legs)
for x in range(n-2,0,-1):
tensors = [np.conj(a0[:,:,:,x]),c2d[:,:,:,:,x],a0[:,:,:,x],l2df[:,:,:,:,:,:,x]]
legs = [[-1,3,1],[-2,4,1,2],[-3,5,2],[3,4,5,-4,-5,-6]]
l2df[:,:,:,:,:,:,x-1] = ncon(tensors,legs)
tensors = [np.conj(a0[:,:,:,x]),cpd[:,:,:,:,x],a0[:,:,:,x],lpdf[:,:,:,:,:,:,x]]
legs = [[-1,3,1],[-2,4,1,2],[-3,5,2],[3,4,5,-4,-5,-6]]
lpdf[:,:,:,:,:,:,x-1] = ncon(tensors,legs)
tensors = [np.conj(a0[:,:,:,x]),a0[:,:,:,x],psinormf[:,:,:,:,x]]
legs = [[-1,2,1],[-2,3,1],[2,3,-3,-4]]
psinormf[:,:,:,:,x-1] = ncon(tensors,legs)
tensors = [c2d[:,:,:,:,0],l2df[:,:,:,:,:,:,0]]
legs = [[2,1,-3,-6],[-2,1,-5,-1,2,-4]]
l2d = ncon(tensors,legs)
l2d = np.reshape(l2d,(bdpsi*bdpsi*d,bdpsi*bdpsi*d),order='F')
tensors = [cpd[:,:,:,:,0],lpdf[:,:,:,:,:,:,0]]
legs = [[2,1,-3,-6],[-2,1,-5,-1,2,-4]]
lpd = ncon(tensors,legs)
lpd = np.reshape(lpd,(bdpsi*bdpsi*d,bdpsi*bdpsi*d),order='F')
tensors = [psinormf[:,:,:,:,0]]
legs = [[-2,-4,-1,-3]]
psinorm = ncon(tensors,legs)
psinorm = np.reshape(psinorm,(bdpsi*bdpsi,bdpsi*bdpsi),order='F')
psinorm = (psinorm+np.conj(psinorm).T)/2
psinormpinv = np.linalg.pinv(psinorm,tol_fomd,hermitian=True)
psinormpinv = (psinormpinv+np.conj(psinormpinv).T)/2
psinormpinv = np.kron(np.eye(d),psinormpinv)
eiginput = 2*lpd-l2d
eiginput = (eiginput+np.conj(eiginput).T)/2
eiginput = psinormpinv @ eiginput
fomdval,a0v = np.linalg.eig(eiginput)
position = np.argmax(np.real(fomdval))
a0v = np.reshape(a0v[:,position],-1,order='F')
a0v = a0v/np.sqrt(np.abs(np.conj(a0v) @ np.kron(np.eye(d),psinorm) @ a0v))
a0[:,:,:,0] = np.reshape(a0v,(bdpsi,bdpsi,d),order='F')
fomd = np.append(fomd,np.real(fomdval[position]))
tensors = [np.conj(a0[:,:,:,0]),c2d[:,:,:,:,0],a0[:,:,:,0]]
legs = [[-1,-4,1],[-2,-5,1,2],[-3,-6,2]]
l2dc = ncon(tensors,legs)
tensors = [np.conj(a0[:,:,:,0]),cpd[:,:,:,:,0],a0[:,:,:,0]]
legs = [[-1,-4,1],[-2,-5,1,2],[-3,-6,2]]
lpdc = ncon(tensors,legs)
tensors = [np.conj(a0[:,:,:,0]),a0[:,:,:,0]]
legs = [[-1,-3,1],[-2,-4,1]]
psinormc = ncon(tensors,legs)
for x in range(1,n-1):
tensors = [l2dc,c2d[:,:,:,:,x],l2df[:,:,:,:,:,:,x]]
legs = [[3,4,5,-1,1,-4],[1,2,-3,-6],[-2,2,-5,3,4,5]]
l2d = ncon(tensors,legs)
l2d = np.reshape(l2d,(bdpsi*bdpsi*d,bdpsi*bdpsi*d),order='F')
tensors = [lpdc,cpd[:,:,:,:,x],lpdf[:,:,:,:,:,:,x]]
legs = [[3,4,5,-1,1,-4],[1,2,-3,-6],[-2,2,-5,3,4,5]]
lpd = ncon(tensors,legs)
lpd = np.reshape(lpd,(bdpsi*bdpsi*d,bdpsi*bdpsi*d),order='F')
tensors = [psinormc,psinormf[:,:,:,:,x]]
legs = [[1,2,-1,-3],[-2,-4,1,2]]
psinorm = ncon(tensors,legs)
psinorm = np.reshape(psinorm,(bdpsi*bdpsi,bdpsi*bdpsi),order='F')
psinorm = (psinorm+np.conj(psinorm).T)/2
psinormpinv = np.linalg.pinv(psinorm,tol_fomd,hermitian=True)
psinormpinv = (psinormpinv+np.conj(psinormpinv).T)/2
psinormpinv = np.kron(np.eye(d),psinormpinv)
eiginput = 2*lpd-l2d
eiginput = (eiginput+np.conj(eiginput).T)/2
eiginput = psinormpinv @ eiginput
fomdval,a0v = np.linalg.eig(eiginput)
position = np.argmax(np.real(fomdval))
a0v = np.reshape(a0v[:,position],-1,order='F')
a0v = a0v/np.sqrt(np.abs(np.conj(a0v) @ np.kron(np.eye(d),psinorm) @ a0v))
a0[:,:,:,x] = np.reshape(a0v,(bdpsi,bdpsi,d),order='F')
fomd = np.append(fomd,np.real(fomdval[position]))
tensors = [l2dc,np.conj(a0[:,:,:,x]),c2d[:,:,:,:,x],a0[:,:,:,x]]
legs = [[-1,-2,-3,3,4,5],[3,-4,1],[4,-5,1,2],[5,-6,2]]
l2dc = ncon(tensors,legs)
tensors = [lpdc,np.conj(a0[:,:,:,x]),cpd[:,:,:,:,x],a0[:,:,:,x]]
legs = [[-1,-2,-3,3,4,5],[3,-4,1],[4,-5,1,2],[5,-6,2]]
lpdc = ncon(tensors,legs)
tensors = [psinormc,np.conj(a0[:,:,:,x]),a0[:,:,:,x]]
legs = [[-1,-2,2,3],[2,-3,1],[3,-4,1]]
psinormc = ncon(tensors,legs)
tensors = [l2dc,c2d[:,:,:,:,n-1]]
legs = [[-2,2,-5,-1,1,-4],[1,2,-3,-6]]
l2d = ncon(tensors,legs)
l2d = np.reshape(l2d,(bdpsi*bdpsi*d,bdpsi*bdpsi*d),order='F')
tensors = [lpdc,cpd[:,:,:,:,n-1]]
legs = [[-2,2,-5,-1,1,-4],[1,2,-3,-6]]
lpd = ncon(tensors,legs)
lpd = np.reshape(lpd,(bdpsi*bdpsi*d,bdpsi*bdpsi*d),order='F')
tensors = [psinormc]
legs = [[-2,-4,-1,-3]]
psinorm = ncon(tensors,legs)
psinorm = np.reshape(psinorm,(bdpsi*bdpsi,bdpsi*bdpsi),order='F')
psinorm = (psinorm+np.conj(psinorm).T)/2
psinormpinv = np.linalg.pinv(psinorm,tol_fomd,hermitian=True)
psinormpinv = (psinormpinv+np.conj(psinormpinv).T)/2
psinormpinv = np.kron(np.eye(d),psinormpinv)
eiginput = 2*lpd-l2d
eiginput = (eiginput+np.conj(eiginput).T)/2
eiginput = psinormpinv @ eiginput
fomdval,a0v = np.linalg.eig(eiginput)
position = np.argmax(np.real(fomdval))
a0v = np.reshape(a0v[:,position],-1,order='F')
a0v = a0v/np.sqrt(np.abs(np.conj(a0v) @ np.kron(np.eye(d),psinorm) @ a0v))
a0[:,:,:,n-1] = np.reshape(a0v,(bdpsi,bdpsi,d),order='F')
fomd = np.append(fomd,np.real(fomdval[position]))
iter_fomd += 1
if iter_fomd >= 2 and all(fomd[-2*n:] > 0) and np.std(fomd[-2*n:])/np.mean(fomd[-2*n:]) <= relunc_fomd:
break
fomdval = fomd[-1]
return fomdval,a0
def fin_FoM_OBC_val(a,b,c):
"""
Calculate the value of FoM. Function for finite size systems with OBC.
Parameters:
a: MPO for density matrix, expected list of length n of ndarrays of a shape (bd,bd,d,d) (bd can vary between sites)
b: MPO for generalized derivative of density matrix, expected list of length n of ndarrays of a shape (bd,bd,d,d) (bd can vary between sites)
c: MPO for SLD, expected list of length n of ndarrays of a shape (bd,bd,d,d) (bd can vary between sites)
Returns:
fomval: value of FoM
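Example:
A minimal sketch for a single qubit with bond dimension 1 MPOs; with the assumed drho and the SLD candidate c below, 2*tr(c b) - tr(c a c) evaluates to 1.
    rho = np.array([[0.5,0.25],[0.25,0.5]],dtype=complex)
    drho = np.array([[0,0.5j],[-0.5j,0]],dtype=complex) # assumed derivative
    a = [rho[np.newaxis,np.newaxis,:,:]]
    b = [drho[np.newaxis,np.newaxis,:,:]]
    c = [np.array([[0,1j],[-1j,0]],dtype=complex)[np.newaxis,np.newaxis,:,:]]
    fomval = fin_FoM_OBC_val(a,b,c)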
"""
n = len(c)
if n == 1:
if np.shape(a[0])[0] == 1 and np.shape(b[0])[0] == 1 and np.shape(c[0])[0] == 1:
tensors = [c[0][0,0,:,:],b[0][0,0,:,:]]
legs = [[1,2],[2,1]]
l1 = ncon(tensors,legs)
tensors = [c[0][0,0,:,:],a[0][0,0,:,:],c[0][0,0,:,:]]
legs = [[1,2],[2,3],[3,1]]
l2 = ncon(tensors,legs)
fomval = 2*l1-l2
else:
warnings.warn('Tensor networks with OBC and length one have to have bond dimension equal to one.')
else:
tensors = [c[n-1],b[n-1]]
legs = [[-1,-3,1,2],[-2,-4,2,1]]
l1 = ncon(tensors,legs)
l1 = l1[:,:,0,0]
tensors = [c[n-1],a[n-1],c[n-1]]
legs = [[-1,-4,1,2],[-2,-5,2,3],[-3,-6,3,1]]
l2 = ncon(tensors,legs)
l2 = l2[:,:,:,0,0,0]
for x in range(n-2,0,-1):
tensors = [c[x],b[x],l1]
legs = [[-1,3,1,2],[-2,4,2,1],[3,4]]
l1 = ncon(tensors,legs)
tensors = [c[x],a[x],c[x],l2]
legs = [[-1,4,1,2],[-2,5,2,3],[-3,6,3,1],[4,5,6]]
l2 = ncon(tensors,legs)
tensors = [c[0],b[0],l1]
legs = [[-1,3,1,2],[-2,4,2,1],[3,4]]
l1 = ncon(tensors,legs)
l1 = np.real(l1[0,0]) # real scalar; float() would fail on a complex-dtype array
tensors = [c[0],a[0],c[0],l2]
legs = [[-1,4,1,2],[-2,5,2,3],[-3,6,3,1],[4,5,6]]
l2 = ncon(tensors,legs)
l2 = np.real(l2[0,0,0])
fomval = 2*l1-l2
return fomval
def fin_FoM_PBC_val(a,b,c):
"""
Calculate the value of FoM. Function for finite size systems with PBC.
Parameters:
a: MPO for a density matrix, expected ndarray of a shape (bd,bd,d,d,n)
b: MPO for generalized derivative of a density matrix, expected ndarray of a shape (bd,bd,d,d,n)
c: MPO for the SLD, expected ndarray of a shape (bd,bd,d,d,n)
Returns:
fomval: value of FoM
"""
n = np.shape(a)[4]
if n == 1:
tensors = [c[:,:,:,:,0],b[:,:,:,:,0]]
legs = [[3,3,1,2],[4,4,2,1]]
l1 = ncon(tensors,legs)
tensors = [c[:,:,:,:,0],a[:,:,:,:,0],c[:,:,:,:,0]]
legs = [[4,4,1,2],[5,5,2,3],[6,6,3,1]]
l2 = ncon(tensors,legs)
fomval = 2*l1-l2
else:
tensors = [c[:,:,:,:,n-1],b[:,:,:,:,n-1]]
legs = [[-1,-3,1,2],[-2,-4,2,1]]
l1 = ncon(tensors,legs)
tensors = [c[:,:,:,:,n-1],a[:,:,:,:,n-1],c[:,:,:,:,n-1]]
legs = [[-1,-4,1,2],[-2,-5,2,3],[-3,-6,3,1]]
l2 = ncon(tensors,legs)
for x in range(n-2,0,-1):
tensors = [c[:,:,:,:,x],b[:,:,:,:,x],l1]
legs = [[-1,3,1,2],[-2,4,2,1],[3,4,-3,-4]]
l1 = ncon(tensors,legs)
tensors = [c[:,:,:,:,x],a[:,:,:,:,x],c[:,:,:,:,x],l2]
legs = [[-1,4,1,2],[-2,5,2,3],[-3,6,3,1],[4,5,6,-4,-5,-6]]
l2 = ncon(tensors,legs)
tensors = [c[:,:,:,:,0],b[:,:,:,:,0],l1]
legs = [[5,3,1,2],[6,4,2,1],[3,4,5,6]]
l1 = ncon(tensors,legs)
tensors = [c[:,:,:,:,0],a[:,:,:,:,0],c[:,:,:,:,0],l2]
legs = [[7,4,1,2],[8,5,2,3],[9,6,3,1],[4,5,6,7,8,9]]
l2 = ncon(tensors,legs)
fomval = 2*l1-l2
return fomval
def fin_FoMD_OBC_val(c2d,cpd,a0):
"""
Calculate value of FoMD. Function for finite size systems with OBC.
Parameters:
c2d: MPO for square of dual of SLD, expected list of length n of ndarrays of a shape (bd,bd,d,d) (bd can vary between sites)
cpd: MPO for dual of generalized derivative of SLD, expected list of length n of ndarrays of a shape (bd,bd,d,d) (bd can vary between sites)
a0: MPS for initial wave function, expected list of length n of ndarrays of a shape (bd,bd,d) (bd can vary between sites)
Returns:
fomdval: value of FoMD
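Example:
A minimal sketch for a single qubit with bond dimension 1 tensors; here 2*<psi|cpd|psi> - <psi|c2d|psi> evaluates to 1.
    sz = np.diag([1,-1]).astype(complex)
    c2d = [np.eye(2,dtype=complex)[np.newaxis,np.newaxis,:,:]] # placeholder
    cpd = [sz[np.newaxis,np.newaxis,:,:]] # placeholder
    a0 = [np.array([1,0],dtype=complex)[np.newaxis,np.newaxis,:]]
    fomdval = fin_FoMD_OBC_val(c2d,cpd,a0)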
"""
n = len(a0)
if n == 1:
if np.shape(c2d[0])[0] == 1 and np.shape(cpd[0])[0] == 1 and np.shape(a0[0])[0] == 1:
tensors = [np.conj(a0[0][0,0,:]),c2d[0][0,0,:,:],a0[0][0,0,:]]
legs = [[1],[1,2],[2]]
l2d = ncon(tensors,legs)
tensors = [np.conj(a0[0][0,0,:]),cpd[0][0,0,:,:],a0[0][0,0,:]]
legs = [[1],[1,2],[2]]
lpd = ncon(tensors,legs)
fomdval = 2*lpd-l2d
else:
warnings.warn('Tensor networks with OBC and length one have to have bond dimension equal to one.')
else:
tensors = [np.conj(a0[n-1]),c2d[n-1],a0[n-1]]
legs = [[-1,-4,1],[-2,-5,1,2],[-3,-6,2]]
l2d = ncon(tensors,legs)
l2d = l2d[:,:,:,0,0,0]
tensors = [np.conj(a0[n-1]),cpd[n-1],a0[n-1]]
legs = [[-1,-4,1],[-2,-5,1,2],[-3,-6,2]]
lpd = ncon(tensors,legs)
lpd = lpd[:,:,:,0,0,0]
for x in range(n-2,0,-1):
tensors = [np.conj(a0[x]),c2d[x],a0[x],l2d]
legs = [[-1,3,1],[-2,4,1,2],[-3,5,2],[3,4,5]]
l2d = ncon(tensors,legs)
tensors = [np.conj(a0[x]),cpd[x],a0[x],lpd]
legs = [[-1,3,1],[-2,4,1,2],[-3,5,2],[3,4,5]]
lpd = ncon(tensors,legs)
tensors = [np.conj(a0[0]),c2d[0],a0[0],l2d]
legs = [[-1,3,1],[-2,4,1,2],[-3,5,2],[3,4,5]]
l2d = ncon(tensors,legs)
l2d = np.real(l2d[0,0,0]) # real scalar; float() would fail on a complex-dtype array
tensors = [np.conj(a0[0]),cpd[0],a0[0],lpd]
legs = [[-1,3,1],[-2,4,1,2],[-3,5,2],[3,4,5]]
lpd = ncon(tensors,legs)
lpd = np.real(lpd[0,0,0])
fomdval = 2*lpd-l2d
return fomdval
def fin_FoMD_PBC_val(c2d,cpd,a0):
"""
Calculate the value of FoMD. Function for finite size systems with PBC.
Parameters:
c2d: MPO for square of dual of the SLD, expected ndarray of a shape (bd,bd,d,d,n)
cpd: MPO for dual of generalized derivative of the SLD, expected ndarray of a shape (bd,bd,d,d,n)
a0: MPS for the initial wave function, expected ndarray of a shape (bd,bd,d,n)
Returns:
fomdval: value of FoMD
"""
n = np.shape(c2d)[4]
if n == 1:
tensors = [np.conj(a0[:,:,:,0]),c2d[:,:,:,:,0],a0[:,:,:,0]]
legs = [[3,3,1],[4,4,1,2],[5,5,2]]
l2d = ncon(tensors,legs)
tensors = [np.conj(a0[:,:,:,0]),cpd[:,:,:,:,0],a0[:,:,:,0]]
legs = [[3,3,1],[4,4,1,2],[5,5,2]]
lpd = ncon(tensors,legs)
fomdval = 2*lpd-l2d
else:
tensors = [np.conj(a0[:,:,:,n-1]),c2d[:,:,:,:,n-1],a0[:,:,:,n-1]]
legs = [[-1,-4,1],[-2,-5,1,2],[-3,-6,2]]
l2d = ncon(tensors,legs)
tensors = [np.conj(a0[:,:,:,n-1]),cpd[:,:,:,:,n-1],a0[:,:,:,n-1]]
legs = [[-1,-4,1],[-2,-5,1,2],[-3,-6,2]]
lpd = ncon(tensors,legs)
for x in range(n-2,0,-1):
tensors = [np.conj(a0[:,:,:,x]),c2d[:,:,:,:,x],a0[:,:,:,x],l2d]
legs = [[-1,3,1],[-2,4,1,2],[-3,5,2],[3,4,5,-4,-5,-6]]
l2d = ncon(tensors,legs)
tensors = [np.conj(a0[:,:,:,x]),cpd[:,:,:,:,x],a0[:,:,:,x],lpd]
legs = [[-1,3,1],[-2,4,1,2],[-3,5,2],[3,4,5,-4,-5,-6]]
lpd = ncon(tensors,legs)
tensors = [np.conj(a0[:,:,:,0]),c2d[:,:,:,:,0],a0[:,:,:,0],l2d]
legs = [[6,3,1],[7,4,1,2],[8,5,2],[3,4,5,6,7,8]]
l2d = ncon(tensors,legs)
tensors = [np.conj(a0[:,:,:,0]),cpd[:,:,:,:,0],a0[:,:,:,0],lpd]
legs = [[6,3,1],[7,4,1,2],[8,5,2],[3,4,5,6,7,8]]
lpd = ncon(tensors,legs)
fomdval = 2*lpd-l2d
return fomdval
#################################################################
# 1.2.2 Problems with discrete approximation of the derivative. #
#################################################################
def fin2_FoM_FoMD_optbd(n,d,bc,ch,chp,epsilon,cini=None,a0ini=None,imprecision=10**-2,bdlmax=100,alwaysbdlmax=False,lherm=True,bdpsimax=100,alwaysbdpsimax=False):
"""
Iterative optimization of FoM/FoMD over SLD MPO and initial wave function MPS and also a check of convergence with increasing bond dimensions. Function for finite size systems. Version with two channels separated by epsilon.
Parameters:
n: number of sites in TN
d: dimension of the local Hilbert space (dimension of the physical index)
bc: boundary conditions, 'O' for OBC, 'P' for PBC
ch: MPO for a quantum channel at the value of estimated parameter phi=phi_0, expected list of length n of ndarrays of a shape (bd,bd,d**2,d**2) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d**2,d**2,n) for PBC
chp: MPO for a quantum channel at the value of estimated parameter phi=phi_0+epsilon, expected list of length n of ndarrays of a shape (bd,bd,d**2,d**2) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d**2,d**2,n) for PBC
epsilon: value of a separation between estimated parameters encoded in ch and chp, float
cini: initial MPO for the SLD, expected list of length n of ndarrays of a shape (bd,bd,d,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,d,n) for PBC
a0ini: initial MPS for the initial wave function, expected list of length n of ndarrays of a shape (bd,bd,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,n) for PBC
imprecision: expected imprecision of the end results, default value is 10**-2
bdlmax: maximal value of bd for SLD MPO, default value is 100
alwaysbdlmax: boolean value, True if the maximal value of bd for SLD MPO has to be reached, otherwise False (default value)
lherm: boolean value, True (default value) when Hermitian gauge is imposed on SLD MPO, otherwise False
bdpsimax: maximal value of bd for the initial wave function MPS, default value is 100
alwaysbdpsimax: boolean value, True if the maximal value of bd for initial wave function MPS has to be reached, otherwise False (default value)
Returns:
result: optimal value of FoM/FoMD
resultm: matrix describing FoM/FoMD as a function of bd of the SLD MPO [rows] and of the initial wave function MPS [columns]
c: optimal MPO for SLD
a0: optimal MPS for initial wave function
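Example:
A minimal sketch for one qubit with OBC; ch is the identity superoperator at phi=phi_0 and chp an assumed superoperator of a small sz/2 rotation by epsilon in a column-major vectorization convention. In real use both come from the package's channel-construction routines.
    d = 2
    epsilon = 10**-4
    U = np.diag([np.exp(-0.5j*epsilon),np.exp(0.5j*epsilon)]) # exp(-1j*epsilon*sz/2)
    ch = [np.eye(d**2,dtype=complex)[np.newaxis,np.newaxis,:,:]]
    chp = [np.kron(np.conj(U),U)[np.newaxis,np.newaxis,:,:]] # assumed convention
    result,resultm,c,a0 = fin2_FoM_FoMD_optbd(1,d,'O',ch,chp,epsilon)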
"""
while True:
if a0ini is None:
bdpsi = 1
a0 = np.zeros(d,dtype=complex)
for i in range(d):
a0[i] = np.sqrt(math.comb(d-1,i))*2**(-(d-1)/2) # prod
# a0[i] = np.sqrt(2/(d+1))*np.sin((1+i)*np.pi/(d+1)) # sine
if bc == 'O':
a0 = a0[np.newaxis,np.newaxis,:]
a0 = [a0]*n
elif bc == 'P':
a0 = a0[np.newaxis,np.newaxis,:,np.newaxis]
a0 = np.tile(a0,(1,1,1,n))
else:
a0 = a0ini
if bc == 'O':
bdpsi = max([np.shape(a0[i])[0] for i in range(n)])
a0 = [a0[i].astype(complex) for i in range(n)]
elif bc == 'P':
bdpsi = np.shape(a0)[0]
a0 = a0.astype(complex)
if cini is None:
bdl = 1
rng = np.random.default_rng()
if bc == 'O':
c = [0]*n
c[0] = (rng.random((1,bdl,d,d)) + 1j*rng.random((1,bdl,d,d)))/bdl
c[0] = (c[0] + np.conj(np.moveaxis(c[0],2,3)))/2
for x in range(1,n-1):
c[x] = (rng.random((bdl,bdl,d,d)) + 1j*rng.random((bdl,bdl,d,d)))/bdl
c[x] = (c[x] + np.conj(np.moveaxis(c[x],2,3)))/2
c[n-1] = (rng.random((bdl,1,d,d)) + 1j*rng.random((bdl,1,d,d)))/bdl
c[n-1] = (c[n-1] + np.conj(np.moveaxis(c[n-1],2,3)))/2
elif bc == 'P':
c = (rng.random((bdl,bdl,d,d,n)) + 1j*rng.random((bdl,bdl,d,d,n)))/bdl
c = (c + np.conj(np.moveaxis(c,2,3)))/2
else:
c = cini
if bc == 'O':
bdl = max([np.shape(c[i])[0] for i in range(n)])
c = [c[i].astype(complex) for i in range(n)]
elif bc == 'P':
bdl = np.shape(c)[0]
c = c.astype(complex)
resultm = np.zeros((bdlmax,bdpsimax),dtype=float)
resultm[bdl-1,bdpsi-1],c,a0 = fin2_FoM_FoMD_optm(n,d,bc,c,a0,ch,chp,epsilon,imprecision,lherm)
if bc == 'O' and n == 1:
resultm = resultm[0:bdl,0:bdpsi]
result = resultm[bdl-1,bdpsi-1]
return result,resultm,c,a0
factorv = np.array([0.5,0.25,0.1,1,0.01])
problem = False
while True:
while True:
if bdpsi == bdpsimax:
break
else:
a0old = a0
bdpsi += 1
i = 0
while True:
a0 = fin_enlarge_bdpsi(a0,factorv[i])
resultm[bdl-1,bdpsi-1],cnew,a0new = fin2_FoM_FoMD_optm(n,d,bc,c,a0,ch,chp,epsilon,imprecision,lherm)
if resultm[bdl-1,bdpsi-1] >= resultm[bdl-1,bdpsi-2]:
break
i += 1
if i == np.size(factorv):
problem = True
break
if problem:
break
if not(alwaysbdpsimax) and resultm[bdl-1,bdpsi-1] < (1+imprecision)*resultm[bdl-1,bdpsi-2]:
bdpsi += -1
a0 = a0old
a0copy = a0new
ccopy = cnew
break
else:
a0 = a0new
c = cnew
if problem:
break
if bdl == bdlmax:
if bdpsi == bdpsimax:
resultm = resultm[0:bdl,0:bdpsi]
result = resultm[bdl-1,bdpsi-1]
else:
a0 = a0copy
c = ccopy
resultm = resultm[0:bdl,0:bdpsi+1]
result = resultm[bdl-1,bdpsi]
break
else:
bdl += 1
i = 0
while True:
c = fin_enlarge_bdl(c,factorv[i])
resultm[bdl-1,bdpsi-1],cnew,a0new = fin2_FoM_FoMD_optm(n,d,bc,c,a0,ch,chp,epsilon,imprecision,lherm)
if resultm[bdl-1,bdpsi-1] >= resultm[bdl-2,bdpsi-1]:
a0 = a0new
c = cnew
break
i += 1
if i == np.size(factorv):
problem = True
break
if problem:
break
if not(alwaysbdlmax) and resultm[bdl-1,bdpsi-1] < (1+imprecision)*resultm[bdl-2,bdpsi-1]:
if bdpsi == bdpsimax:
resultm = resultm[0:bdl,0:bdpsi]
result = resultm[bdl-1,bdpsi-1]
else:
if resultm[bdl-1,bdpsi-1] < resultm[bdl-2,bdpsi]:
a0 = a0copy
c = ccopy
resultm = resultm[0:bdl,0:bdpsi+1]
bdl += -1
bdpsi += 1
result = resultm[bdl-1,bdpsi-1]
else:
resultm = resultm[0:bdl,0:bdpsi+1]
result = resultm[bdl-1,bdpsi-1]
break
if not(problem):
break
return result,resultm,c,a0
def fin2_FoM_optbd(n,d,bc,a,b,epsilon,cini=None,imprecision=10**-2,bdlmax=100,alwaysbdlmax=False,lherm=True):
"""
Optimization of FoM over the SLD MPO and also a check of convergence in its bond dimension. Function for finite size systems. Version with two states separated by epsilon.
Parameters:
n: number of sites in TN
d: dimension of local Hilbert space (dimension of physical index)
bc: boundary conditions, 'O' for OBC, 'P' for PBC
a: MPO for the density matrix at the value of estimated parameter phi=phi_0, expected list of length n of ndarrays of a shape (bd,bd,d,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,d,n) for PBC
b: MPO for the density matrix at the value of estimated parameter phi=phi_0+epsilon, expected list of length n of ndarrays of a shape (bd,bd,d,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,d,n) for PBC
epsilon: value of a separation between estimated parameters encoded in a and b, float
cini: initial MPO for SLD, expected list of length n of ndarrays of a shape (bd,bd,d,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,d,n) for PBC
imprecision: expected imprecision of the end results, default value is 10**-2
bdlmax: maximal value of bd for SLD MPO, default value is 100
alwaysbdlmax: boolean value, True if the maximal value of bd for the SLD MPO has to be reached, otherwise False (default value)
lherm: boolean value, True (default value) when Hermitian gauge is imposed on SLD MPO, otherwise False
Returns:
result: optimal value of FoM
resultv: vector describing FoM as a function of bd of the SLD MPO
c: optimal MPO for SLD
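Example:
A minimal sketch for one qubit with OBC; a and b are the density matrices of a |+> state before and after an assumed sz/2 rotation by epsilon.
    epsilon = 10**-4
    rho0 = np.array([[0.5,0.5],[0.5,0.5]],dtype=complex) # |+><+|
    U = np.diag([np.exp(-0.5j*epsilon),np.exp(0.5j*epsilon)]) # exp(-1j*epsilon*sz/2)
    rho1 = U @ rho0 @ np.conj(U).T
    a = [rho0[np.newaxis,np.newaxis,:,:]]
    b = [rho1[np.newaxis,np.newaxis,:,:]]
    result,resultv,c = fin2_FoM_optbd(1,2,'O',a,b,epsilon)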
"""
while True:
if cini is None:
bdl = 1
rng = np.random.default_rng()
if bc == 'O':
c = [0]*n
c[0] = (rng.random((1,bdl,d,d)) + 1j*rng.random((1,bdl,d,d)))/bdl
c[0] = (c[0] + np.conj(np.moveaxis(c[0],2,3)))/2
for x in range(1,n-1):
c[x] = (rng.random((bdl,bdl,d,d)) + 1j*rng.random((bdl,bdl,d,d)))/bdl
c[x] = (c[x] + np.conj(np.moveaxis(c[x],2,3)))/2
c[n-1] = (rng.random((bdl,1,d,d)) + 1j*rng.random((bdl,1,d,d)))/bdl
c[n-1] = (c[n-1] + np.conj(np.moveaxis(c[n-1],2,3)))/2
elif bc == 'P':
c = (rng.random((bdl,bdl,d,d,n)) + 1j*rng.random((bdl,bdl,d,d,n)))/bdl
c = (c + np.conj(np.moveaxis(c,2,3)))/2
else:
c = cini
if bc == 'O':
bdl = max([np.shape(c[i])[0] for i in range(n)])
c = [c[i].astype(complex) for i in range(n)]
elif bc == 'P':
bdl = np.shape(c)[0]
c = c.astype(complex)
resultv = np.zeros(bdlmax,dtype=float)
if bc == 'O':
resultv[bdl-1],c = fin2_FoM_OBC_optm(a,b,epsilon,c,imprecision,lherm)
if n == 1:
resultv = resultv[0:bdl]
result = resultv[bdl-1]
return result,resultv,c
elif bc == 'P':
resultv[bdl-1],c = fin2_FoM_PBC_optm(a,b,epsilon,c,imprecision,lherm)
factorv = np.array([0.5,0.25,0.1,1,0.01])
problem = False
while True:
if bdl == bdlmax:
resultv = resultv[0:bdl]
result = resultv[bdl-1]
break
else:
bdl += 1
i = 0
while True:
c = fin_enlarge_bdl(c,factorv[i])
if bc == 'O':
resultv[bdl-1],cnew = fin2_FoM_OBC_optm(a,b,epsilon,c,imprecision,lherm)
elif bc == 'P':
resultv[bdl-1],cnew = fin2_FoM_PBC_optm(a,b,epsilon,c,imprecision,lherm)
if resultv[bdl-1] >= resultv[bdl-2]:
c = cnew
break
i += 1
if i == np.size(factorv):
problem = True
break
if problem:
break
if not(alwaysbdlmax) and resultv[bdl-1] < (1+imprecision)*resultv[bdl-2]:
resultv = resultv[0:bdl]
result = resultv[bdl-1]
break
if not(problem):
break
return result,resultv,c
def fin2_FoMD_optbd(n,d,bc,c2d,cd,cpd,epsilon,a0ini=None,imprecision=10**-2,bdpsimax=100,alwaysbdpsimax=False):
"""
Optimization of FoMD over the initial wave function MPS and also a check of convergence when increasing the bond dimension. Function for finite size systems. Version with two dual SLDs separated by epsilon.
Parameters:
n: number of sites in TN
d: dimension of local Hilbert space (dimension of physical index)
bc: boundary conditions, 'O' for OBC, 'P' for PBC
c2d: MPO for square of dual of SLD at the value of estimated parameter phi=-phi_0, expected list of length n of ndarrays of a shape (bd,bd,d,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,d,n) for PBC
cd: MPO for dual of SLD at the value of estimated parameter phi=-phi_0, expected list of length n of ndarrays of a shape (bd,bd,d,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,d,n) for PBC
cpd: MPO for dual of SLD at the value of estimated parameter phi=-(phi_0+epsilon), expected list of length n of ndarrays of a shape (bd,bd,d,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,d,n) for PBC
epsilon: value of a separation between estimated parameters encoded in cd and cpd, float
a0ini: initial MPS for initial wave function, expected list of length n of ndarrays of a shape (bd,bd,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,n) for PBC
imprecision: expected imprecision of the end results, default value is 10**-2
bdpsimax: maximal value of bd for initial wave function MPS, default value is 100
alwaysbdpsimax: boolean value, True if the maximal value of bd for the initial wave function MPS has to be reached, otherwise False (default value)
Returns:
result: optimal value of FoMD
resultv: vector describing FoMD as a function of bd of the initial wave function MPS
a0: optimal MPS for initial wave function
"""
while True:
if a0ini is None:
bdpsi = 1
a0 = np.zeros(d,dtype=complex)
for i in range(d):
a0[i] = np.sqrt(math.comb(d-1,i))*2**(-(d-1)/2) # prod
# a0[i] = np.sqrt(2/(d+1))*np.sin((1+i)*np.pi/(d+1)) # sine
if bc == 'O':
a0 = a0[np.newaxis,np.newaxis,:]
a0 = [a0]*n
elif bc == 'P':
a0 = a0[np.newaxis,np.newaxis,:,np.newaxis]
a0 = np.tile(a0,(1,1,1,n))
else:
a0 = a0ini
if bc == 'O':
bdpsi = max([np.shape(a0[i])[0] for i in range(n)])
a0 = [a0[i].astype(complex) for i in range(n)]
elif bc == 'P':
bdpsi = np.shape(a0)[0]
a0 = a0.astype(complex)
resultv = np.zeros(bdpsimax,dtype=float)
if bc == 'O':
resultv[bdpsi-1],a0 = fin2_FoMD_OBC_optm(c2d,cd,cpd,epsilon,a0,imprecision)
if n == 1:
resultv = resultv[0:bdpsi]
result = resultv[bdpsi-1]
return result,resultv,a0
elif bc == 'P':
resultv[bdpsi-1],a0 = fin2_FoMD_PBC_optm(c2d,cd,cpd,epsilon,a0,imprecision)
factorv = np.array([0.5,0.25,0.1,1,0.01])
problem = False
while True:
if bdpsi == bdpsimax:
resultv = resultv[0:bdpsi]
result = resultv[bdpsi-1]
break
else:
bdpsi += 1
i = 0
while True:
a0 = fin_enlarge_bdpsi(a0,factorv[i])
if bc == 'O':
resultv[bdpsi-1],a0new = fin2_FoMD_OBC_optm(c2d,cd,cpd,epsilon,a0,imprecision)
elif bc == 'P':
resultv[bdpsi-1],a0new = fin2_FoMD_PBC_optm(c2d,cd,cpd,epsilon,a0,imprecision)
if resultv[bdpsi-1] >= resultv[bdpsi-2]:
a0 = a0new
break
i += 1
if i == np.size(factorv):
problem = True
break
if problem:
break
if not(alwaysbdpsimax) and resultv[bdpsi-1] < (1+imprecision)*resultv[bdpsi-2]:
resultv = resultv[0:bdpsi]
result = resultv[bdpsi-1]
break
if not(problem):
break
return result,resultv,a0
def fin2_FoM_FoMD_optm(n,d,bc,c,a0,ch,chp,epsilon,imprecision=10**-2,lherm=True):
"""
Iterative optimization of FoM/FoMD over SLD MPO and initial wave function MPS. Function for finite size systems. Version with two channels separated by epsilon.
Parameters:
n: number of sites in TN
d: dimension of local Hilbert space (dimension of physical index)
bc: boundary conditions, 'O' for OBC, 'P' for PBC
c: MPO for SLD, expected list of length n of ndarrays of a shape (bd,bd,d,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,d,n) for PBC
a0: MPS for initial wave function, expected list of length n of ndarrays of a shape (bd,bd,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,n) for PBC
ch: MPO for quantum channel at the value of estimated parameter phi=phi_0, expected list of length n of ndarrays of a shape (bd,bd,d**2,d**2) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d**2,d**2,n) for PBC
chp: MPO for quantum channel at the value of estimated parameter phi=phi_0+epsilon, expected list of length n of ndarrays of a shape (bd,bd,d**2,d**2) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d**2,d**2,n) for PBC
epsilon: value of a separation between estimated parameters encoded in ch and chp, float
imprecision: expected imprecision of the end results, default value is 10**-2
lherm: boolean value, True (default value) when Hermitian gauge is imposed on SLD MPO, otherwise False
Returns:
fval: optimal value of FoM/FoMD
c: optimal MPO for SLD
a0: optimal MPS for initial wave function
"""
relunc_f = 0.1*imprecision
if bc == 'O':
chd = [0]*n
chpd = [0]*n
for x in range(n):
chd[x] = np.conj(np.moveaxis(ch[x],2,3))
chpd[x] = np.conj(np.moveaxis(chp[x],2,3))
elif bc == 'P':
chd = np.conj(np.moveaxis(ch,2,3))
chpd = np.conj(np.moveaxis(chp,2,3))
f = np.array([])
iter_f = 0
while True:
a0_dm = wave_function_to_density_matrix(a0)
a = channel_acting_on_operator(ch,a0_dm)
b = channel_acting_on_operator(chp,a0_dm)
if bc == 'O':
fom,c = fin2_FoM_OBC_optm(a,b,epsilon,c,imprecision,lherm)
elif bc == 'P':
fom,c = fin2_FoM_PBC_optm(a,b,epsilon,c,imprecision,lherm)
f = np.append(f,fom)
if iter_f >= 2 and np.std(f[-4:])/np.mean(f[-4:]) <= relunc_f:
break
if bc == 'O':
c2 = [0]*n
for x in range(n):
bdl1 = np.shape(c[x])[0]
bdl2 = np.shape(c[x])[1]
c2[x] = np.zeros((bdl1**2,bdl2**2,d,d),dtype=complex)
for nx in range(d):
for nxp in range(d):
for nxpp in range(d):
c2[x][:,:,nx,nxp] = c2[x][:,:,nx,nxp]+np.kron(c[x][:,:,nx,nxpp],c[x][:,:,nxpp,nxp])
elif bc == 'P':
bdl = np.shape(c)[0]
c2 = np.zeros((bdl**2,bdl**2,d,d,n),dtype=complex)
for nx in range(d):
for nxp in range(d):
for nxpp in range(d):
for x in range(n):
c2[:,:,nx,nxp,x] = c2[:,:,nx,nxp,x]+np.kron(c[:,:,nx,nxpp,x],c[:,:,nxpp,nxp,x])
c2d = channel_acting_on_operator(chd,c2)
cd = channel_acting_on_operator(chd,c)
cpd = channel_acting_on_operator(chpd,c)
if bc == 'O':
fomd,a0 = fin2_FoMD_OBC_optm(c2d,cd,cpd,epsilon,a0,imprecision)
elif bc == 'P':
fomd,a0 = fin2_FoMD_PBC_optm(c2d,cd,cpd,epsilon,a0,imprecision)
f = np.append(f,fomd)
iter_f += 1
fval = f[-1]
return fval,c,a0
def fin2_FoM_OBC_optm(a,b,epsilon,c,imprecision=10**-2,lherm=True):
"""
Optimization of FoM over MPO for SLD. Function for finite size systems with OBC. Version with two states separated by epsilon.
Parameters:
a: MPO for the density matrix at the value of estimated parameter phi=phi_0, expected list of length n of ndarrays of a shape (bd,bd,d,d) (bd can vary between sites)
b: MPO for the density matrix at the value of estimated parameter phi=phi_0+epsilon, expected list of length n of ndarrays of a shape (bd,bd,d,d) (bd can vary between sites)
epsilon: value of a separation between estimated parameters encoded in a and b, float
c: MPO for the SLD, expected list of length n of ndarrays of a shape (bd,bd,d,d) (bd can vary between sites)
imprecision: expected imprecision of the end results, default value is 10**-2
lherm: boolean value, True (default value) when Hermitian gauge is imposed on SLD MPO, otherwise False
Returns:
fomval: optimal value of FoM
c: optimal MPO for SLD
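Example:
A minimal sketch for a single qubit; a and b are bond dimension 1 MPOs for the density matrices of a |+> state before and after an assumed sz/2 rotation by epsilon.
    epsilon = 10**-4
    rho0 = np.array([[0.5,0.5],[0.5,0.5]],dtype=complex) # |+><+|
    U = np.diag([np.exp(-0.5j*epsilon),np.exp(0.5j*epsilon)]) # exp(-1j*epsilon*sz/2)
    rho1 = U @ rho0 @ np.conj(U).T
    a = [rho0[np.newaxis,np.newaxis,:,:]]
    b = [rho1[np.newaxis,np.newaxis,:,:]]
    c = [np.eye(2,dtype=complex)[np.newaxis,np.newaxis,:,:]]
    fomval,c = fin2_FoM_OBC_optm(a,b,epsilon,c)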
"""
n = len(c)
tol_fom = 0.1*imprecision/n**2
if n == 1:
if np.shape(a[0])[0] == 1 and np.shape(b[0])[0] == 1 and np.shape(c[0])[0] == 1:
d = np.shape(c[0])[2]
tensors = [b[0][0,0,:,:]]
legs = [[-2,-1]]
l1 = ncon(tensors,legs)
l1 = np.reshape(l1,-1,order='F')
tensors = [a[0][0,0,:,:]]
legs = [[-2,-1]]
l1_0 = ncon(tensors,legs)
l1_0 = np.reshape(l1_0,-1,order='F')
tensors = [a[0][0,0,:,:],np.eye(d)]
legs = [[-2,-3],[-4,-1]]
l2 = ncon(tensors,legs)
l2 = np.reshape(l2,(d*d,d*d),order='F')
dl2 = l2+l2.T
dl1 = 2*(l1-l1_0)/epsilon
dl2pinv = np.linalg.pinv(dl2,tol_fom)
dl2pinv = (dl2pinv+dl2pinv.T)/2
cv = dl2pinv @ dl1
c[0][0,0,:,:] = np.reshape(cv,(d,d),order='F')
if lherm:
c[0] = (c[0]+np.conj(np.moveaxis(c[0],2,3)))/2
cv = np.reshape(c[0],-1,order='F')
fomval = np.real(2*cv @ (l1-l1_0)/epsilon - cv @ l2 @ cv)
else:
warnings.warn('Tensor networks with OBC and length one have to have bond dimension equal to one.')
else:
relunc_fom = 0.1*imprecision
l1f = [0]*n
l1_0f = [0]*n
l2f = [0]*n
fom = np.array([])
iter_fom = 0
while True:
tensors = [c[n-1],b[n-1]]
legs = [[-1,-3,1,2],[-2,-4,2,1]]
l1f[n-2] = ncon(tensors,legs)
l1f[n-2] = l1f[n-2][:,:,0,0]
tensors = [c[n-1],a[n-1]]
legs = [[-1,-3,1,2],[-2,-4,2,1]]
l1_0f[n-2] = ncon(tensors,legs)
l1_0f[n-2] = l1_0f[n-2][:,:,0,0]
tensors = [c[n-1],a[n-1],c[n-1]]
legs = [[-1,-4,1,2],[-2,-5,2,3],[-3,-6,3,1]]
l2f[n-2] = ncon(tensors,legs)
l2f[n-2] = l2f[n-2][:,:,:,0,0,0]
for x in range(n-2,0,-1):
tensors = [c[x],b[x],l1f[x]]
legs = [[-1,3,1,2],[-2,4,2,1],[3,4]]
l1f[x-1] = ncon(tensors,legs)
tensors = [c[x],a[x],l1_0f[x]]
legs = [[-1,3,1,2],[-2,4,2,1],[3,4]]
l1_0f[x-1] = ncon(tensors,legs)
tensors = [c[x],a[x],c[x],l2f[x]]
legs = [[-1,4,1,2],[-2,5,2,3],[-3,6,3,1],[4,5,6]]
l2f[x-1] = ncon(tensors,legs)
bdl1,bdl2,d,d = np.shape(c[0])
tensors = [b[0],l1f[0]]
legs = [[-5,1,-4,-3],[-2,1]]
l1 = ncon(tensors,legs)
l1 = np.reshape(l1,-1,order='F')
tensors = [a[0],l1_0f[0]]
legs = [[-5,1,-4,-3],[-2,1]]
l1_0 = ncon(tensors,legs)
l1_0 = np.reshape(l1_0,-1,order='F')
tensors = [a[0],np.eye(d),l2f[0]]
legs = [[-9,1,-4,-7],[-8,-3],[-2,1,-6]]
l2 = ncon(tensors,legs)
l2 = np.reshape(l2,(bdl1*bdl2*d*d,bdl1*bdl2*d*d),order='F')
dl2 = l2+l2.T
dl1 = 2*(l1-l1_0)/epsilon
dl2pinv = np.linalg.pinv(dl2,tol_fom)
dl2pinv = (dl2pinv+dl2pinv.T)/2
cv = dl2pinv @ dl1
c[0] = np.reshape(cv,(bdl1,bdl2,d,d),order='F')
if lherm:
c[0] = (c[0]+np.conj(np.moveaxis(c[0],2,3)))/2
cv = np.reshape(c[0],-1,order='F')
fom = np.append(fom,np.real(2*cv @ (l1-l1_0)/epsilon - cv @ l2 @ cv))
tensors = [c[0],b[0]]
legs = [[-3,-1,1,2],[-4,-2,2,1]]
l1c = ncon(tensors,legs)
l1c = l1c[:,:,0,0]
tensors = [c[0],a[0]]
legs = [[-3,-1,1,2],[-4,-2,2,1]]
l1_0c = ncon(tensors,legs)
l1_0c = l1_0c[:,:,0,0]
tensors = [c[0],a[0],c[0]]
legs = [[-4,-1,1,2],[-5,-2,2,3],[-6,-3,3,1]]
l2c = ncon(tensors,legs)
l2c = l2c[:,:,:,0,0,0]
for x in range(1,n-1):
bdl1,bdl2,d,d = np.shape(c[x])
tensors = [l1c,b[x],l1f[x]]
legs = [[-1,1],[1,2,-4,-3],[-2,2]]
l1 = ncon(tensors,legs)
l1 = np.reshape(l1,-1,order='F')
tensors = [l1_0c,a[x],l1_0f[x]]
legs = [[-1,1],[1,2,-4,-3],[-2,2]]
l1_0 = ncon(tensors,legs)
l1_0 = np.reshape(l1_0,-1,order='F')
tensors = [l2c,a[x],np.eye(d),l2f[x]]
legs = [[-1,1,-5],[1,2,-4,-7],[-8,-3],[-2,2,-6]]
l2 = ncon(tensors,legs)
l2 = np.reshape(l2,(bdl1*bdl2*d*d,bdl1*bdl2*d*d),order='F')
dl2 = l2+l2.T
dl1 = 2*(l1-l1_0)/epsilon
dl2pinv = np.linalg.pinv(dl2,tol_fom)
dl2pinv = (dl2pinv+dl2pinv.T)/2
cv = dl2pinv @ dl1
c[x] = np.reshape(cv,(bdl1,bdl2,d,d),order='F')
if lherm:
c[x] = (c[x]+np.conj(np.moveaxis(c[x],2,3)))/2
cv = np.reshape(c[x],-1,order='F')
fom = np.append(fom,np.real(2*cv @ (l1-l1_0)/epsilon - cv @ l2 @ cv))
tensors = [l1c,c[x],b[x]]
legs = [[3,4],[3,-1,1,2],[4,-2,2,1]]
l1c = ncon(tensors,legs)
tensors = [l1_0c,c[x],a[x]]
legs = [[3,4],[3,-1,1,2],[4,-2,2,1]]
l1_0c = ncon(tensors,legs)
tensors = [l2c,c[x],a[x],c[x]]
legs = [[4,5,6],[4,-1,1,2],[5,-2,2,3],[6,-3,3,1]]
l2c = ncon(tensors,legs)
bdl1,bdl2,d,d = np.shape(c[n-1])
tensors = [l1c,b[n-1]]
legs = [[-1,1],[1,-5,-4,-3]]
l1 = ncon(tensors,legs)
l1 = np.reshape(l1,-1,order='F')
tensors = [l1_0c,a[n-1]]
legs = [[-1,1],[1,-5,-4,-3]]
l1_0 = ncon(tensors,legs)
l1_0 = np.reshape(l1_0,-1,order='F')
tensors = [l2c,a[n-1],np.eye(d)]
legs = [[-1,1,-5],[1,-9,-4,-7],[-8,-3]]
l2 = ncon(tensors,legs)
l2 = np.reshape(l2,(bdl1*bdl2*d*d,bdl1*bdl2*d*d),order='F')
dl2 = l2+l2.T
dl1 = 2*(l1-l1_0)/epsilon
dl2pinv = np.linalg.pinv(dl2,tol_fom)
dl2pinv = (dl2pinv+dl2pinv.T)/2
cv = dl2pinv @ dl1
c[n-1] = np.reshape(cv,(bdl1,bdl2,d,d),order='F')
if lherm:
c[n-1] = (c[n-1]+np.conj(np.moveaxis(c[n-1],2,3)))/2
cv = np.reshape(c[n-1],-1,order='F')
fom = np.append(fom,np.real(2*cv @ (l1-l1_0)/epsilon - cv @ l2 @ cv))
iter_fom += 1
if iter_fom >= 2 and all(fom[-2*n:] > 0) and np.std(fom[-2*n:])/np.mean(fom[-2*n:]) <= relunc_fom:
break
fomval = fom[-1]
return fomval,c
def fin2_FoM_PBC_optm(a,b,epsilon,c,imprecision=10**-2,lherm=True):
"""
Optimization of FoM over MPO for SLD. Function for finite size systems with PBC. Version with two states separated by epsilon.
Parameters:
a: MPO for the density matrix at the value of estimated parameter phi=phi_0, expected ndarray of a shape (bd,bd,d,d,n)
b: MPO for the density matrix at the value of estimated parameter phi=phi_0+epsilon, expected ndarray of a shape (bd,bd,d,d,n)
epsilon: value of a separation between estimated parameters encoded in a and b, float
c: MPO for the SLD, expected ndarray of a shape (bd,bd,d,d,n)
imprecision: expected imprecision of the end results, default value is 10**-2
lherm: boolean value, True (default value) when Hermitian gauge is imposed on SLD MPO, otherwise False
Returns:
fomval: optimal value of FoM
c: optimal MPO for SLD
"""
n = np.shape(a)[4]
d = np.shape(a)[2]
bdr = np.shape(a)[0]
bdrp = np.shape(b)[0]
bdl = np.shape(c)[0]
tol_fom = 0.1*imprecision/n**2
if n == 1:
tensors = [b[:,:,:,:,0],np.eye(bdl)]
legs = [[1,1,-4,-3],[-2,-1]]
l1 = ncon(tensors,legs)
l1 = np.reshape(l1,-1,order='F')
tensors = [a[:,:,:,:,0],np.eye(bdl)]
legs = [[1,1,-4,-3],[-2,-1]]
l1_0 = ncon(tensors,legs)
l1_0 = np.reshape(l1_0,-1,order='F')
tensors = [a[:,:,:,:,0],np.eye(d),np.eye(bdl),np.eye(bdl)]
legs = [[1,1,-4,-7],[-8,-3],[-2,-1],[-6,-5]]
l2 = ncon(tensors,legs)
l2 = np.reshape(l2,(bdl*bdl*d*d,bdl*bdl*d*d),order='F')
dl2 = l2+l2.T
dl1 = 2*(l1-l1_0)/epsilon
dl2pinv = np.linalg.pinv(dl2,tol_fom)
dl2pinv = (dl2pinv+dl2pinv.T)/2
cv = dl2pinv @ dl1
c[:,:,:,:,0] = np.reshape(cv,(bdl,bdl,d,d),order='F')
if lherm:
c[:,:,:,:,0] = (c[:,:,:,:,0]+np.conj(np.moveaxis(c[:,:,:,:,0],2,3)))/2
cv = np.reshape(c[:,:,:,:,0],-1,order='F')
fomval = np.real(2*cv @ (l1-l1_0)/epsilon - cv @ l2 @ cv)
else:
relunc_fom = 0.1*imprecision
l1f = np.zeros((bdl,bdrp,bdl,bdrp,n-1),dtype=complex)
l1_0f = np.zeros((bdl,bdrp,bdl,bdrp,n-1),dtype=complex)
l2f = np.zeros((bdl,bdr,bdl,bdl,bdr,bdl,n-1),dtype=complex)
fom = np.array([])
iter_fom = 0
while True:
tensors = [c[:,:,:,:,n-1],b[:,:,:,:,n-1]]
legs = [[-1,-3,1,2],[-2,-4,2,1]]
l1f[:,:,:,:,n-2] = ncon(tensors,legs)
tensors = [c[:,:,:,:,n-1],a[:,:,:,:,n-1]]
legs = [[-1,-3,1,2],[-2,-4,2,1]]
l1_0f[:,:,:,:,n-2] = ncon(tensors,legs)
tensors = [c[:,:,:,:,n-1],a[:,:,:,:,n-1],c[:,:,:,:,n-1]]
legs = [[-1,-4,1,2],[-2,-5,2,3],[-3,-6,3,1]]
l2f[:,:,:,:,:,:,n-2] = ncon(tensors,legs)
for x in range(n-2,0,-1):
tensors = [c[:,:,:,:,x],b[:,:,:,:,x],l1f[:,:,:,:,x]]
legs = [[-1,3,1,2],[-2,4,2,1],[3,4,-3,-4]]
l1f[:,:,:,:,x-1] = ncon(tensors,legs)
tensors = [c[:,:,:,:,x],a[:,:,:,:,x],l1_0f[:,:,:,:,x]]
legs = [[-1,3,1,2],[-2,4,2,1],[3,4,-3,-4]]
l1_0f[:,:,:,:,x-1] = ncon(tensors,legs)
tensors = [c[:,:,:,:,x],a[:,:,:,:,x],c[:,:,:,:,x],l2f[:,:,:,:,:,:,x]]
legs = [[-1,4,1,2],[-2,5,2,3],[-3,6,3,1],[4,5,6,-4,-5,-6]]
l2f[:,:,:,:,:,:,x-1] = ncon(tensors,legs)
tensors = [b[:,:,:,:,0],l1f[:,:,:,:,0]]
legs = [[2,1,-4,-3],[-2,1,-1,2]]
l1 = ncon(tensors,legs)
l1 = np.reshape(l1,-1,order='F')
tensors = [a[:,:,:,:,0],l1_0f[:,:,:,:,0]]
legs = [[2,1,-4,-3],[-2,1,-1,2]]
l1_0 = ncon(tensors,legs)
l1_0 = np.reshape(l1_0,-1,order='F')
tensors = [a[:,:,:,:,0],np.eye(d),l2f[:,:,:,:,:,:,0]]
legs = [[2,1,-4,-7],[-8,-3],[-2,1,-6,-1,2,-5]]
l2 = ncon(tensors,legs)
l2 = np.reshape(l2,(bdl*bdl*d*d,bdl*bdl*d*d),order='F')
dl2 = l2+l2.T
dl1 = 2*(l1-l1_0)/epsilon
dl2pinv = np.linalg.pinv(dl2,tol_fom)
dl2pinv = (dl2pinv+dl2pinv.T)/2
cv = dl2pinv @ dl1
c[:,:,:,:,0] = np.reshape(cv,(bdl,bdl,d,d),order='F')
if lherm:
c[:,:,:,:,0] = (c[:,:,:,:,0]+np.conj(np.moveaxis(c[:,:,:,:,0],2,3)))/2
cv = np.reshape(c[:,:,:,:,0],-1,order='F')
fom = np.append(fom,np.real(2*cv @ (l1-l1_0)/epsilon - cv @ l2 @ cv))
tensors = [c[:,:,:,:,0],b[:,:,:,:,0]]
legs = [[-1,-3,1,2],[-2,-4,2,1]]
l1c = ncon(tensors,legs)
tensors = [c[:,:,:,:,0],a[:,:,:,:,0]]
legs = [[-1,-3,1,2],[-2,-4,2,1]]
l1_0c = ncon(tensors,legs)
tensors = [c[:,:,:,:,0],a[:,:,:,:,0],c[:,:,:,:,0]]
legs = [[-1,-4,1,2],[-2,-5,2,3],[-3,-6,3,1]]
l2c = ncon(tensors,legs)
for x in range(1,n-1):
tensors = [l1c,b[:,:,:,:,x],l1f[:,:,:,:,x]]
legs = [[3,4,-1,1],[1,2,-4,-3],[-2,2,3,4]]
l1 = ncon(tensors,legs)
l1 = np.reshape(l1,-1,order='F')
tensors = [l1_0c,a[:,:,:,:,x],l1_0f[:,:,:,:,x]]
legs = [[3,4,-1,1],[1,2,-4,-3],[-2,2,3,4]]
l1_0 = ncon(tensors,legs)
l1_0 = np.reshape(l1_0,-1,order='F')
tensors = [l2c,a[:,:,:,:,x],np.eye(d),l2f[:,:,:,:,:,:,x]]
legs = [[3,4,5,-1,1,-5],[1,2,-4,-7],[-8,-3],[-2,2,-6,3,4,5]]
l2 = ncon(tensors,legs)
l2 = np.reshape(l2,(bdl*bdl*d*d,bdl*bdl*d*d),order='F')
dl2 = l2+l2.T
dl1 = 2*(l1-l1_0)/epsilon
dl2pinv = np.linalg.pinv(dl2,tol_fom)
dl2pinv = (dl2pinv+dl2pinv.T)/2
cv = dl2pinv @ dl1
c[:,:,:,:,x] = np.reshape(cv,(bdl,bdl,d,d),order='F')
if lherm:
c[:,:,:,:,x] = (c[:,:,:,:,x]+np.conj(np.moveaxis(c[:,:,:,:,x],2,3)))/2
cv = np.reshape(c[:,:,:,:,x],-1,order='F')
fom = np.append(fom,np.real(2*cv @ (l1-l1_0)/epsilon - cv @ l2 @ cv))
tensors = [l1c,c[:,:,:,:,x],b[:,:,:,:,x]]
legs = [[-1,-2,3,4],[3,-3,1,2],[4,-4,2,1]]
l1c = ncon(tensors,legs)
tensors = [l1_0c,c[:,:,:,:,x],a[:,:,:,:,x]]
legs = [[-1,-2,3,4],[3,-3,1,2],[4,-4,2,1]]
l1_0c = ncon(tensors,legs)
tensors = [l2c,c[:,:,:,:,x],a[:,:,:,:,x],c[:,:,:,:,x]]
legs = [[-1,-2,-3,4,5,6],[4,-4,1,2],[5,-5,2,3],[6,-6,3,1]]
l2c = ncon(tensors,legs)
tensors = [l1c,b[:,:,:,:,n-1]]
legs = [[-2,2,-1,1],[1,2,-4,-3]]
l1 = ncon(tensors,legs)
l1 = np.reshape(l1,-1,order='F')
tensors = [l1_0c,a[:,:,:,:,n-1]]
legs = [[-2,2,-1,1],[1,2,-4,-3]]
l1_0 = ncon(tensors,legs)
l1_0 = np.reshape(l1_0,-1,order='F')
tensors = [l2c,a[:,:,:,:,n-1],np.eye(d)]
legs = [[-2,2,-6,-1,1,-5],[1,2,-4,-7],[-8,-3]]
l2 = ncon(tensors,legs)
l2 = np.reshape(l2,(bdl*bdl*d*d,bdl*bdl*d*d),order='F')
dl2 = l2+l2.T
dl1 = 2*(l1-l1_0)/epsilon
dl2pinv = np.linalg.pinv(dl2,tol_fom)
dl2pinv = (dl2pinv+dl2pinv.T)/2
cv = dl2pinv @ dl1
c[:,:,:,:,n-1] = np.reshape(cv,(bdl,bdl,d,d),order='F')
if lherm:
c[:,:,:,:,n-1] = (c[:,:,:,:,n-1]+np.conj(np.moveaxis(c[:,:,:,:,n-1],2,3)))/2
cv = np.reshape(c[:,:,:,:,n-1],-1,order='F')
fom = np.append(fom,np.real(2*cv @ (l1-l1_0)/epsilon - cv @ l2 @ cv))
iter_fom += 1
if iter_fom >= 2 and all(fom[-2*n:] > 0) and np.std(fom[-2*n:])/np.mean(fom[-2*n:]) <= relunc_fom:
break
fomval = fom[-1]
return fomval,c
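# Illustrative usage sketch for fin2_FoM_PBC_optm (hypothetical data; a and b
# would come from evolving an input state at phi_0 and phi_0 + epsilon):
#   bd, d, n, epsilon = 2, 2, 4, 10**-4
#   a = ...  # ndarray (bd, bd, d, d, n), MPO of rho(phi_0)
#   b = ...  # ndarray (bd, bd, d, d, n), MPO of rho(phi_0 + epsilon)
#   c0 = np.ones((bd, bd, d, d, n), dtype=complex)  # initial SLD MPO guess
#   fomval, c = fin2_FoM_PBC_optm(a, b, epsilon, c0, imprecision=10**-2, lherm=True)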
def fin2_FoMD_OBC_optm(c2d,cd,cpd,epsilon,a0,imprecision=10**-2):
"""
Optimization of FoMD over MPS for initial wave function. Function for finite size systems with OBC. Version with two dual SLDs separated by epsilon.
Parameters:
c2d: MPO for the square of the dual of the SLD at the value of estimated parameter phi=-phi_0, expected list of length n of ndarrays of a shape (bd,bd,d,d) (bd can vary between sites)
cd: MPO for the dual of the SLD at the value of estimated parameter phi=-phi_0, expected list of length n of ndarrays of a shape (bd,bd,d,d) (bd can vary between sites)
cpd: MPO for the dual of the SLD at the value of estimated parameter phi=-(phi_0+epsilon), expected list of length n of ndarrays of a shape (bd,bd,d,d) (bd can vary between sites)
epsilon: value of a separation between estimated parameters encoded in cd and cpd, float
a0: MPS for the initial wave function, expected list of length n of ndarrays of a shape (bd,bd,d) (bd can vary between sites)
imprecision: expected imprecision of the end results, default value is 10**-2
Returns:
fomdval: optimal value of FoMD
a0: optimal MPS for the initial wave function
"""
n = len(a0)
if n == 1:
if np.shape(c2d[0])[0] == 1 and np.shape(cpd[0])[0] == 1 and np.shape(a0[0])[0] == 1:
d = np.shape(a0[0])[2]
tensors = [c2d[0][0,0,:,:]]
legs = [[-1,-2]]
l2d = ncon(tensors,legs)
l2d = np.reshape(l2d,(d,d),order='F')
tensors = [cpd[0][0,0,:,:]]
legs = [[-1,-2]]
lpd = ncon(tensors,legs)
lpd = np.reshape(lpd,(d,d),order='F')
tensors = [cd[0][0,0,:,:]]
legs = [[-1,-2]]
ld = ncon(tensors,legs)
ld = np.reshape(ld,(d,d),order='F')
eiginput = 2*(lpd-ld)/epsilon-l2d
eiginput = (eiginput+np.conj(eiginput).T)/2
fomdval,a0v = np.linalg.eig(eiginput)
position = np.argmax(np.real(fomdval))
a0v = np.reshape(a0v[:,position],-1,order='F')
a0v = a0v/np.sqrt(np.abs(np.conj(a0v) @ a0v))
a0[0][0,0,:] = np.reshape(a0v,(d),order='F')
fomdval = np.real(fomdval[position])
else:
warnings.warn('Tensor networks with OBC and length one have to have bond dimension equal to one.')
else:
relunc_fomd = 0.1*imprecision
l2df = [0]*n
lpdf = [0]*n
ldf = [0]*n
fomd = np.array([])
iter_fomd = 0
while True:
tensors = [np.conj(a0[n-1]),c2d[n-1],a0[n-1]]
legs = [[-1,-4,1],[-2,-5,1,2],[-3,-6,2]]
l2df[n-2] = ncon(tensors,legs)
l2df[n-2] = l2df[n-2][:,:,:,0,0,0]
tensors = [np.conj(a0[n-1]),cpd[n-1],a0[n-1]]
legs = [[-1,-4,1],[-2,-5,1,2],[-3,-6,2]]
lpdf[n-2] = ncon(tensors,legs)
lpdf[n-2] = lpdf[n-2][:,:,:,0,0,0]
tensors = [np.conj(a0[n-1]),cd[n-1],a0[n-1]]
legs = [[-1,-4,1],[-2,-5,1,2],[-3,-6,2]]
ldf[n-2] = ncon(tensors,legs)
ldf[n-2] = ldf[n-2][:,:,:,0,0,0]
for x in range(n-2,0,-1):
tensors = [np.conj(a0[x]),c2d[x],a0[x],l2df[x]]
legs = [[-1,3,1],[-2,4,1,2],[-3,5,2],[3,4,5]]
l2df[x-1] = ncon(tensors,legs)
tensors = [np.conj(a0[x]),cpd[x],a0[x],lpdf[x]]
legs = [[-1,3,1],[-2,4,1,2],[-3,5,2],[3,4,5]]
lpdf[x-1] = ncon(tensors,legs)
tensors = [np.conj(a0[x]),cd[x],a0[x],ldf[x]]
legs = [[-1,3,1],[-2,4,1,2],[-3,5,2],[3,4,5]]
ldf[x-1] = ncon(tensors,legs)
bdpsi1,bdpsi2,d = np.shape(a0[0])
tensors = [c2d[0],l2df[0]]
legs = [[-7,1,-3,-6],[-2,1,-5]]
l2d = ncon(tensors,legs)
l2d = np.reshape(l2d,(bdpsi1*bdpsi2*d,bdpsi1*bdpsi2*d),order='F')
tensors = [cpd[0],lpdf[0]]
legs = [[-7,1,-3,-6],[-2,1,-5]]
lpd = ncon(tensors,legs)
lpd = np.reshape(lpd,(bdpsi1*bdpsi2*d,bdpsi1*bdpsi2*d),order='F')
tensors = [cd[0],ldf[0]]
legs = [[-7,1,-3,-6],[-2,1,-5]]
ld = ncon(tensors,legs)
ld = np.reshape(ld,(bdpsi1*bdpsi2*d,bdpsi1*bdpsi2*d),order='F')
eiginput = 2*(lpd-ld)/epsilon-l2d
eiginput = (eiginput+np.conj(eiginput).T)/2
fomdval,a0v = np.linalg.eig(eiginput)
position = np.argmax(np.real(fomdval))
a0v = np.reshape(a0v[:,position],-1,order='F')
a0v = a0v/np.sqrt(np.abs(np.conj(a0v) @ a0v))
a0[0] = np.reshape(a0v,(bdpsi1,bdpsi2,d),order='F')
fomd = np.append(fomd,np.real(fomdval[position]))
a0[0] = np.moveaxis(a0[0],2,0)
a0[0] = np.reshape(a0[0],(d*bdpsi1,bdpsi2),order='F')
u,s,vh = np.linalg.svd(a0[0],full_matrices=False)
a0[0] = np.reshape(u,(d,bdpsi1,np.shape(s)[0]),order='F')
a0[0] = np.moveaxis(a0[0],0,2)
tensors = [np.diag(s) @ vh,a0[1]]
legs = [[-1,1],[1,-2,-3]]
a0[1] = ncon(tensors,legs)
tensors = [np.conj(a0[0]),c2d[0],a0[0]]
legs = [[-4,-1,1],[-5,-2,1,2],[-6,-3,2]]
l2dc = ncon(tensors,legs)
l2dc = l2dc[:,:,:,0,0,0]
tensors = [np.conj(a0[0]),cpd[0],a0[0]]
legs = [[-4,-1,1],[-5,-2,1,2],[-6,-3,2]]
lpdc = ncon(tensors,legs)
lpdc = lpdc[:,:,:,0,0,0]
tensors = [np.conj(a0[0]),cd[0],a0[0]]
legs = [[-4,-1,1],[-5,-2,1,2],[-6,-3,2]]
ldc = ncon(tensors,legs)
ldc = ldc[:,:,:,0,0,0]
for x in range(1,n-1):
bdpsi1,bdpsi2,d = np.shape(a0[x])
tensors = [l2dc,c2d[x],l2df[x]]
legs = [[-1,1,-4],[1,2,-3,-6],[-2,2,-5]]
l2d = ncon(tensors,legs)
l2d = np.reshape(l2d,(bdpsi1*bdpsi2*d,bdpsi1*bdpsi2*d),order='F')
tensors = [lpdc,cpd[x],lpdf[x]]
legs = [[-1,1,-4],[1,2,-3,-6],[-2,2,-5]]
lpd = ncon(tensors,legs)
lpd = np.reshape(lpd,(bdpsi1*bdpsi2*d,bdpsi1*bdpsi2*d),order='F')
tensors = [ldc,cd[x],ldf[x]]
legs = [[-1,1,-4],[1,2,-3,-6],[-2,2,-5]]
ld = ncon(tensors,legs)
ld = np.reshape(ld,(bdpsi1*bdpsi2*d,bdpsi1*bdpsi2*d),order='F')
eiginput = 2*(lpd-ld)/epsilon-l2d
eiginput = (eiginput+np.conj(eiginput).T)/2
fomdval,a0v = np.linalg.eig(eiginput)
position = np.argmax(np.real(fomdval))
a0v = np.reshape(a0v[:,position],-1,order='F')
a0v = a0v/np.sqrt(np.abs(np.conj(a0v) @ a0v))
a0[x] = np.reshape(a0v,(bdpsi1,bdpsi2,d),order='F')
fomd = np.append(fomd,np.real(fomdval[position]))
a0[x] = np.moveaxis(a0[x],2,0)
a0[x] = np.reshape(a0[x],(d*bdpsi1,bdpsi2),order='F')
u,s,vh = np.linalg.svd(a0[x],full_matrices=False)
a0[x] = np.reshape(u,(d,bdpsi1,np.shape(s)[0]),order='F')
a0[x] = np.moveaxis(a0[x],0,2)
tensors = [np.diag(s) @ vh,a0[x+1]]
legs = [[-1,1],[1,-2,-3]]
a0[x+1] = ncon(tensors,legs)
tensors = [l2dc,np.conj(a0[x]),c2d[x],a0[x]]
legs = [[3,4,5],[3,-1,1],[4,-2,1,2],[5,-3,2]]
l2dc = ncon(tensors,legs)
tensors = [lpdc,np.conj(a0[x]),cpd[x],a0[x]]
legs = [[3,4,5],[3,-1,1],[4,-2,1,2],[5,-3,2]]
lpdc = ncon(tensors,legs)
tensors = [ldc,np.conj(a0[x]),cd[x],a0[x]]
legs = [[3,4,5],[3,-1,1],[4,-2,1,2],[5,-3,2]]
ldc = ncon(tensors,legs)
bdpsi1,bdpsi2,d = np.shape(a0[n-1])
tensors = [l2dc,c2d[n-1]]
legs = [[-1,1,-4],[1,-7,-3,-6]]
l2d = ncon(tensors,legs)
l2d = np.reshape(l2d,(bdpsi1*bdpsi2*d,bdpsi1*bdpsi2*d),order='F')
tensors = [lpdc,cpd[n-1]]
legs = [[-1,1,-4],[1,-7,-3,-6]]
lpd = ncon(tensors,legs)
lpd = np.reshape(lpd,(bdpsi1*bdpsi2*d,bdpsi1*bdpsi2*d),order='F')
tensors = [ldc,cd[n-1]]
legs = [[-1,1,-4],[1,-7,-3,-6]]
ld = ncon(tensors,legs)
ld = np.reshape(ld,(bdpsi1*bdpsi2*d,bdpsi1*bdpsi2*d),order='F')
eiginput = 2*(lpd-ld)/epsilon-l2d
eiginput = (eiginput+np.conj(eiginput).T)/2
fomdval,a0v = np.linalg.eig(eiginput)
position = np.argmax(np.real(fomdval))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 12 19:51:39 2019
@author: ghosh128
"""
import sys
sys.path.append("../")
import os
import numpy as np
import config
import tensorflow as tf
from scipy import optimize
tf.set_random_seed(1)
#%%
print("LOAD DATA")
train_data_strong = np.load(os.path.join(config.NUMPY_DIR, "train_data_strong.npy")).astype(np.float32)
train_data_weak = np.load(os.path.join(config.NUMPY_DIR, "train_data_weak.npy")).astype(np.float32)
num_features = train_data_strong.shape[-1] - 2
print("COMPUTE THETA")
O_s = train_data_strong[:,-1]
Y_s = train_data_strong[:,-2]
N_s = len(O_s)
num_levels = len(np.unique(O_s))
#Inequality 1 : theta_l_i - \xi_l_i <= y_i
A1 = np.zeros((N_s, 2*N_s+(num_levels-1)))
#!/usr/bin/env python3
# Date: 2020/01/05
# Author: Armit
# Basic idea of principal component analysis (PCA): eigenvalues and eigenvectors.
# Build the covariance matrix of all the features; the eigenvectors along the directions with the largest eigenvalues of that matrix are the principal components.
# The first principal component is always orthogonal to the second, and so on; the subspace finally spanned is therefore also "rectangular" (orthogonal).
import random
import numpy as np
import matplotlib.pyplot as plt
def get_data(N=100, begin=0, end=10):
fx = lambda x: 1.7 * x + 0.4
data = np.array([np.array([x, fx(x) + random.random() * random.randrange(4)])
for x in np.linspace(begin, end, N)
for _ in range(random.randrange(6))])
return data
def pca(data, top_n_feat=100): # data = [(ft1, ft2, ..., ftm), ...]
# Subtract the mean (center the data)
data_mean = np.mean(data, axis=0) # mean by column
data -= data_mean
# Eigendecomposition of the covariance matrix; Cov(X,Y) = E(XY) - E(X)E(Y); Cov(X,X) = Var(X)
cov = np.cov(data, rowvar=False)
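# A typical continuation (sketch; the original snippet is truncated here):
# diagonalize the covariance matrix, keep the directions with the largest
# eigenvalues, and project the centered data onto them.
#   eig_vals, eig_vecs = np.linalg.eigh(cov)          # cov is symmetric
#   order = np.argsort(eig_vals)[::-1][:top_n_feat]   # largest eigenvalues first
#   components = eig_vecs[:, order]                   # principal directions
#   low_dim = data @ components                       # projected coordinates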
import argparse
import gc
import math
import sys
import random
from sklearn.model_selection import KFold
from sklearn.model_selection import train_test_split
from sklearn import svm
import pickle
from sklearn.externals import joblib
import numpy as np
import h5py
from sklearn.metrics import confusion_matrix, accuracy_score
from keras import backend as K
from keras.layers import Input, Activation, Dense, Dropout
from keras.layers.normalization import BatchNormalization
from keras.optimizers import Adam
from keras.models import Model, load_model
from keras.layers.advanced_activations import ELU
from datetime import datetime
import matplotlib
import itertools
matplotlib.use('Agg')
from matplotlib import pyplot as plt
''' This code is based on <NAME>., <NAME>., & Arganda-Carreras,
I. (2017). "Vision-Based Fall Detection with Convolutional Neural Networks"
Wireless Communications and Mobile Computing, 2017.
Also, new features were added by <NAME> working in
Semantix.
'''
''' Documentation: class Train
This class has a few methods:
pre_train_cross
pre_train
cross_train
train
evaluate
plot_training_info
The methods that should be called outside of this class are:
cross_train: perform a n_split cross_train on files passed by
argument
train: perfom a simple trainment on files passsed by argument
'''
class Train:
def __init__(self, threshold, epochs, learning_rate,
weight, mini_batch_size, id, batch_norm):
'''
Necessary parameters to train
'''
self.features_key = 'features'
self.labels_key = 'labels'
self.samples_key = 'samples'
self.num_key = 'num'
self.id = id
self.threshold = threshold
self.num_features = 4096
self.sliding_height = 10
self.epochs = epochs
self.learning_rate = learning_rate
self.weight_0 = weight
self.mini_batch_size = mini_batch_size
self.batch_norm = batch_norm
### Number of streams for each combination
self.num_streams = dict()
### This three dicts are to see which strategy gives best results in
### training stage
self.taccuracies_avg = dict()
self.taccuracies_avg_svm = dict()
self.taccuracies_svm = dict()
### This others dicts will give the parameters to all strategies
self.sensitivities_svm = dict()
self.specificities_svm = dict()
self.fars_svm = dict()
self.mdrs_svm = dict()
self.accuracies_svm = dict()
self.sensitivities_avg = dict()
self.specificities_avg = dict()
self.fars_avg = dict()
self.mdrs_avg = dict()
self.accuracies_avg = dict()
self.sensitivities_avg_svm = dict()
self.specificities_avg_svm = dict()
self.fars_avg_svm = dict()
self.mdrs_avg_svm = dict()
self.accuracies_avg_svm = dict()
def calc_metrics(self, num_streams, y_test, y_train, test_predicteds,
train_predicteds, key):
avg_predicted = np.zeros(len(y_test), dtype=np.float)
train_avg_predicted = np.zeros(len(y_train), dtype=np.float)
clf_train_predicteds = np.zeros(shape=(len(y_train), num_streams), dtype=np.float )
for j in range(len(y_test)):
for i in range(num_streams):
avg_predicted[j] += test_predicteds[i][j]
avg_predicted[j] /= (num_streams)
for j in range(len(y_train)):
for i in range(num_streams):
train_avg_predicted[j] += train_predicteds[i][j]
train_avg_predicted[j] /= (num_streams)
for j in range(len(y_train)):
clf_train_predicteds[j] = [item[j] for item in train_predicteds]
####
#### TRAINING WITH THRESHOLD AND AVERAGE
####
print('EVALUATE WITH average and threshold')
tpr, fpr, fnr, tnr, precision, recall, specificity, f1, accuracy = self.evaluate_threshold(np.array(avg_predicted, copy=True))
import numpy as np
"""
v_l object. c*r^{n-2}*exp{-e*r^2}
"""
class rnExp:
def __init__(self, n, e, c):
self.n = np.asarray(n)
self.e = np.asarray(e)
self.c = np.asarray(c)
def __call__(self, r):
return np.sum(
r[:, np.newaxis] ** self.n
* self.c
* np.exp(-self.e * r[:, np.newaxis] ** 2),
axis=1,
)
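# Example (illustrative coefficients): v(r) = 2.0 * r**(-1) * exp(-1.5*r**2) + 0.5 * exp(-0.3*r**2)
#   v = rnExp(n=[-1, 0], e=[1.5, 0.3], c=[2.0, 0.5])
#   v(np.array([0.5, 1.0]))  # vectorized evaluation over an array of radii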
def generate_ecp_functors(coeffs):
"""
Returns a functor, with keys as the angular momenta:
-1 stands for the nonlocal part, 0,1,2,... are the s,p,d channels, etc.
Parameters:
mol._ecp[atom_name][1] (coefficients of the ECP)
Returns:
v_l function, with key = angular momentum
"""
d = {}
for c in coeffs:
el = c[0]
rn = []
exponent = []
coefficient = []
for n, expand in enumerate(c[1]):
# print("r",n-2,"coeff",expand)
for line in expand:
rn.append(n - 2)
exponent.append(line[0])
coefficient.append(line[1])
d[el] = rnExp(rn, exponent, coefficient)
return d
#########################################################################
def P_l(x, l):
"""
Legendre functions,
returns a nconf x naip array for a given l, x=r_ea(i)
Parameters:
x: nconf array, l: integer
Returns:
P_l values: nconf x naip array
"""
if l == 0:
return np.ones(x.shape)
elif l == 1:
return x
elif l == 2:
return 0.5 * (3 * x * x - np.ones(x.shape))
elif l == 3:
return 0.5 * (5 * x * x * x - 3 * x)
elif l == 4:
return 0.125 * (35 * x * x * x * x - 30 * x * x + 3 * np.ones(x.shape))
else:
return np.zeros(x.shape)
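# Quick check (cos(theta)=1 is the forward direction, where P_l(1)=1 for all l):
#   P_l(np.ones(3), 2)  # -> array([1., 1., 1.]) since 0.5*(3*1 - 1) = 1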
def get_r_ea(mol, configs, e, at):
"""
Returns a nconf x 3 array, distances between electron e and atom at
Parameters:
e,at: integers, eletron and atom indices
configs: nconf x nelec x 3 array
Returns:
epos-apos, electron-atom distances
"""
epos = configs[:, e, :]
nconf = configs.shape[0]
apos = np.outer(
np.ones(nconf), np.array(mol._atom[at][1])
) # nconf x 3 array, position of atom at
return epos - apos
def get_r_ea_i(mol, epos_rot, e, at):
"""
Returns a nconf x naip x 3 array, distances between the rotated electron (e) and the atom at
Parameters:
epos_rot: rotated positions of electron e, nconf x naip x 3
Returns:
epos_rot-apos, (rotated) electron-atom distances
"""
nconf, naip = epos_rot.shape[0:2]
apos = np.zeros(
[nconf, naip, 3]
) # position of the atom, broadcasted into nconf x naip x 3
for aip in range(naip):
apos[:, aip, :] = np.outer(np.ones(nconf), np.array(mol._atom[at][1]))
return epos_rot - apos
def get_v_l(mol, configs, e, at):
"""
Returns list of the l's, and a nconf x nl array, v_l values for each l: l= 0,1,2,...,-1
"""
nconf = configs.shape[0]
at_name = mol._atom[at][0]
r_ea = np.linalg.norm(get_r_ea(mol, configs, e, at), axis=1)
vl = generate_ecp_functors(mol._ecp[at_name][1])
Lmax = len(vl)
v_l = np.zeros([nconf, Lmax])
for l in vl.keys(): # -1,0,1,...
v_l[:, l] = vl[l](r_ea)
return vl.keys(), v_l
def get_wf_ratio(wf, epos_rot, e):
"""
Returns a nconf x naip array, which is the Psi(r_e(i))/Psi(r_e) values
"""
nconf, naip = epos_rot.shape[0:2]
wf_ratio = np.zeros([nconf, naip])
for aip in range(naip):
wf_ratio[:, aip] = wf.testvalue(e, epos_rot[:, aip, :])
return wf_ratio
def get_P_l(mol, configs, weights, epos_rot, l_list, e, at):
"""
Returns a nconf x naip x nl array, which is the legendre function values for each l channel.
The factor (2l+1) and the quadrature weights are included.
Parameters:
l_list: [-1,0,1,...] list of given angular momenta
weights: integration weights
Return:
P_l values: nconf x naip x nl array
"""
# at_name = mol._atom[at][0]
nconf, naip = epos_rot.shape[0:2]
P_l_val = np.zeros([nconf, naip, len(l_list)])
r_ea = get_r_ea(mol, configs, e, at) # nconf x 3
r_ea_i = get_r_ea_i(mol, epos_rot, e, at) # nconf x naip x 3
rdotR = np.zeros(r_ea_i.shape[0:2]) # nconf x naip
# get the cosine values
for aip in range(naip):
rdotR[:, aip] = (
r_ea[:, 0] * r_ea_i[:, aip, 0]
+ r_ea[:, 1] * r_ea_i[:, aip, 1]
+ r_ea[:, 2] * r_ea_i[:, aip, 2]
)
rdotR[:, aip] /= np.linalg.norm(r_ea, axis=1) * np.linalg.norm(
r_ea_i[:, aip, :], axis=1
)
# print('cosine values',rdotR)
# already included the factor (2l+1), and the integration weights here
for l in l_list:
P_l_val[:, :, l] = (
(2 * l + 1) * P_l(rdotR, l) * np.outer(np.ones(nconf), weights)
)
return P_l_val
#########################################################################
def ecp_ea(mol, configs, wf, e, at):
"""
Returns the ECP value between electron e and atom at, local+nonlocal.
"""
l_list, v_l = get_v_l(mol, configs, e, at)
naip = 6
if len(l_list) > 2:
naip = 12
weights, epos_rot = get_rot(mol, configs, e, at, naip)
P_l = get_P_l(mol, configs, weights, epos_rot, l_list, e, at)
ratio = get_wf_ratio(wf, epos_rot, e)
ecp_val = np.einsum("ij,ik,ijk->i", ratio, v_l, P_l)
# compute the local part
local_l = -1
ecp_val += v_l[:, local_l]
return ecp_val
def ecp(mol, configs, wf):
"""
Returns the ECP value, summed over all the electrons and atoms.
"""
nconf, nelec = configs.shape[0:2]
ecp_tot = np.zeros(nconf)
if mol._ecp != {}:
for e in range(nelec):
for at in range(len(mol._atom)):
ecp_tot += ecp_ea(mol, configs, wf, e, at)
return ecp_tot
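# Usage sketch (mol, configs and wf as described in the docstrings above):
#   e_ecp = ecp(mol, configs, wf)  # nconf array: total ECP energy per configuration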
#################### Quadrature Rules ############################
def get_rot(mol, configs, e, at, naip):
"""
Returns the integration weights (naip), and the positions of the rotated electron e (nconf x naip x 3)
Parameters:
configs[:,e,:]: epos of the electron e to be rotated
Returns:
weights: naip array
epos_rot: positions of the rotated electron, nconf x naip x 3
"""
nconf = configs.shape[0]
apos = np.outer(np.ones(nconf), np.array(mol._atom[at][1]))
r_ea_vec = get_r_ea(mol, configs, e, at)
r_ea = np.linalg.norm(r_ea_vec, axis=1)
# t and p are sampled randomly over a sphere around the atom
t = np.random.uniform(low=0.0, high=np.pi, size=nconf)
p = np.random.uniform(low=0.0, high=2 * np.pi, size=nconf)
# rotated unit vectors:
i_rot, j_rot, k_rot = (
np.zeros([nconf, 3]),
np.zeros([nconf, 3]),
np.zeros([nconf, 3]),
)
i_rot[:, 0] = np.cos(p - np.pi / 2.0)
i_rot[:, 1] = np.sin(p - np.pi / 2.0)
j_rot[:, 0] = np.sin(t + np.pi / 2.0) * np.cos(p)
j_rot[:, 1] = np.sin(t + np.pi / 2.0) * np.sin(p)
j_rot[:, 2] = np.cos(t + np.pi / 2.0)
k_rot[:, 0] = np.sin(t) * np.cos(p)
k_rot[:, 1] = np.sin(t) * np.sin(p)
import sys
import numpy as np
from numba import *
from numba.decorators import jit, autojit
#from numba.testing import test_support
a = np.arange(80).reshape(8, 10)
@autojit(backend='ast')
def np_sum(a):
return np.sum(a, axis=0)
@autojit(backend='ast')
def np_copy(a):
return a.copy(order='F')
@autojit(backend='ast')
def attributes(a):
return (a.T,
a.T.T,
a.copy(),
np.array(a, dtype=np.double))
def test_numpy_attrs():
result = np_sum(a)
np_result = np.sum(a, axis=0)
import numpy as np
from rlberry.envs.benchmarks.ball_exploration import PBall2D
p = 5
A = np.array([
[1.0, 0.1],
[-0.1, 1.0]
]
)
reward_amplitudes = np.array([1.0, 0.5, 0.5])
reward_smoothness = np.array([0.25, 0.25, 0.25])
reward_centers = [
np.array([0.75 * np.cos(np.pi / 2), 0.75 * np.sin(np.pi / 2)]),
"""
For a given session eid, plot spectrogram of sound recorded via the microphone.
"""
# Author: <NAME>, <NAME>
import numpy as np
import matplotlib.pyplot as plt
from one.api import ONE
one = ONE()
dataset_types = [
'_iblmic_audioSpectrogram.frequencies',
'_iblmic_audioSpectrogram.power',
'_iblmic_audioSpectrogram.times_mic']
eid = '098bdac5-0e25-4f51-ae63-995be7fe81c7' # TEST EXAMPLE
TF = one.load_object(eid, 'audioSpectrogram', collection='raw_behavior_data')
# -- Plot spectrogram
tlims = TF['times_mic'][[0, -1]]
flims = TF['frequencies'][0, [0, -1]]
fig = plt.figure(figsize=[16, 7])
ax = plt.axes()
im = ax.imshow(20 * np.log10(TF['power'].T))
'''
These functions are not necessary to run itam.py, but they are used to write the txt of
the lookup tables I used. Essentially one needs a table for
- the power spectrum
- the percentile point function
- rescaling factor
In *lookup_Pk* one needs to specify the cosmological parameters of the chosen simulation.
In *lookup_ppf* and *rescale_factor* one needs to specify a path to a file containing the density field,
and the smoothing scale one wants to use (it has to be the same in itam.py as well).
'''
import numpy as N
try:
from classy import Class
except:
raise ImportError('CLASS not installed')
from scipy.stats import rankdata
from scipy.integrate import quad
def lookup_Pk(cosmology='planck',nonlinear=0):
"""
It saves the lookup table of the (non)linear power spectrum generated from CLASS.
If nonlinear is False (default) it generates the linear power spectrum.
You can choose between
- planck
- wmap
- ML
Choose also whether you want a nonlinear power spectrum, default is linear (nonlinear=0)
"""
# k in h/Mpc
k = N.logspace(-4., 3., 3*1024)
if nonlinear==1:
hf = 'halofit'
saveto = 'data_itam/'+cosmology+'_pk.txt'
else:
hf = ''
saveto = 'data_itam/'+cosmology+'_pk_linear.txt'
if cosmology == 'planck':
class_params = {
'non linear': hf,
'output': ['mPk','vTk'],
'P_k_max_1/Mpc': 1000.,
'z_pk': 0.,
'A_s': 2.3e-9,
'n_s': 0.96,
'h': 0.7,
'omega_b': 0.0225,
'Omega_cdm': 0.25,
}
sig8_0 = 0.8
elif cosmology == 'wmap':
class_params = {
'non linear': hf,
'output': ['mPk','vTk'],
'P_k_max_1/Mpc': 1000.,
'z_pk': 0.,
'A_s': 2.3e-9,
'n_s': 0.967,
'h': 0.704,
'omega_b': 0.02253,
'Omega_cdm': 0.226,
}
sig8_0 = 0.81
elif cosmology == 'ML':
class_params = {
'non linear': hf,
'output': ['mPk','vTk'],
'P_k_max_1/Mpc': 1000.,
'z_pk': 0.,
'A_s': 2.3e-9,
'n_s': 1.,
'h': 0.73,
'omega_b': 0.045*0.73**2,
'Omega_cdm': 0.25-0.045,
}
sig8_0 = 0.9
else:
raise ValueError("the cosmology you chose does not exist")
cosmoClass_nl = Class()
cosmoClass_nl.set(class_params)
cosmoClass_nl.compute()
# rescale the normalization of matter power spectrum to have sig8=0.8 today
sig8 = cosmoClass_nl.sigma8()
A_s = cosmoClass_nl.pars['A_s']
cosmoClass_nl.struct_cleanup() # does not clean the input class_params, cosmo.empty() does that
cosmoClass_nl.set(A_s=A_s*(sig8_0*1./sig8)**2)
cosmoClass_nl.compute()
h = cosmoClass_nl.pars['h']
pk_nl = N.asarray([ cosmoClass_nl.pk(x*h, 0.,)*h**3 for x in k ])
kpk = N.vstack((k,pk_nl))
N.savetxt(saveto,kpk)
print('saving', saveto )
return
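# Example call (an illustration; writes the table to data_itam/planck_pk.txt):
#   lookup_Pk(cosmology='planck', nonlinear=1)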
def lookup_ppf(nsamples=100000, boxsize=256.0, Rth=1.,density=None, pathpk='data_itam/planck_pk.txt', saveto_ppf='data_itam/_ppf.txt',
saveto_rescale='data_itam/rescale_factor.txt'):
'''
This function writes a lookup table of the inverse CDF (percentile point function) of the target simulation.
Inputs::
nsamples: the number of points at which the ppf is sampled.
density: path to a binary with the unsmoothed density field.
Rth: smoothing scale of the Gaussian kernel, to smooth the density field.
Outputs::
- Lookup table for the point percent function of density field of simulation
- rescale factor to make variance of PDF and Pk consistent
'''
try:
d = N.load(density) # shape is (ng,ng,ng)
except:
ValueError("the simulation you chose does not exist")
dnlR = smoothfield(d,boxsize,Rth).flatten()
cdvars = N.var(dnlR,axis=None)
dnlR = N.log10(1.+dnlR)
dnlR = dnlR[N.isfinite(dnlR)]
rk = rankdata(dnlR)
rk /= len(rk)+1.
rksort = N.sort(rk)
dsort = N.sort(dnlR)
cdf = N.linspace(rksort[0],rksort[-1],nsamples)
ppf = N.interp(cdf,rksort,dsort,left=dsort[0],right=dsort[-1])
#plt.plot(ppf,cdf);plt.show();
ppf = N.vstack((cdf,ppf))
print('saving', saveto_ppf)
N.savetxt(saveto_ppf,ppf)
"""
:py:class:`UtilsCommonMode` contains detector independent utilities for common mode correction
==============================================================================================
Usage::
from psana.detector.UtilsCommonMode import *
#OR
import psana.detector.UtilsCommonMode as ucm
ucm.common_mode_rows(arr, mask=None, cormax=None, npix_min=10)
ucm.common_mode_cols(arr, mask=None, cormax=None, npix_min=10)
ucm.common_mode_2d(arr, mask=None, cormax=None, npix_min=10)
ucm.common_mode_rows_hsplit_nbanks(data, mask, nbanks=4, cormax=None)
ucm.common_mode_2d_hsplit_nbanks(data, mask, nbanks=4, cormax=None)
This software was developed for the LCLS project.
If you use all or part of it, please give an appropriate acknowledgment.
Created on 2018-01-31 by <NAME>
2021-02-02 adopted to LCLS2
"""
import logging
logger = logging.getLogger(__name__)
import numpy as np
from math import fabs
from psana.pyalgos.generic.NDArrUtils import info_ndarr, print_ndarr
def common_mode_rows(arr, mask=None, cormax=None, npix_min=10):
"""Defines and applys common mode correction to 2-d arr for rows.
I/O parameters:
- arr (float) - i/o 2-d array of intensities
- mask (int or None) - the same shape 2-d array of bad/good = 0/1 pixels
- cormax (float or None) - maximal allowed correction in ADU
- npix_min (int) - minimal number of good pixels in row to evaluate and apply correction
"""
rows, cols = arr.shape
if mask is None:
cmode = np.median(arr,axis=1) # column of median values
else:
marr = np.ma.array(arr, mask=mask<1) # use boolean inverted mask (True for masked pixels)
cmode = np.ma.median(marr,axis=1) # column of median values over good pixels only
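# A typical continuation (sketch; the original snippet is truncated here):
# subtract the per-row common mode, skipping rows with fewer than npix_min
# good pixels and, optionally, corrections larger than cormax:
#   if cormax is not None:
#       cmode[np.abs(cmode) > cormax] = 0
#   arr -= cmode[:, np.newaxis]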
import numpy as np
class Weno:
def __init__(self):
self.epsilon = 1e-6
def weno(self, NumFl, Fl, L, In, Out):
""" Interface reconstruction using WENO scheme. """
# Built an extenday array with phantom cells to deal with periodicity
#data = np.concatenate((In[-2:], In, In[0:2]))
data = np.concatenate((In[-3:], In, In[0:2])) # FIXME: shift indexes so that there is no need to add 3 points at the left side
# Squared derivatives of the input
dp1m2p1 = np.square(np.convolve(data, np.flipud([1, -2, 1]), 'valid'))
dp1m4p3 = np.square(np.convolve(data, np.flipud([1, -4, 3]), 'valid'))
dp1p0m1 = np.square(np.convolve(data, np.flipud([1, 0, -1]), 'valid'))
dp3m4p1 = np.square(np.convolve(data, np.flipud([3, -4, 1]), 'valid'))
import argparse
import warnings
# Standard imports
import numpy as np
import pandas as pd
# For the SUPPORT dataset
from pycox.datasets import support
# SDV aspects
# from sdgym.synthesizers import Independent
# from sdv.demo import load_tabular_demo
from sdv.tabular import CopulaGAN, CTGAN, GaussianCopula, TVAE
# Other
from utils import set_seed, support_pre_proc, reverse_transformers
from metrics import distribution_metrics
warnings.filterwarnings("ignore") # We suppress warnings to avoid SDMETRICS throwing unique synthetic data warnings (i.e.
# data in synthetic set is not in the real data set) as well as SKLEARN throwing convergence warnings (pre-processing uses
# GMM from sklearn and this throws non convergence warnings)
set_seed(0)
MODEL_CLASSES = {
"CopulaGAN": CopulaGAN,
"CTGAN": CTGAN,
"GaussianCopula": GaussianCopula,
"TVAE": TVAE,
}
parser = argparse.ArgumentParser()
parser.add_argument(
"--n_runs", default=10, type=int, help="set number of runs/seeds",
)
parser.add_argument(
"--model_type",
default="GaussianCopula",
choices=MODEL_CLASSES.keys(),
type=str,
help="set model for baseline experiment",
)
parser.add_argument(
"--pre_proc_method",
default="GMM",
type=str,
help="Pre-processing method for the dataset. Either GMM or standard. (Gaussian mixture modelling method or standard scaler)",
)
parser.add_argument(
"--save_metrics",
default=False,
type=bool,
help="Set if we want to save the metrics - saved under Metric Breakdown.csv unless changed",
)
parser.add_argument(
"--gower",
default=False,
type=bool,
help="Do you want to calculate the average gower distance",
)
args = parser.parse_args()
n_seeds = args.n_runs
my_seeds = np.random.randint(1e6, size=n_seeds)
data_supp = support.read_df()
# Setup columns
original_continuous_columns = ["duration"] + [f"x{i}" for i in range(7, 15)]
original_categorical_columns = ["event"] + [f"x{i}" for i in range(1, 7)]
#%% -------- Data Pre-Processing -------- #
pre_proc_method = args.pre_proc_method
(
x_train,
data_supp,
reordered_dataframe_columns,
continuous_transformers,
categorical_transformers,
num_categories,
num_continuous,
) = support_pre_proc(data_supp=data_supp, pre_proc_method=pre_proc_method)
data = pd.DataFrame(x_train, columns=reordered_dataframe_columns)
# Define distributional metrics required - for sdv_baselines this is set by default
distributional_metrics = [
"SVCDetection",
"GMLogLikelihood",
"CSTest",
"KSTest",
"KSTestExtended",
"ContinuousKLDivergence",
"DiscreteKLDivergence",
]
# Define lists to contain the metrics achieved on the
# train/generate/evaluate runs
svc = []
gmm = []
cs = []
ks = []
kses = []
contkls = []
disckls = []
if args.gower:
gowers = []
# Perform the train/generate/evaluate runs
for i in range(n_seeds):
set_seed(my_seeds[i])
chosen_model = MODEL_CLASSES[args.model_type]
model = chosen_model() # field_transformers=transformer_dtypes)
print(f"Train + Generate + Evaluate {args.model_type}" f" - Run {i+1}/{n_seeds}")
model.fit(data)
new_data = model.sample(data.shape[0])
# new_data = Independent._fit_sample(data, None)
data_ = data.copy()
# Reverse the transformations
synthetic_supp = reverse_transformers(
synthetic_set=new_data,
data_supp_columns=data_supp.columns,
cont_transformers=continuous_transformers,
cat_transformers=categorical_transformers,
pre_proc_method=pre_proc_method,
)
metrics = distribution_metrics(
gower_bool=args.gower,
distributional_metrics=distributional_metrics,
data_supp=data_supp,
synthetic_supp=synthetic_supp,
categorical_columns=original_categorical_columns,
continuous_columns=original_continuous_columns,
saving_filepath=None,
pre_proc_method=pre_proc_method,
)
list_metrics = [metrics[i] for i in metrics.columns]
# New version has added a lot more evaluation metrics - only use fidelity metrics for now
svc.append(np.array(list_metrics[0]))
gmm.append(np.array(list_metrics[1]))
cs.append(np.array(list_metrics[2]))
ks.append(np.array(list_metrics[3]))
kses.append(np.array(list_metrics[4]))
contkls.append(np.array(list_metrics[5]))
disckls.append(np.array(list_metrics[6]))
if args.gower:
gowers.append(np.array(list_metrics[7]))
svc = np.array(svc)
gmm = np.array(gmm)
cs = np.array(cs)
# -*- coding: utf-8 -*-
from __future__ import print_function
import pandas as pd
import numpy as np
import math
import copy
import random
import glob
import os
unit_size = 5
feature_dim = 2048 + 1024
def iou_with_anchors(anchors_min, anchors_max, box_min, box_max):
"""Compute jaccard score between a box and the anchors.
"""
len_anchors = anchors_max - anchors_min
int_xmin = np.maximum(anchors_min, box_min)
int_xmax = np.minimum(anchors_max, box_max)
inter_len = np.maximum(int_xmax - int_xmin, 0.)
union_len = len_anchors - inter_len + box_max - box_min
# print inter_len,union_len
jaccard = np.divide(inter_len, union_len)
return jaccard
def ioa_with_anchors(anchors_min, anchors_max, box_min, box_max):
"""Compute intersection between score a box and the anchors.
"""
len_anchors = anchors_max - anchors_min
int_xmin = np.maximum(anchors_min, box_min)
int_xmax = np.minimum(anchors_max, box_max)
inter_len = np.maximum(int_xmax - int_xmin, 0.)
scores = np.divide(inter_len, len_anchors)
return scores
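# Worked example for both scores, anchor [0, 10] vs box [5, 15]:
#   iou_with_anchors(np.array([0.]), np.array([10.]), 5., 15.)  # 5/15 ~ 0.333
#   ioa_with_anchors(np.array([0.]), np.array([10.]), 5., 15.)  # 5/10 = 0.5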
def getBatchList(numWindow, batch_size, shuffle=True):
## notice that some videos may appear twice in the last two batches ##
window_list = list(range(numWindow))
batch_start_list = [i * batch_size for i in range(len(window_list) // batch_size)]
batch_start_list.append(len(window_list) - batch_size)
if shuffle == True:
random.shuffle(window_list)
batch_window_list = []
for bstart in batch_start_list:
batch_window_list.append(window_list[bstart:(bstart + batch_size)])
return batch_window_list
def getVideoFeature(videoname, subset):
appearance_path = '/home/litao/THUMOS14_ANET_feature/{}_appearance/'.format(subset)
denseflow_path = '/home/litao/THUMOS14_ANET_feature/{}_denseflow/'.format(subset)
rgb_feature = np.load(appearance_path + videoname + '.npy')
flow_feature = np.load(denseflow_path + videoname + '.npy')
return rgb_feature, flow_feature
def getBatchData(window_list, data_dict):
batch_bbox = []
batch_label = []
batch_index = [0]
batch_anchor_feature = []
for idx in window_list:
batch_bbox.extend(data_dict["gt_bbox"][idx])
batch_label.extend(data_dict["gt_label"][idx])
batch_index.append(batch_index[-1] + len(data_dict["gt_bbox"][idx]))
batch_anchor_feature.append(data_dict["feature"][idx])
batch_index = np.array(batch_index)
batch_bbox = np.array(batch_bbox)
batch_label = np.array(batch_label)
batch_anchor_feature = np.array(batch_anchor_feature)
return batch_index, batch_bbox, batch_label, batch_anchor_feature
def getFullData(dataSet):
ii = 0
# dataSet="Test"
annoDf = pd.read_csv("./data/thumos_14_annotations/" + dataSet + "_Annotation.csv")
videoNameList = list(set(annoDf.video.values[:]))
class_real = [0] + [7, 9, 12, 21, 22, 23, 24, 26, 31, 33,
36, 40, 45, 51, 68, 79, 85, 92, 93, 97]
list_data = []
list_gt_bbox = []
list_gt_label = []
for videoName in videoNameList:
print('complete {}/{}.'.format(ii, len(videoNameList)), end='\r')
ii += 1
video_annoDf = annoDf[annoDf.video == videoName]
gt_xmins = video_annoDf.startFrame.values[:]
gt_xmaxs = video_annoDf.endFrame.values[:]
gt_type_idx = video_annoDf.type_idx.values[:]
rgb_feature, flow_feature = getVideoFeature(videoName, dataSet.lower())
numSnippet = min(rgb_feature.shape[0], flow_feature.shape[0])
frameList = [1 + unit_size * i for i in range(numSnippet)]
df_data = np.concatenate((rgb_feature, flow_feature), axis=1)
df_snippet = frameList
window_size = 128
stride = window_size // 2
n_window = (numSnippet + stride - window_size) // stride
windows_start = [i * stride for i in range(n_window)]
if numSnippet < window_size:
windows_start = [0]
tmp_data = np.zeros((window_size - numSnippet, feature_dim))
df_data = np.concatenate((df_data, tmp_data), axis=0)
df_snippet.extend([df_snippet[-1] + unit_size * (i + 1) for i in range(window_size - numSnippet)])
elif numSnippet - windows_start[-1] - window_size > 20:
windows_start.append(numSnippet - window_size)
snippet_xmin = df_snippet
snippet_xmax = df_snippet[1:]
snippet_xmax.append(df_snippet[-1] + unit_size)
for start in windows_start:
tmp_data = df_data[start:start + window_size, :]
tmp_snippets = np.array(df_snippet[start:start + window_size])
tmp_anchor_xmins = snippet_xmin[start:start + window_size]
tmp_anchor_xmaxs = snippet_xmax[start:start + window_size]
tmp_gt_bbox = []
tmp_gt_label = []
tmp_ioa_list = []
for idx in range(len(gt_xmins)):
tmp_ioa = ioa_with_anchors(gt_xmins[idx], gt_xmaxs[idx], tmp_anchor_xmins[0], tmp_anchor_xmaxs[-1])
tmp_ioa_list.append(tmp_ioa)
if tmp_ioa > 0:
corrected_start = max(gt_xmins[idx], tmp_anchor_xmins[0]) - tmp_anchor_xmins[0]
corrected_end = min(gt_xmaxs[idx], tmp_anchor_xmaxs[-1]) - tmp_anchor_xmins[0]
tmp_gt_bbox.append([float(corrected_start) / (window_size * unit_size),
float(corrected_end) / (window_size * unit_size)])
# gt class label
one_hot = [0] * 21
one_hot[class_real.index(gt_type_idx[idx])] = 1
tmp_gt_label.append(one_hot)
# print tmp_ioa_list
if len(tmp_gt_bbox) > 0 and max(tmp_ioa_list) > 0.9:
list_gt_bbox.append(tmp_gt_bbox)
list_gt_label.append(tmp_gt_label)
list_data.append(tmp_data)
dataDict = {"gt_bbox": list_gt_bbox, "gt_label": list_gt_label,
"feature": list_data}
return dataDict
def getVideoData(videoName, subset):
list_data = []
list_snippets = []
rgb_feature, flow_feature = getVideoFeature(videoName, subset)
numSnippet = min(len(rgb_feature), len(flow_feature))
frameList = [1 + unit_size * i for i in range(numSnippet)]
df_data = np.concatenate((rgb_feature, flow_feature), axis=1)
df_snippet = frameList
window_size = 128
stride = window_size // 2
n_window = (numSnippet + stride - window_size) // stride
windows_start = [i * stride for i in range(n_window)]
if numSnippet < window_size:
windows_start = [0]
tmp_data = np.zeros((window_size - numSnippet, feature_dim))
df_data = np.concatenate((df_data, tmp_data), axis=0)
df_snippet.extend([df_snippet[-1] + unit_size * (i + 1) for i in range(window_size - numSnippet)])
else:
windows_start.append(numSnippet - window_size)
for start in windows_start:
tmp_data = df_data[start:start + window_size, :]
tmp_snippets = np.array(df_snippet[start:start + window_size])
list_data.append(tmp_data)
list_snippets.append(tmp_snippets)
list_snippets = np.array(list_snippets)
list_data = np.array(list_data)
# -*- coding: utf-8 -*-
"""
Created on Wed May 11 08:08:52 2016
@author: tkc
"""
import re
from collections import defaultdict
import pandas as pd
import numpy as np
import scipy
import scipy.stats
from scipy import optimize
from math import factorial # used by Savgol matrix
from scipy.optimize import curve_fit
#%%
def organizecolumns(df1,mycols):
''' Pass df and template (list of desired columns in desired order) and return reorganized newdf
'''
cols1=df1.columns.tolist()
newdf=df1 # avoids modification of passed df
uniquelist=[i for i in cols1 if i not in mycols]
for i,colname in enumerate(uniquelist): # remove cols from df1 that are absent from df2
# newdf.drop(colname, axis=1, inplace=True) # this modifies both passed and returned dfs
newdf=newdf.drop(colname, axis=1)
newdf=newdf[mycols] # reorder columns based on template df
return newdf
def parseelemlist(elemlist):
'''Find and separate multielement peaks to be averaged (e.g. Fe2 & Fe) from longer string of element peaks
e.g. splits "Mg Fe Fe2 Si" into "Mg Si" and "{Fe,[Fe,Fe2]} dictionary'''
# Strip numbers from strings within list
newlist=[re.match(r'\D+',i).group(0) for i in elemlist]
# find duplicated peaks (multiple peaks per element)
Multielem = defaultdict(list)
for i, item in enumerate(newlist):
Multielem[item].append(i)
Multielem = {k:v for k,v in Multielem.items() if len(v)>1} # dictionary with duplicated item and list with indices
duplist=list(Multielem.values()) # get list
duplist=[item for sublist in duplist for item in sublist] # single list with positions of duplicated elements
# now alter multipeak elements list to give dict with element and then list of peak for that element
for key,value in Multielem.items():
templist=value # dictionary value is list of elem peak index positions
peaklist=[]
for i, index in enumerate(templist): # create new list with original elem peak from index positions
peaklist.append(elemlist[index])
# now replace list of index positions with elempeak names
Multielem.update({key:peaklist}) # key will be multipeak element string i.e. "Fe"
# finally construct new single elements list with multipeak ones removed (handle each separately)
newelemlist=[]
for i in range(0,len(elemlist)):
if i not in duplist:
newelemlist.append(elemlist[i])
return newelemlist, Multielem
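# Worked example (mirrors the docstring):
#   parseelemlist(['Mg', 'Fe', 'Fe2', 'Si'])
#   # -> (['Mg', 'Si'], {'Fe': ['Fe', 'Fe2']})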
def parseelem2(elemlist, Multielem):
''' After multielement peaks removed, also move secondary peaks used as primary to dict (handle separately)
e.g. splits "S Mg Fe2 Si" into "S Mg Si" and "{Fe,[Fe2]} dictionary; same structure and df output
for averaging of Fe, Fe2, or straight Fe2 or straight Fe'''
# starting elemlist will only have single entries (i.e Ti2 but not Ti & Ti2)
newelemlist=[]
for i, elem in enumerate(elemlist):
if re.search(r'\d',elem): # has number
match=re.search(r'\d',elem)
newkey=elem[0:match.start()]
# store alt quant (i.e. on Ti2) with same structure as multiple quant (Ti & Ti2)
# Another entry in multielement list... makes things easier for later quant comparisons
templist=[] # peakIDs added as list (of length 1)
templist.append(elem) # list containing single string (keeps identical data structure)
Multielem.update({newkey:templist}) # add to existing dictionary for separate handling
else:
newelemlist.append(elemlist[i]) # just copy over
return newelemlist, Multielem # return altered element list and multielem dictionary
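# Worked example (mirrors the docstring):
#   parseelem2(['S', 'Mg', 'Fe2', 'Si'], {})
#   # -> (['S', 'Mg', 'Si'], {'Fe': ['Fe2']})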
def getelemthresholds(elemlist, AESquantparams):
'''get element-dependent significance thresholds for each peak from AESquantparams
return dictionary with element and associated significance level'''
thresholds={} # returns list of element dependent thresholds for this element set
for i, elem in enumerate(elemlist):
# find row in AESquantparams for this element
thiselemdata=AESquantparams[(AESquantparams['element']==elem)]
thiselemdata=thiselemdata.squeeze() # series with this elements params
thresholds.update({elem:thiselemdata.siglevel})
return thresholds
def cloneparamrows(df):
''' Make a param log entry for each areanum - used by calccomposition to correctly process spe files with multiple spatial areas
passed df is usually list of spe files
this solves problem that AugerParamLog only has one entry (despite possibly having multiple distinct areas with different spectra'''
df['Areanumber']=1 # set existing entries as area 1
mycols=df.dtypes.index
newrows=pd.DataFrame(columns=mycols) # blank df for new entries
for index, row in df.iterrows():
numareas=int(df.loc[index]['Areas'])
for i in range(2,numareas+1):
newrow=df.loc[index] # clone this row as series
newrow=newrow.set_value('Areanumber',i)
newrows=newrows.append(newrow)
df=pd.concat([df,newrows], ignore_index=True) # merge new rows with existing ones
df=df.sort_values(['Filenumber','Areanumber'])
return df
def calccomp(df, Integquantlog, elemlist, AESquantparams):
'''Calculate elemental composition of given files based on input element list
threshold - ratio of element peak to noise peak (0 means no threshold applied
load element-dependent significance level from AESquantparams'''
thresholds=getelemthresholds(elemlist, AESquantparams) # Get list of sigma levels for significance/inclusion
# thresholds for both single and multipeak
elemlist, multipeaklist = parseelemlist(elemlist) # list of single peak elements and dict with multipeaks
# check if any of the single peaks are secondary (i.e. quant on Fe2 not main Fe)
elemlist, multipeaklist= parseelem2(elemlist, multipeaklist)
# two element lists needed (elements with one peak and elements with compositions averaged from two peaks i.e. Fe2, Fe3)
# to process compositions from multiple areas, clone rows from spe log (one for each areanum)
df=cloneparamrows(df) # splits single entry for 5 spatial area spe into 5 rows with Areanumber 1-5
df=df.reset_index(drop=True)
df['AESbasis']=0.0 # resets to zero if already present from calcamplitude
mycols=['Filenumber', 'Project', 'Filename', 'FilePath', 'Sample', 'Comments','AESbasis','Areanumber']
for i, elem in enumerate(elemlist): # add columns for basis
df[elem]=0.0 # add col for each element to spelist
df['sig'+elem]=0.0 # copy peak significance (ratio of integrated counts over 1 sigma of background)
df['err'+elem]=0.0 # another for total error in adjusted counts basis
mycols.append(elem)
mycols.append('sig'+elem)
mycols.append('err'+elem)
for i,elem in enumerate(list(multipeaklist.keys())): # get elements (keys) from dict
df[elem]=0.0
df['sig'+elem]=0.0
df['err'+elem]=0.0
mycols.append(elem)
mycols.append('sig'+elem)
mycols.append('err'+elem)
for i, elem in enumerate(elemlist): # now add at.% columns (e.g. %S, %Mg)
colname='%'+elem # at % columns named %S, %Mg, etc.
errname='err%'+elem
mycols.append(colname) # add to column list template
mycols.append(errname)
df[colname]=0.0
df[errname]=0.0
for i,elem in enumerate(list(multipeaklist.keys())): # add multipeak elements
colname='%'+elem # at % columns named %S, %Mg, etc.
errname='err%'+elem
mycols.append(colname) # add to column list template
mycols.append(errname)
df[colname]=0.0
df[errname]=0.0
for i in range(0,len(df)): # loop through all desired spectrum (multiarea ones already have duplicated rows)
filenum=df.iloc[i]['Filenumber']
areanum=df.iloc[i]['Areanumber']
match=Integquantlog[Integquantlog['Filenumber']==filenum] # find integ data for this filenumber
match=match[match['Areanumber']==areanum]
basis=0.0 #
for j, elem in enumerate(elemlist): # handle the single peak elements
temp=match[match['Element']==elem] # finds entry for this element
if len(temp)==1:
# thresholds is dict with required significance level for each element
thisthresh=thresholds.get(elem) # sig level for this element
df=df.set_value(i, 'sig'+elem, temp.iloc[0]['Significance']) # always copy peak significance level
if temp.iloc[0]['Significance']>thisthresh: # if above set threshold then calculate elem's value and add to basis
df=df.set_value(i, elem, temp.iloc[0]['Adjcnts']) # copy adjusted counts of this element
df=df.set_value(i, 'err'+elem, temp.iloc[0]['Erradjcnts'])
basis+=temp.iloc[0]['Adjcnts'] # add this element's value to AES basis
# now handle the multipeak elements (get average value from both peaks)
for key, value in multipeaklist.items(): # key is element (aka colname in df), value is list of peaks in Smdifpeakslog
templist=value # dictionary value is list of elem peak index positions
numlines=len(templist) # this is number of lines that are average (i.e. 2 for Fe&Fe2)
avgval=0.0 # working value for averaged adjamplitude
erravgval=0.0 # combined error from erradjcnts of each line
for k, peak in enumerate(templist): # create new list with original elem peak from index positions
temp=match[match['Element']==peak] # finds integquantlog entry for this peak (match already trimmed to filenum and area)
if len(temp)==1:
thisthresh=thresholds.get(peak) # sig level for this element/peak
df=df.set_value(i, 'sig'+key, temp.iloc[0]['Significance']) # copy peak significance level
if temp.iloc[0]['Significance']>thisthresh:
avgval+=temp.iloc[0]['Adjcnts']
thiserrperc=(temp.iloc[0]['Erradjcnts']/temp.iloc[0]['Adjcnts'])**2 # square of the relative error
erravgval+=thiserrperc # sum of square of relative error
else:
numlines=numlines-1 # if peak is zeroed out and not added, this reduces # peaks in average
if numlines>0: # avoid divbyzero if peak is too small
avgval=avgval/numlines # this is now average basis for given element
erravgval=np.sqrt(erravgval) # sqrt of sum of squares is relative error
df=df.set_value(i, key, avgval) # copy adjusted amplitude of this element
df=df.set_value(i, 'err'+key, avgval*erravgval) # combined actual error of this elem (as determined from multiple lines)
# add value from this element to AESbasis
basis+=avgval
# end of multipeak elements loop
df=df.set_value(i, 'AESbasis', basis) # write total basis value to df
# Now compute at.% for each listed element (incl errors)
for j, elem in enumerate(elemlist):
colname='%'+elem
ratio=df.iloc[i][elem]/df.iloc[i]['AESbasis'] # initialized to zero in cases where peak is below significance threshold
df.set_value(i, colname, ratio)
temp=match[match['Element']==elem] # again find peak entry and get finds entry for this peak
# TODO maybe check threshold again (although element's value will be zero)
if len(temp)==1:
thiserr=temp.iloc[0]['Erradjcnts']
atpercerr=thiserr/df.iloc[i]['AESbasis']
errname='err%'+elem # error column
df.set_value(i, errname, atpercerr) # Writes absolute error in at%
# Also calculate for elements w/ multiple peaks (if present)
for key, value in multipeaklist.items():
templist=value # dictionary value is list of elem peak index positions
numlines=len(templist) # this is number of lines that are average (i.e. 2 for Fe&Fe2)
colname='%'+key
ratio=df.iloc[i][key]/df.iloc[i]['AESbasis']
df.set_value(i, colname, ratio)
# TODO need to propagate errors through Fe & Fe2
errlist=[] # list of errors in % (usually max of two)
for k, peak in enumerate(templist): # create new list with original elem peak from index positions
temp=match[match['Element']==peak] # finds entry for this peak
if len(temp)==1:
if temp.iloc[0]['Adjcnts']>0: # skip negative values
err=temp.iloc[0]['Erradjcnts']/temp.iloc[0]['Adjcnts']
errlist.append(err) # add this to list
# combine errors in quadrature
totalerr=0.0
for j, err in enumerate(errlist):
totalerr+=err**2
totalerr=np.sqrt(totalerr) # percent error in at %
# now get actual error
thisval=df.iloc[i][key] # this is averaged value computed above (possibly zero if below thresholds )
thiserr=thisval*totalerr # error (in Fe) as actual value based on average of multiple peaks
atpercerr=thiserr/df.iloc[i]['AESbasis']
errname='err%'+ key # error column
df.set_value(i, errname, atpercerr) # Writes absolute error in at%
# end of loop calculation for each spectrum
# organize data based on mycols template
df=organizecolumns(df,mycols)
return df
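# Usage sketch (hypothetical inputs; the logs come from earlier processing steps):
#   comp = calccomp(spelist, Integquantlog, ['S', 'Mg', 'Fe', 'Fe2', 'Si'], AESquantparams)
#   # comp gains at.% columns (%S, %Mg, ...) and err% columns per file/area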
def calcadjcounts(df, AESquantparams, sig=2, kerrors=True):
'''For each elemental peak in interquantlog, calculate or recalculate adjusted counts using k-factor2 and mass
result stored in adjcnts column and used for subsequent compositional determinations
can change AESquantresults and recalc at any time; sig (aka 2 sigma errors) is default setting
kerrors -- include error associated with kfactor (along with Poisson errors)'''
if 'Adjcnts' not in df:
df['Adjcnts']=0.0 # new column for adjusted amplitude (if not already present)
if 'Erradjcnts' not in df:
df['Erradjcnts']=0.0 # new column for associated error
if 'err%cnts' not in df:
df['err%cnts']=0.0 # percentage error only from counting statistics (not including kfactor err)
if 'err%total' not in df:
df['err%total']=0.0 # percentage error only from counting statistics (not including kfactor err)
# loop for each element, mask df, get appropriate k-factor & mass
df=df.reset_index(drop=True) # go ahead and reset index
elemlist=np.ndarray.tolist(df.Element.unique()) # list of unique elements from df
for i,elem in enumerate(elemlist):
match=AESquantparams[(AESquantparams['element']==elem)]
match=match.reset_index(drop=True)
kfactor2=match.iloc[0]['kfactor2'] # kfactor and mass for this element/peak
errkf2=match.iloc[0]['errkf2'] # percent error in above for integ method
mass=match.iloc[0]['mass']
elemmask=(df['Element']==elem) # mask for this element in loop
for j in range(0,len(df)): # loop and set adjamplitude to amp*kfact/mass
if elemmask[j]==True: # row has this element
newval=df.iloc[j]['Integcounts']*kfactor2/mass
percerr=sig/np.sqrt(df.iloc[j]['Integcounts']) # 2/sqrt(N) is percent error
totalerr=np.sqrt(errkf2**2+percerr**2) # combine in quadrature
err=newval*totalerr # error value is adjusted counts * 2 sig error percentage
df=df.set_value(j,'Adjcnts',newval)
df=df.set_value(j,'err%cnts',percerr)
df=df.set_value(j,'err%total',totalerr)
df=df.set_value(j,'Erradjcnts',err)
return df
''' TESTING
df=lowerfitpeak
'''
def makelinebackground(df, areanum, fitparams):
'''Create linear background under peak region
passed small slice of Augerfile df just peak region and small adjacent background '''
if fitparams[0]=='n/a': # prior linregresss problem
return df # return unmodified file
slope=fitparams[0]
intercept=fitparams[1]
backfitname='Backfit'+str(areanum)
for index,row in df.iterrows(): # blend between lower line and upper line
xval=df.loc[index]['Energy']
yval=slope*xval+intercept
df=df.set_value(index,backfitname,yval)
return df # return same df with interpolated background region added
def makeinterplinebackground(df, areanum, lowerfitparams, upperfitparams):
'''Create interpolated background from lower and upper peak fits
passed small slice of Augerfile df just peak region and small adjacent background '''
# check for n/a values
if lowerfitparams[0]=='n/a' or upperfitparams[0]=='n/a': # prior linregresss problem
return df # return unmodified file
lowslope=lowerfitparams[0]
lowintercept=lowerfitparams[1]
upslope=upperfitparams[0]
upintercept=upperfitparams[1]
backfitname='Backfit'+str(areanum)
if len(df)>0: # entire region passed should have no vals in backfit (only interpolated region)
evstep=1/(len(df)+1)
else:
print('Unspecified error in creating background')
return
startrow=df.iloc[0].name # index of first value
for index,row in df.iterrows(): # blend between lower line and upper line
xval=df.loc[index]['Energy']
yval=(1-evstep*(index-startrow))*(lowslope*xval+lowintercept)+evstep*(index-startrow)*(upslope*xval+upintercept)
df=df.set_value(index,backfitname,yval)
return df # return same df with interpolated background region added
def fitCapeak(df, areanum, elem, AugerFileName):
'''Pass appropriate chunk from Auger spectral dataframe, perform linear fit
return chunk with backfit column added '''
colname='Smcounts'+str(areanum)
backfitname='Backfit'+str(areanum)
xcol=df['Energy']
ycol=df[colname] # Counts1, Counts2 or whatever
# find relative minimum
try:
parabfunc=lambda x, a, b, c: a*x**2 + b*x + c # lambda definition of 2nd-order (parabolic) poly
fitparams, cov =curve_fit(parabfunc, xcol, ycol) # scipy optimize
ss_res=np.dot((ycol-parabfunc(xcol,*fitparams)), (ycol-parabfunc(xcol,*fitparams))) # dot product of diff between data and function
ymean=np.mean(ycol) # mean of dataset
ss_tot=np.dot((ycol-ymean),(ycol-ymean))
R2=1-(ss_res/ss_tot) # coeff of determination
# diagonal of covariance matrix contains variances for fit params
    except Exception: # deal with common curve_fit failures
print('Fitting error for', elem, ' in file ', AugerFileName)
fitparams=('n/a','n/a','n/a') # return all n/a
R2='n/a'
return df, fitparams, R2
for index,row in df.iterrows():
xval=df.loc[index]['Energy']
yval= fitparams[0] * xval**2+ fitparams[1] * xval + fitparams[2]
        df.at[index, backfitname]=yval
return df, fitparams, R2
def makeCabackground(df, areanum, fitparams):
    ''' Fill background col of auger spe file with values derived from 2nd-order poly fit (pass region under peak
    not fitted by fitCapeak, which only grabs adjacent background)'''
backfitname='Backfit'+str(areanum)
if len(fitparams)!=3: # prior fitting error already reported via print
return df
A=fitparams[0]
B=fitparams[1]
C=fitparams[2]
    for index,row in df.iterrows(): # evaluate fitted parabola at each energy value
        xval=df.loc[index]['Energy']
        yval=A*xval**2+ B* xval +C
        df.at[index,backfitname]=yval
    return df
def fitcubic(df, areanum, elem, AugerFileName):
'''Pass appropriate chunk from Auger spectral dataframe, perform cubic fit
return chunk with backfit column added '''
colname='Smcounts'+str(areanum) # use smoothed data for background fits
backfitname='Backfit'+str(areanum)
xcol=df['Energy']
ycol=df[colname] # Counts1, Counts2 or whatever
    # cubic fit of the background region (uses smoothed data)
try:
cubicfunc=lambda x, a, b, c, d: a*x**3 + b*x**2 + c*x + d # lambda definition of cubic poly
fitparams, cov =curve_fit(cubicfunc, xcol, ycol) # scipy optimize
ss_res=np.dot((ycol-cubicfunc(xcol,*fitparams)), (ycol-cubicfunc(xcol,*fitparams))) # dot product of diff between data and function
ymean=np.mean(ycol) # mean of dataset
ss_tot=np.dot((ycol-ymean),(ycol-ymean))
R2=1-(ss_res/ss_tot) # coeff of determination
# TODO insert special handling for failed fits (some R2 threshold)
# Maybe restrictions on curvature
    except Exception: # deal with failed fit
        print('Fitting error for', elem, ' in file ', AugerFileName)
        fitparams=('n/a','n/a','n/a','n/a') # return all n/a
        R2='n/a'
        return df, fitparams, R2
for index,row in df.iterrows():
xval=df.loc[index]['Energy']
yval= fitparams[0] * xval**3+ fitparams[1] * xval**2 + fitparams[2] * xval + fitparams[3]
        df.at[index, backfitname]=yval
return df, fitparams, R2
def makecubicbackground(df, areanum, fitparams):
    ''' Fill background col of auger spe file with values derived from 3rd-order (cubic) poly fit (pass region under peak
    not fitted by fitcubic, which only grabs adjacent background)'''
backfitname='Backfit'+str(areanum)
if len(fitparams)!=4: # prior fitting error already reported via print
return df
A=fitparams[0]
B=fitparams[1]
C=fitparams[2]
D=fitparams[3]
    for index,row in df.iterrows(): # evaluate fitted cubic at each energy value
        xval=df.loc[index]['Energy']
        yval= A * xval**3+ B * xval**2 + C * xval + D
        df.at[index,backfitname]=yval
    return df
'''
For background fit testing
Augerfile=pd.read_csv('C2010W_18Nov15_12231225.csv')
areanum=1
elem=Elemdata[0][0]
fittype=Elemdata[0][1]
integpeak=Elemdata[0][2]
lower1=Elemdata[0][3]
lower2=Elemdata[0][4]
upper1=Elemdata[0][5]
upper2=Elemdata[0][6]
df=fitregion
Augerfile.to_csv('C2010W_18Nov15_12231225.csv', index=False)
'''
''' TESTING OF BELOW FITS
plt.plot(xcol,ycol,'b-') # actual data in blue
plt.plot(xcol,gaussian(fitparams, xcol),'r-') # Gaussian fit in red
'''
def fitgauss(df, areanum, width, elem, AugerFileName, addgauss=True):
    ''' Gaussian fit of direct peaks (pass Augerfile slice of just the peak region);
    no need to save the Gaussian fit, just return width and other params;
    integwidth is passed from the AESquantparams value'''
peakname='Peaks'+str(areanum)
# Remove nan values from peak region
df=df.dropna(subset=[peakname]) # remove nan entries from peak
# estimate initial Gaussian parameters from data
    if df.empty: # deal with prior failed background fits (no data in this region after dropna)
print('Gaussian fitting error for', elem, ' peak in file ', AugerFileName)
fitparams=('n/a','n/a','n/a','n/a') # return all n/a
rsquared='n/a'
ier='n/a'
return df, fitparams, rsquared, ier
xc=df[peakname].idxmax() # estimate center based on peak max index
xc=df.loc[xc]['Energy'] # associated energy value near center
peakarea=df[peakname].sum() # decent area estimate
    y0=0 # initial guess for constant baseline offset
params0=[xc,width,peakarea,y0] # initial params list (first guess at gaussian params)
xcol=df['Energy']
ycol=df[peakname] # Counts1, Counts2 or whatever
    xcol=xcol.to_numpy() # convert both to numpy arrays
    ycol=ycol.to_numpy()
# define standard gaussian funct (xc, width, area and yoffset are init params)
gaussian=lambda params, x: params[3]+params[2]/(params[1]*np.sqrt(2*np.pi))*np.exp(-((x-params[0])**2/(2*params[1]**2)))
# thisgauss= gaussian(params0,xcol)
errfunc=lambda p, xcol, ycol: ycol- gaussian(p,xcol) # lambda error funct definition
    # sigma2FWHM = lambda sigma: sigma * 2 * np.sqrt(2 * np.log(2)) # for this Gaussian, FWHM = 2*sqrt(2*ln2)*sigma ~= 2.355*sigma
try:
fitparams, cov, infodict, mesg, ier =optimize.leastsq(errfunc,params0,args=(xcol,ycol),full_output=True)
ss_err=(infodict['fvec']**2).sum()
ss_tot=((ycol-ycol.mean())**2).sum()
rsquared=1-(ss_err/ss_tot)
    except Exception: # fitting problem
print('Gaussian fitting error for', elem, ' peak in file ', AugerFileName)
fitparams=('n/a','n/a','n/a','n/a') # return all n/a
rsquared='n/a'
ier='n/a'
return df, fitparams, rsquared, ier
    if addgauss:
gaussname="Gauss"+str(areanum)
        df[gaussname]=np.nan # add col for gaussian fit values
for index,row in df.iterrows():
xval=df.loc[index]['Energy']
yval=fitparams[3]+fitparams[2]/(fitparams[1]*np.sqrt(2*np.pi))*np.exp(-((xval-fitparams[0])**2/(2*fitparams[1]**2)))
            df.at[index,gaussname]=yval
return df, fitparams, rsquared, ier
''' TESTING
For background fit testing
df=fitregion
Augerfile=pd.read_csv('C2010W_18Nov15_12231225.csv')
areanum=1
elem=Elemdata[1][0]
fittype=Elemdata[1][1]
integpeak=Elemdata[1][2]
lower1=Elemdata[1][3]
lower2=Elemdata[1][4]
upper1=Elemdata[1][5]
upper2=Elemdata[1][6]
integwidth=Elemdata[0][8]
if ier in [1,2,3,4]: print ('true')
'''
def findintegparams(Augerfile, Elements, AESquantparams, Shifts):
'''Grab integration width and expected counts peak position (also incorporates shift from deriv method)'''
halfwidths=[]
peakcenters=[]
Energyvals = Augerfile.Energy # for finding index #s corresponding to energy vals for this spectrum
for i, elem in enumerate(Elements):
thiselem=AESquantparams[AESquantparams['Element']==elem]
if len(thiselem)!=1:
print('WARNING ... AES quant parameters not found for ', elem)
halfwidths.append(4) # default integration width
peakcenters.append('n/a') #
            continue # skip to next element (defaults appended above)
halfwidths.append(int((thiselem.iloc[0]['integwidth']-1)/2)) # integration uses half-width on either side of center
integpeakeV=thiselem.iloc[0]['negpeak']-thiselem.iloc[0]['integpeak']+Shifts[i] # shift of direct peak (defined relative to deriv peak)
temptuple=min(enumerate(Energyvals), key=lambda x: abs(x[1]-integpeakeV)) # tuple with index of closest and closest value
peakcenters.append(temptuple[0]) # first of tuple is closest index #
return halfwidths, peakcenters
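''' TESTING (sketch of the closest-index idiom used in findintegparams above)
Energyvals=pd.Series([100.0,100.5,101.0,101.5])
idx,val=min(enumerate(Energyvals), key=lambda x: abs(x[1]-100.8))
idx,val # (2, 101.0): index and value of the channel closest to 100.8 eV
'''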
def integpeaks(Augerfile, Backfitparams, areanum, Elements, Shifts, logmatch, AESquantparams):
    ''' Integration and Gaussian fit of each direct peak; Shifts is a list of energy shifts of negpeak (same order as Elements).
    Opens source spectrum as Augerfile, fits peak backgrounds above and below using Elemdata, saves background to the source csv
    (overwrites existing fits), and also saves linear fit params to the log dataframe with position/amplitude/etc.
    Desired elements out of data range are skipped (in the prior findindices function).
    Backfitparams holds all elements but only this Augerfile.
    '''
    #create temp dataframe for storage of each peak's params
Backfitparams=Backfitparams.dropna(subset=['Rval1']) # skip integration/Gaussian fit if background fit failed
AugerFileName=logmatch.Filename #
# Create temp df to hold and pass linear fit data
mycols=['Filenumber', 'Filename', 'Filepath', 'Sample', 'Comments', 'Areanumber', 'Element', 'Integcounts',
'Backcounts', 'Significance', 'Xc', 'Width', 'Peakarea', 'Y0','Rsquared','Numchannels']
Integresults=pd.DataFrame(columns=mycols) # empty df for all integ results for elems in this spe file
peakname='Peaks'+str(areanum) # this is counts - background (but only calculated in vicinity of known elemental peaks)
backfitname='Backfit'+str(areanum)
# global shifts from smdifpeaks and local shift based on smoothed 2nd derivative
halfwidths, peakcenters=findintegparams(Augerfile, Elements, AESquantparams, Shifts)
# loop through and fit all peaks for each element in this spatial area
for i, elem in enumerate(Elements):
if i not in Backfitparams.index: # skips integ calc if backfit is n/a
continue
thisbackfit=Backfitparams[Backfitparams['Element']==elem]
if len(thisbackfit)!=1:
print('Problem retrieving fit boundaries for ',elem, ' in ', AugerFileName)
continue
lower1=thisbackfit.iloc[0]['Lower1']
upper2=thisbackfit.iloc[0]['Upper2']
fitregion=Augerfile[lower1:upper2+1]
        if fitregion.empty: # skip if no data present (already should be skipped in Elemdata)
print('No data present for ', elem, ' in ', AugerFileName)
continue
# also need accurate lower/upper bounds ... available from backfitparams
        Integresult=pd.DataFrame(index=np.arange(0,1))
from numpy.testing import (assert_allclose, assert_almost_equal,
assert_array_equal, assert_array_almost_equal_nulp)
import numpy as np
import pytest
import matplotlib.mlab as mlab
from matplotlib.cbook.deprecation import MatplotlibDeprecationWarning
def _stride_repeat(*args, **kwargs):
with pytest.warns(MatplotlibDeprecationWarning):
return mlab.stride_repeat(*args, **kwargs)
class TestStride:
def get_base(self, x):
y = x
while y.base is not None:
y = y.base
return y
def calc_window_target(self, x, NFFT, noverlap=0, axis=0):
"""
This is an adaptation of the original window extraction algorithm.
This is here to test to make sure the new implementation has the same
result.
"""
step = NFFT - noverlap
ind = np.arange(0, len(x) - NFFT + 1, step)
n = len(ind)
result = np.zeros((NFFT, n))
# do the ffts of the slices
for i in range(n):
result[:, i] = x[ind[i]:ind[i]+NFFT]
if axis == 1:
result = result.T
return result
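    # calc_window_target builds the (NFFT, n_segments) reference array by explicit
    # slicing; mlab.stride_windows is expected to return the same values through
    # numpy stride tricks without copying, which is what the get_base identity
    # checks in the tests below verify.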
@pytest.mark.parametrize('shape', [(), (10, 1)], ids=['0D', '2D'])
def test_stride_windows_invalid_input_shape(self, shape):
x = np.arange(np.prod(shape)).reshape(shape)
with pytest.raises(ValueError):
mlab.stride_windows(x, 5)
@pytest.mark.parametrize('n, noverlap',
[(0, None), (11, None), (2, 2), (2, 3)],
ids=['n less than 1', 'n greater than input',
'noverlap greater than n',
'noverlap equal to n'])
def test_stride_windows_invalid_params(self, n, noverlap):
x = np.arange(10)
with pytest.raises(ValueError):
mlab.stride_windows(x, n, noverlap)
@pytest.mark.parametrize('shape', [(), (10, 1)], ids=['0D', '2D'])
def test_stride_repeat_invalid_input_shape(self, shape):
x = np.arange(np.prod(shape)).reshape(shape)
with pytest.raises(ValueError):
_stride_repeat(x, 5)
@pytest.mark.parametrize('axis', [-1, 2],
ids=['axis less than 0',
'axis greater than input shape'])
def test_stride_repeat_invalid_axis(self, axis):
x = np.array(0)
with pytest.raises(ValueError):
_stride_repeat(x, 5, axis=axis)
def test_stride_repeat_n_lt_1_ValueError(self):
x = np.arange(10)
with pytest.raises(ValueError):
_stride_repeat(x, 0)
@pytest.mark.parametrize('axis', [0, 1], ids=['axis0', 'axis1'])
@pytest.mark.parametrize('n', [1, 5], ids=['n1', 'n5'])
def test_stride_repeat(self, n, axis):
x = np.arange(10)
y = _stride_repeat(x, n, axis=axis)
expected_shape = [10, 10]
expected_shape[axis] = n
yr = np.repeat(np.expand_dims(x, axis), n, axis=axis)
assert yr.shape == y.shape
assert_array_equal(yr, y)
assert tuple(expected_shape) == y.shape
assert self.get_base(y) is x
@pytest.mark.parametrize('axis', [0, 1], ids=['axis0', 'axis1'])
@pytest.mark.parametrize('n, noverlap',
[(1, 0), (5, 0), (15, 2), (13, -3)],
ids=['n1-noverlap0', 'n5-noverlap0',
'n15-noverlap2', 'n13-noverlapn3'])
def test_stride_windows(self, n, noverlap, axis):
x = np.arange(100)
y = mlab.stride_windows(x, n, noverlap=noverlap, axis=axis)
expected_shape = [0, 0]
expected_shape[axis] = n
expected_shape[1 - axis] = 100 // (n - noverlap)
yt = self.calc_window_target(x, n, noverlap=noverlap, axis=axis)
assert yt.shape == y.shape
assert_array_equal(yt, y)
assert tuple(expected_shape) == y.shape
assert self.get_base(y) is x
@pytest.mark.parametrize('axis', [0, 1], ids=['axis0', 'axis1'])
def test_stride_windows_n32_noverlap0_unflatten(self, axis):
n = 32
x = np.arange(n)[np.newaxis]
x1 = np.tile(x, (21, 1))
x2 = x1.flatten()
y = mlab.stride_windows(x2, n, axis=axis)
if axis == 0:
x1 = x1.T
assert y.shape == x1.shape
assert_array_equal(y, x1)
def test_stride_ensure_integer_type(self):
N = 100
x = np.full(N + 20, np.nan)
y = x[10:-10]
y[:] = 0.3
        # prior to #3845 this led to corrupt access
y_strided = mlab.stride_windows(y, n=33, noverlap=0.6)
assert_array_equal(y_strided, 0.3)
        # prior to #3845 this led to corrupt access
y_strided = mlab.stride_windows(y, n=33.3, noverlap=0)
assert_array_equal(y_strided, 0.3)
        # even prior to #3845 we could not find any problematic
        # configuration; however, let's be sure it's not accidentally
        # introduced
y_strided = _stride_repeat(y, n=33.815)
assert_array_equal(y_strided, 0.3)
def _apply_window(*args, **kwargs):
with pytest.warns(MatplotlibDeprecationWarning):
return mlab.apply_window(*args, **kwargs)
class TestWindow:
def setup(self):
np.random.seed(0)
n = 1000
self.sig_rand = np.random.standard_normal(n) + 100.
self.sig_ones = np.ones(n)
def check_window_apply_repeat(self, x, window, NFFT, noverlap):
"""
This is an adaptation of the original window application algorithm.
This is here to test to make sure the new implementation has the same
result.
"""
step = NFFT - noverlap
ind = np.arange(0, len(x) - NFFT + 1, step)
n = len(ind)
result = np.zeros((NFFT, n))
if np.iterable(window):
windowVals = window
else:
windowVals = window(np.ones(NFFT, x.dtype))
# do the ffts of the slices
for i in range(n):
result[:, i] = windowVals * x[ind[i]:ind[i]+NFFT]
return result
def test_window_none_rand(self):
res = mlab.window_none(self.sig_ones)
assert_array_equal(res, self.sig_ones)
def test_window_none_ones(self):
res = mlab.window_none(self.sig_rand)
assert_array_equal(res, self.sig_rand)
def test_window_hanning_rand(self):
targ = np.hanning(len(self.sig_rand)) * self.sig_rand
res = mlab.window_hanning(self.sig_rand)
assert_allclose(targ, res, atol=1e-06)
def test_window_hanning_ones(self):
targ = np.hanning(len(self.sig_ones))
res = mlab.window_hanning(self.sig_ones)
assert_allclose(targ, res, atol=1e-06)
def test_apply_window_1D_axis1_ValueError(self):
x = self.sig_rand
window = mlab.window_hanning
with pytest.raises(ValueError):
_apply_window(x, window, axis=1, return_window=False)
def test_apply_window_1D_els_wrongsize_ValueError(self):
x = self.sig_rand
window = mlab.window_hanning(np.ones(x.shape[0]-1))
with pytest.raises(ValueError):
_apply_window(x, window)
def test_apply_window_0D_ValueError(self):
x = np.array(0)
window = mlab.window_hanning
with pytest.raises(ValueError):
_apply_window(x, window, axis=1, return_window=False)
def test_apply_window_3D_ValueError(self):
x = self.sig_rand[np.newaxis][np.newaxis]
window = mlab.window_hanning
with pytest.raises(ValueError):
_apply_window(x, window, axis=1, return_window=False)
def test_apply_window_hanning_1D(self):
x = self.sig_rand
window = mlab.window_hanning
window1 = mlab.window_hanning(np.ones(x.shape[0]))
y, window2 = _apply_window(x, window, return_window=True)
yt = window(x)
assert yt.shape == y.shape
assert x.shape == y.shape
assert_allclose(yt, y, atol=1e-06)
assert_array_equal(window1, window2)
def test_apply_window_hanning_1D_axis0(self):
x = self.sig_rand
window = mlab.window_hanning
y = _apply_window(x, window, axis=0, return_window=False)
yt = window(x)
assert yt.shape == y.shape
assert x.shape == y.shape
assert_allclose(yt, y, atol=1e-06)
def test_apply_window_hanning_els_1D_axis0(self):
x = self.sig_rand
window = mlab.window_hanning(np.ones(x.shape[0]))
window1 = mlab.window_hanning
y = _apply_window(x, window, axis=0, return_window=False)
yt = window1(x)
assert yt.shape == y.shape
assert x.shape == y.shape
assert_allclose(yt, y, atol=1e-06)
def test_apply_window_hanning_2D_axis0(self):
x = np.random.standard_normal([1000, 10]) + 100.
window = mlab.window_hanning
y = _apply_window(x, window, axis=0, return_window=False)
yt = np.zeros_like(x)
for i in range(x.shape[1]):
yt[:, i] = window(x[:, i])
assert yt.shape == y.shape
assert x.shape == y.shape
assert_allclose(yt, y, atol=1e-06)
def test_apply_window_hanning_els1_2D_axis0(self):
x = np.random.standard_normal([1000, 10]) + 100.
window = mlab.window_hanning(np.ones(x.shape[0]))
window1 = mlab.window_hanning
y = _apply_window(x, window, axis=0, return_window=False)
yt = np.zeros_like(x)
for i in range(x.shape[1]):
yt[:, i] = window1(x[:, i])
assert yt.shape == y.shape
assert x.shape == y.shape
assert_allclose(yt, y, atol=1e-06)
def test_apply_window_hanning_els2_2D_axis0(self):
x = np.random.standard_normal([1000, 10]) + 100.
window = mlab.window_hanning
window1 = mlab.window_hanning(np.ones(x.shape[0]))
y, window2 = _apply_window(x, window, axis=0, return_window=True)
yt = np.zeros_like(x)
for i in range(x.shape[1]):
yt[:, i] = window1*x[:, i]
assert yt.shape == y.shape
assert x.shape == y.shape
assert_allclose(yt, y, atol=1e-06)
assert_array_equal(window1, window2)
def test_apply_window_hanning_els3_2D_axis0(self):
x = np.random.standard_normal([1000, 10]) + 100.
window = mlab.window_hanning
window1 = mlab.window_hanning(np.ones(x.shape[0]))
y, window2 = _apply_window(x, window, axis=0, return_window=True)
yt = _apply_window(x, window1, axis=0, return_window=False)
assert yt.shape == y.shape
assert x.shape == y.shape
assert_allclose(yt, y, atol=1e-06)
assert_array_equal(window1, window2)
def test_apply_window_hanning_2D_axis1(self):
x = np.random.standard_normal([10, 1000]) + 100.
window = mlab.window_hanning
y = _apply_window(x, window, axis=1, return_window=False)
yt = np.zeros_like(x)
for i in range(x.shape[0]):
yt[i, :] = window(x[i, :])
assert yt.shape == y.shape
assert x.shape == y.shape
assert_allclose(yt, y, atol=1e-06)
def test_apply_window_hanning_2D_els1_axis1(self):
x = np.random.standard_normal([10, 1000]) + 100.
window = mlab.window_hanning(np.ones(x.shape[1]))
window1 = mlab.window_hanning
y = _apply_window(x, window, axis=1, return_window=False)
yt = np.zeros_like(x)
for i in range(x.shape[0]):
yt[i, :] = window1(x[i, :])
assert yt.shape == y.shape
assert x.shape == y.shape
assert_allclose(yt, y, atol=1e-06)
def test_apply_window_hanning_2D_els2_axis1(self):
x = np.random.standard_normal([10, 1000]) + 100.
window = mlab.window_hanning
window1 = mlab.window_hanning(np.ones(x.shape[1]))
y, window2 = _apply_window(x, window, axis=1, return_window=True)
yt = np.zeros_like(x)
for i in range(x.shape[0]):
yt[i, :] = window1 * x[i, :]
assert yt.shape == y.shape
assert x.shape == y.shape
assert_allclose(yt, y, atol=1e-06)
assert_array_equal(window1, window2)
def test_apply_window_hanning_2D_els3_axis1(self):
x = np.random.standard_normal([10, 1000]) + 100.
window = mlab.window_hanning
window1 = mlab.window_hanning(np.ones(x.shape[1]))
y = _apply_window(x, window, axis=1, return_window=False)
yt = _apply_window(x, window1, axis=1, return_window=False)
assert yt.shape == y.shape
assert x.shape == y.shape
assert_allclose(yt, y, atol=1e-06)
def test_apply_window_stride_windows_hanning_2D_n13_noverlapn3_axis0(self):
x = self.sig_rand
window = mlab.window_hanning
yi = mlab.stride_windows(x, n=13, noverlap=2, axis=0)
y = _apply_window(yi, window, axis=0, return_window=False)
yt = self.check_window_apply_repeat(x, window, 13, 2)
assert yt.shape == y.shape
assert x.shape != y.shape
assert_allclose(yt, y, atol=1e-06)
def test_apply_window_hanning_2D_stack_axis1(self):
ydata = np.arange(32)
ydata1 = ydata+5
ydata2 = ydata+3.3
ycontrol1 = _apply_window(ydata1, mlab.window_hanning)
ycontrol2 = mlab.window_hanning(ydata2)
ydata = np.vstack([ydata1, ydata2])
ycontrol = np.vstack([ycontrol1, ycontrol2])
ydata = np.tile(ydata, (20, 1))
ycontrol = np.tile(ycontrol, (20, 1))
result = _apply_window(ydata, mlab.window_hanning, axis=1,
return_window=False)
assert_allclose(ycontrol, result, atol=1e-08)
def test_apply_window_hanning_2D_stack_windows_axis1(self):
ydata = np.arange(32)
ydata1 = ydata+5
ydata2 = ydata+3.3
ycontrol1 = _apply_window(ydata1, mlab.window_hanning)
ycontrol2 = mlab.window_hanning(ydata2)
ydata = np.vstack([ydata1, ydata2])
ycontrol = np.vstack([ycontrol1, ycontrol2])
ydata = np.tile(ydata, (20, 1))
ycontrol = np.tile(ycontrol, (20, 1))
result = _apply_window(ydata, mlab.window_hanning, axis=1,
return_window=False)
assert_allclose(ycontrol, result, atol=1e-08)
def test_apply_window_hanning_2D_stack_windows_axis1_unflatten(self):
n = 32
ydata = np.arange(n)
ydata1 = ydata+5
ydata2 = ydata+3.3
ycontrol1 = _apply_window(ydata1, mlab.window_hanning)
ycontrol2 = mlab.window_hanning(ydata2)
ydata = np.vstack([ydata1, ydata2])
ycontrol = np.vstack([ycontrol1, ycontrol2])
ydata = np.tile(ydata, (20, 1))
ycontrol = np.tile(ycontrol, (20, 1))
ydata = ydata.flatten()
ydata1 = mlab.stride_windows(ydata, 32, noverlap=0, axis=0)
result = _apply_window(ydata1, mlab.window_hanning, axis=0,
return_window=False)
assert_allclose(ycontrol.T, result, atol=1e-08)
class TestDetrend:
def setup(self):
np.random.seed(0)
n = 1000
x = np.linspace(0., 100, n)
self.sig_zeros = np.zeros(n)
self.sig_off = self.sig_zeros + 100.
self.sig_slope = np.linspace(-10., 90., n)
self.sig_slope_mean = x - x.mean()
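        # slope signal with its mean removed: the expected slope component after
        # mean-detrending, since the other signal components are zero-mean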
sig_rand = np.random.standard_normal(n)
sig_sin = np.sin(x*2*np.pi/(n/100))
sig_rand -= sig_rand.mean()
sig_sin -= sig_sin.mean()
self.sig_base = sig_rand + sig_sin
self.atol = 1e-08
def test_detrend_none_0D_zeros(self):
input = 0.
targ = input
mlab.detrend_none(input)
assert input == targ
def test_detrend_none_0D_zeros_axis1(self):
input = 0.
targ = input
mlab.detrend_none(input, axis=1)
assert input == targ
def test_detrend_str_none_0D_zeros(self):
input = 0.
targ = input
mlab.detrend(input, key='none')
assert input == targ
def test_detrend_detrend_none_0D_zeros(self):
input = 0.
targ = input
mlab.detrend(input, key=mlab.detrend_none)
assert input == targ
def test_detrend_none_0D_off(self):
input = 5.5
targ = input
mlab.detrend_none(input)
assert input == targ
def test_detrend_none_1D_off(self):
input = self.sig_off
targ = input
res = mlab.detrend_none(input)
assert_array_equal(res, targ)
def test_detrend_none_1D_slope(self):
input = self.sig_slope
targ = input
res = mlab.detrend_none(input)
assert_array_equal(res, targ)
def test_detrend_none_1D_base(self):
input = self.sig_base
targ = input
res = mlab.detrend_none(input)
assert_array_equal(res, targ)
def test_detrend_none_1D_base_slope_off_list(self):
input = self.sig_base + self.sig_slope + self.sig_off
targ = input.tolist()
res = mlab.detrend_none(input.tolist())
assert res == targ
def test_detrend_none_2D(self):
arri = [self.sig_base,
self.sig_base + self.sig_off,
self.sig_base + self.sig_slope,
self.sig_base + self.sig_off + self.sig_slope]
input = np.vstack(arri)
targ = input
res = mlab.detrend_none(input)
assert_array_equal(res, targ)
def test_detrend_none_2D_T(self):
arri = [self.sig_base,
self.sig_base + self.sig_off,
self.sig_base + self.sig_slope,
self.sig_base + self.sig_off + self.sig_slope]
input = np.vstack(arri)
targ = input
res = mlab.detrend_none(input.T)
assert_array_equal(res.T, targ)
def test_detrend_mean_0D_zeros(self):
input = 0.
targ = 0.
res = mlab.detrend_mean(input)
assert_almost_equal(res, targ)
def test_detrend_str_mean_0D_zeros(self):
input = 0.
targ = 0.
res = mlab.detrend(input, key='mean')
assert_almost_equal(res, targ)
def test_detrend_detrend_mean_0D_zeros(self):
input = 0.
targ = 0.
res = mlab.detrend(input, key=mlab.detrend_mean)
assert_almost_equal(res, targ)
def test_detrend_mean_0D_off(self):
input = 5.5
targ = 0.
res = mlab.detrend_mean(input)
assert_almost_equal(res, targ)
def test_detrend_str_mean_0D_off(self):
input = 5.5
targ = 0.
res = mlab.detrend(input, key='mean')
assert_almost_equal(res, targ)
def test_detrend_detrend_mean_0D_off(self):
input = 5.5
targ = 0.
res = mlab.detrend(input, key=mlab.detrend_mean)
assert_almost_equal(res, targ)
def test_detrend_mean_1D_zeros(self):
input = self.sig_zeros
targ = self.sig_zeros
res = mlab.detrend_mean(input)
assert_allclose(res, targ, atol=self.atol)
def test_detrend_mean_1D_base(self):
input = self.sig_base
targ = self.sig_base
res = mlab.detrend_mean(input)
assert_allclose(res, targ, atol=self.atol)
def test_detrend_mean_1D_base_off(self):
input = self.sig_base + self.sig_off
targ = self.sig_base
res = mlab.detrend_mean(input)
assert_allclose(res, targ, atol=self.atol)
def test_detrend_mean_1D_base_slope(self):
input = self.sig_base + self.sig_slope
targ = self.sig_base + self.sig_slope_mean
res = mlab.detrend_mean(input)
assert_allclose(res, targ, atol=self.atol)
def test_detrend_mean_1D_base_slope_off(self):
input = self.sig_base + self.sig_slope + self.sig_off
targ = self.sig_base + self.sig_slope_mean
res = mlab.detrend_mean(input)
assert_allclose(res, targ, atol=1e-08)
def test_detrend_mean_1D_base_slope_off_axis0(self):
input = self.sig_base + self.sig_slope + self.sig_off
targ = self.sig_base + self.sig_slope_mean
res = mlab.detrend_mean(input, axis=0)
assert_allclose(res, targ, atol=1e-08)
def test_detrend_mean_1D_base_slope_off_list(self):
input = self.sig_base + self.sig_slope + self.sig_off
targ = self.sig_base + self.sig_slope_mean
res = mlab.detrend_mean(input.tolist())
assert_allclose(res, targ, atol=1e-08)
def test_detrend_mean_1D_base_slope_off_list_axis0(self):
input = self.sig_base + self.sig_slope + self.sig_off
targ = self.sig_base + self.sig_slope_mean
res = mlab.detrend_mean(input.tolist(), axis=0)
assert_allclose(res, targ, atol=1e-08)
def test_detrend_mean_2D_default(self):
arri = [self.sig_off,
self.sig_base + self.sig_off]
arrt = [self.sig_zeros,
self.sig_base]
input = np.vstack(arri)
targ = np.vstack(arrt)
res = mlab.detrend_mean(input)
assert_allclose(res, targ, atol=1e-08)
def test_detrend_mean_2D_none(self):
arri = [self.sig_off,
self.sig_base + self.sig_off]
arrt = [self.sig_zeros,
self.sig_base]
input = np.vstack(arri)
targ = np.vstack(arrt)
res = mlab.detrend_mean(input, axis=None)
assert_allclose(res, targ,
atol=1e-08)
def test_detrend_mean_2D_none_T(self):
arri = [self.sig_off,
self.sig_base + self.sig_off]
arrt = [self.sig_zeros,
self.sig_base]
input = np.vstack(arri).T
targ = np.vstack(arrt)
res = mlab.detrend_mean(input, axis=None)
assert_allclose(res.T, targ,
atol=1e-08)
def test_detrend_mean_2D_axis0(self):
arri = [self.sig_base,
self.sig_base + self.sig_off,
self.sig_base + self.sig_slope,
self.sig_base + self.sig_off + self.sig_slope]
arrt = [self.sig_base,
self.sig_base,
self.sig_base + self.sig_slope_mean,
self.sig_base + self.sig_slope_mean]
input = np.vstack(arri).T
targ = np.vstack(arrt).T
res = mlab.detrend_mean(input, axis=0)
assert_allclose(res, targ,
atol=1e-08)
def test_detrend_mean_2D_axis1(self):
arri = [self.sig_base,
self.sig_base + self.sig_off,
self.sig_base + self.sig_slope,
self.sig_base + self.sig_off + self.sig_slope]
arrt = [self.sig_base,
self.sig_base,
self.sig_base + self.sig_slope_mean,
self.sig_base + self.sig_slope_mean]
input = np.vstack(arri)
targ = np.vstack(arrt)
res = mlab.detrend_mean(input, axis=1)
assert_allclose(res, targ,
atol=1e-08)
def test_detrend_mean_2D_axism1(self):
arri = [self.sig_base,
self.sig_base + self.sig_off,
self.sig_base + self.sig_slope,
self.sig_base + self.sig_off + self.sig_slope]
arrt = [self.sig_base,
self.sig_base,
self.sig_base + self.sig_slope_mean,
self.sig_base + self.sig_slope_mean]
input = np.vstack(arri)
targ = np.vstack(arrt)
res = mlab.detrend_mean(input, axis=-1)
assert_allclose(res, targ,
atol=1e-08)
def test_detrend_2D_default(self):
arri = [self.sig_off,
self.sig_base + self.sig_off]
arrt = [self.sig_zeros,
self.sig_base]
input = np.vstack(arri)
targ = np.vstack(arrt)
res = mlab.detrend(input)
assert_allclose(res, targ, atol=1e-08)
def test_detrend_2D_none(self):
arri = [self.sig_off,
self.sig_base + self.sig_off]
arrt = [self.sig_zeros,
self.sig_base]
input = np.vstack(arri)
targ = np.vstack(arrt)
res = mlab.detrend(input, axis=None)
assert_allclose(res, targ, atol=1e-08)
def test_detrend_str_mean_2D_axis0(self):
arri = [self.sig_base,
self.sig_base + self.sig_off,
self.sig_base + self.sig_slope,
self.sig_base + self.sig_off + self.sig_slope]
arrt = [self.sig_base,
self.sig_base,
self.sig_base + self.sig_slope_mean,
self.sig_base + self.sig_slope_mean]
input = np.vstack(arri).T
targ = np.vstack(arrt).T
res = mlab.detrend(input, key='mean', axis=0)
assert_allclose(res, targ,
atol=1e-08)
def test_detrend_str_constant_2D_none_T(self):
arri = [self.sig_off,
self.sig_base + self.sig_off]
arrt = [self.sig_zeros,
self.sig_base]
input = np.vstack(arri).T
targ = np.vstack(arrt)
res = mlab.detrend(input, key='constant', axis=None)
assert_allclose(res.T, targ,
atol=1e-08)
def test_detrend_str_default_2D_axis1(self):
arri = [self.sig_base,
self.sig_base + self.sig_off,
self.sig_base + self.sig_slope,
self.sig_base + self.sig_off + self.sig_slope]
arrt = [self.sig_base,
self.sig_base,
self.sig_base + self.sig_slope_mean,
self.sig_base + self.sig_slope_mean]
input = np.vstack(arri)
targ = np.vstack(arrt)
res = mlab.detrend(input, key='default', axis=1)
assert_allclose(res, targ,
atol=1e-08)
def test_detrend_detrend_mean_2D_axis0(self):
arri = [self.sig_base,
self.sig_base + self.sig_off,
self.sig_base + self.sig_slope,
self.sig_base + self.sig_off + self.sig_slope]
arrt = [self.sig_base,
self.sig_base,
self.sig_base + self.sig_slope_mean,
self.sig_base + self.sig_slope_mean]
input = np.vstack(arri).T
targ = np.vstack(arrt).T
res = mlab.detrend(input, key=mlab.detrend_mean, axis=0)
assert_allclose(res, targ,
atol=1e-08)
def test_detrend_bad_key_str_ValueError(self):
input = self.sig_slope[np.newaxis]
with pytest.raises(ValueError):
mlab.detrend(input, key='spam')
def test_detrend_bad_key_var_ValueError(self):
input = self.sig_slope[np.newaxis]
with pytest.raises(ValueError):
mlab.detrend(input, key=5)
def test_detrend_mean_0D_d0_ValueError(self):
input = 5.5
with pytest.raises(ValueError):
mlab.detrend_mean(input, axis=0)
def test_detrend_0D_d0_ValueError(self):
input = 5.5
with pytest.raises(ValueError):
mlab.detrend(input, axis=0)
def test_detrend_mean_1D_d1_ValueError(self):
input = self.sig_slope
with pytest.raises(ValueError):
mlab.detrend_mean(input, axis=1)
def test_detrend_1D_d1_ValueError(self):
input = self.sig_slope
with pytest.raises(ValueError):
mlab.detrend(input, axis=1)
def test_detrend_mean_2D_d2_ValueError(self):
input = self.sig_slope[np.newaxis]
with pytest.raises(ValueError):
mlab.detrend_mean(input, axis=2)
def test_detrend_2D_d2_ValueError(self):
input = self.sig_slope[np.newaxis]
with pytest.raises(ValueError):
mlab.detrend(input, axis=2)
def test_detrend_linear_0D_zeros(self):
input = 0.
targ = 0.
res = mlab.detrend_linear(input)
assert_almost_equal(res, targ)
def test_detrend_linear_0D_off(self):
input = 5.5
targ = 0.
res = mlab.detrend_linear(input)
assert_almost_equal(res, targ)
def test_detrend_str_linear_0D_off(self):
input = 5.5
targ = 0.
res = mlab.detrend(input, key='linear')
assert_almost_equal(res, targ)
def test_detrend_detrend_linear_0D_off(self):
input = 5.5
targ = 0.
res = mlab.detrend(input, key=mlab.detrend_linear)
assert_almost_equal(res, targ)
def test_detrend_linear_1d_off(self):
input = self.sig_off
targ = self.sig_zeros
res = mlab.detrend_linear(input)
assert_allclose(res, targ, atol=self.atol)
def test_detrend_linear_1d_slope(self):
input = self.sig_slope
targ = self.sig_zeros
res = mlab.detrend_linear(input)
assert_allclose(res, targ, atol=self.atol)
def test_detrend_linear_1d_slope_off(self):
input = self.sig_slope + self.sig_off
targ = self.sig_zeros
res = mlab.detrend_linear(input)
assert_allclose(res, targ, atol=self.atol)
def test_detrend_str_linear_1d_slope_off(self):
input = self.sig_slope + self.sig_off
targ = self.sig_zeros
res = mlab.detrend(input, key='linear')
assert_allclose(res, targ, atol=self.atol)
def test_detrend_detrend_linear_1d_slope_off(self):
input = self.sig_slope + self.sig_off
targ = self.sig_zeros
res = mlab.detrend(input, key=mlab.detrend_linear)
assert_allclose(res, targ, atol=self.atol)
def test_detrend_linear_1d_slope_off_list(self):
input = self.sig_slope + self.sig_off
targ = self.sig_zeros
res = mlab.detrend_linear(input.tolist())
assert_allclose(res, targ, atol=self.atol)
def test_detrend_linear_2D_ValueError(self):
input = self.sig_slope[np.newaxis]
with pytest.raises(ValueError):
mlab.detrend_linear(input)
def test_detrend_str_linear_2d_slope_off_axis0(self):
arri = [self.sig_off,
self.sig_slope,
self.sig_slope + self.sig_off]
arrt = [self.sig_zeros,
self.sig_zeros,
self.sig_zeros]
input = np.vstack(arri).T
targ = np.vstack(arrt).T
res = mlab.detrend(input, key='linear', axis=0)
assert_allclose(res, targ, atol=self.atol)
def test_detrend_detrend_linear_1d_slope_off_axis1(self):
arri = [self.sig_off,
self.sig_slope,
self.sig_slope + self.sig_off]
arrt = [self.sig_zeros,
self.sig_zeros,
self.sig_zeros]
input = np.vstack(arri).T
targ = np.vstack(arrt).T
res = mlab.detrend(input, key=mlab.detrend_linear, axis=0)
assert_allclose(res, targ, atol=self.atol)
def test_detrend_str_linear_2d_slope_off_axis0_notranspose(self):
arri = [self.sig_off,
self.sig_slope,
self.sig_slope + self.sig_off]
arrt = [self.sig_zeros,
self.sig_zeros,
self.sig_zeros]
input = np.vstack(arri)
targ = np.vstack(arrt)
res = mlab.detrend(input, key='linear', axis=1)
assert_allclose(res, targ, atol=self.atol)
def test_detrend_detrend_linear_1d_slope_off_axis1_notranspose(self):
arri = [self.sig_off,
self.sig_slope,
self.sig_slope + self.sig_off]
arrt = [self.sig_zeros,
self.sig_zeros,
self.sig_zeros]
input = np.vstack(arri)
targ = np.vstack(arrt)
res = mlab.detrend(input, key=mlab.detrend_linear, axis=1)
assert_allclose(res, targ, atol=self.atol)
@pytest.mark.parametrize('iscomplex', [False, True],
ids=['real', 'complex'], scope='class')
@pytest.mark.parametrize('sides', ['onesided', 'twosided', 'default'],
scope='class')
@pytest.mark.parametrize(
'fstims,len_x,NFFT_density,nover_density,pad_to_density,pad_to_spectrum',
[
([], None, -1, -1, -1, -1),
([4], None, -1, -1, -1, -1),
([4, 5, 10], None, -1, -1, -1, -1),
([], None, None, -1, -1, None),
([], None, -1, -1, None, None),
([], None, None, -1, None, None),
([], 1024, 512, -1, -1, 128),
([], 256, -1, -1, 33, 257),
([], 255, 33, -1, -1, None),
([], 256, 128, -1, 256, 256),
([], None, -1, 32, -1, -1),
],
ids=[
'nosig',
'Fs4',
'FsAll',
'nosig_noNFFT',
'nosig_nopad_to',
'nosig_noNFFT_no_pad_to',
'nosig_trim',
'nosig_odd',
'nosig_oddlen',
'nosig_stretch',
'nosig_overlap',
],
scope='class')
class TestSpectral:
@pytest.fixture(scope='class', autouse=True)
def stim(self, request, fstims, iscomplex, sides, len_x, NFFT_density,
nover_density, pad_to_density, pad_to_spectrum):
Fs = 100.
x = np.arange(0, 10, 1 / Fs)
if len_x is not None:
x = x[:len_x]
# get the stimulus frequencies, defaulting to None
fstims = [Fs / fstim for fstim in fstims]
# get the constants, default to calculated values
if NFFT_density is None:
NFFT_density_real = 256
elif NFFT_density < 0:
NFFT_density_real = NFFT_density = 100
else:
NFFT_density_real = NFFT_density
if nover_density is None:
nover_density_real = 0
elif nover_density < 0:
nover_density_real = nover_density = NFFT_density_real // 2
else:
nover_density_real = nover_density
if pad_to_density is None:
pad_to_density_real = NFFT_density_real
elif pad_to_density < 0:
pad_to_density = int(2**np.ceil(np.log2(NFFT_density_real)))
pad_to_density_real = pad_to_density
else:
pad_to_density_real = pad_to_density
if pad_to_spectrum is None:
pad_to_spectrum_real = len(x)
elif pad_to_spectrum < 0:
pad_to_spectrum_real = pad_to_spectrum = len(x)
else:
pad_to_spectrum_real = pad_to_spectrum
if pad_to_spectrum is None:
NFFT_spectrum_real = NFFT_spectrum = pad_to_spectrum_real
else:
NFFT_spectrum_real = NFFT_spectrum = len(x)
nover_spectrum = 0
NFFT_specgram = NFFT_density
nover_specgram = nover_density
pad_to_specgram = pad_to_density
NFFT_specgram_real = NFFT_density_real
nover_specgram_real = nover_density_real
if sides == 'onesided' or (sides == 'default' and not iscomplex):
# frequencies for specgram, psd, and csd
# need to handle even and odd differently
if pad_to_density_real % 2:
freqs_density = np.linspace(0, Fs / 2,
num=pad_to_density_real,
endpoint=False)[::2]
else:
freqs_density = np.linspace(0, Fs / 2,
num=pad_to_density_real // 2 + 1)
# frequencies for complex, magnitude, angle, and phase spectrums
# need to handle even and odd differently
if pad_to_spectrum_real % 2:
freqs_spectrum = np.linspace(0, Fs / 2,
num=pad_to_spectrum_real,
endpoint=False)[::2]
else:
freqs_spectrum = np.linspace(0, Fs / 2,
num=pad_to_spectrum_real // 2 + 1)
else:
# frequencies for specgram, psd, and csd
            # need to handle even and odd differently
if pad_to_density_real % 2:
freqs_density = np.linspace(-Fs / 2, Fs / 2,
num=2 * pad_to_density_real,
endpoint=False)[1::2]
else:
freqs_density = np.linspace(-Fs / 2, Fs / 2,
num=pad_to_density_real,
endpoint=False)
# frequencies for complex, magnitude, angle, and phase spectrums
# need to handle even and odd differently
if pad_to_spectrum_real % 2:
freqs_spectrum = np.linspace(-Fs / 2, Fs / 2,
num=2 * pad_to_spectrum_real,
endpoint=False)[1::2]
else:
freqs_spectrum = np.linspace(-Fs / 2, Fs / 2,
num=pad_to_spectrum_real,
endpoint=False)
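        # in short: onesided spectra use pad_to//2 + 1 bins spanning [0, Fs/2]
        # (odd pad_to is handled by subsampling an endpoint-free grid), while
        # twosided spectra use pad_to bins spanning [-Fs/2, Fs/2)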
freqs_specgram = freqs_density
# time points for specgram
t_start = NFFT_specgram_real // 2
t_stop = len(x) - NFFT_specgram_real // 2 + 1
t_step = NFFT_specgram_real - nover_specgram_real
t_specgram = x[t_start:t_stop:t_step]
if NFFT_specgram_real % 2:
t_specgram += 1 / Fs / 2
if len(t_specgram) == 0:
t_specgram = np.array([NFFT_specgram_real / (2 * Fs)])
t_spectrum = np.array([NFFT_spectrum_real / (2 * Fs)])
t_density = t_specgram
y = np.zeros_like(x)
for i, fstim in enumerate(fstims):
y += np.sin(fstim * x * np.pi * 2) * 10**i
if iscomplex:
y = y.astype('complex')
# Interestingly, the instance on which this fixture is called is not
# the same as the one on which a test is run. So we need to modify the
# class itself when using a class-scoped fixture.
cls = request.cls
cls.Fs = Fs
cls.sides = sides
cls.fstims = fstims
cls.NFFT_density = NFFT_density
cls.nover_density = nover_density
cls.pad_to_density = pad_to_density
cls.NFFT_spectrum = NFFT_spectrum
cls.nover_spectrum = nover_spectrum
cls.pad_to_spectrum = pad_to_spectrum
cls.NFFT_specgram = NFFT_specgram
cls.nover_specgram = nover_specgram
cls.pad_to_specgram = pad_to_specgram
cls.t_specgram = t_specgram
cls.t_density = t_density
cls.t_spectrum = t_spectrum
cls.y = y
cls.freqs_density = freqs_density
cls.freqs_spectrum = freqs_spectrum
cls.freqs_specgram = freqs_specgram
cls.NFFT_density_real = NFFT_density_real
def check_freqs(self, vals, targfreqs, resfreqs, fstims):
assert resfreqs.argmin() == 0
assert resfreqs.argmax() == len(resfreqs)-1
assert_allclose(resfreqs, targfreqs, atol=1e-06)
for fstim in fstims:
i = np.abs(resfreqs - fstim).argmin()
assert vals[i] > vals[i+2]
assert vals[i] > vals[i-2]
def check_maxfreq(self, spec, fsp, fstims):
# skip the test if there are no frequencies
if len(fstims) == 0:
return
# if twosided, do the test for each side
if fsp.min() < 0:
fspa = np.abs(fsp)
zeroind = fspa.argmin()
self.check_maxfreq(spec[:zeroind], fspa[:zeroind], fstims)
self.check_maxfreq(spec[zeroind:], fspa[zeroind:], fstims)
return
fstimst = fstims[:]
spect = spec.copy()
# go through each peak and make sure it is correctly the maximum peak
while fstimst:
maxind = spect.argmax()
maxfreq = fsp[maxind]
assert_almost_equal(maxfreq, fstimst[-1])
del fstimst[-1]
spect[maxind-5:maxind+5] = 0
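    # the +/-5 bin zeroing above erases each located peak so that the
    # next-largest stimulus becomes the argmax on the following iteration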
def test_spectral_helper_raises(self):
# We don't use parametrize here to handle ``y = self.y``.
for kwargs in [ # Various error conditions:
{"y": self.y+1, "mode": "complex"}, # Modes requiring ``x is y``.
{"y": self.y+1, "mode": "magnitude"},
{"y": self.y+1, "mode": "angle"},
{"y": self.y+1, "mode": "phase"},
{"mode": "spam"}, # Bad mode.
{"y": self.y, "sides": "eggs"}, # Bad sides.
{"y": self.y, "NFFT": 10, "noverlap": 20}, # noverlap > NFFT.
{"NFFT": 10, "noverlap": 10}, # noverlap == NFFT.
{"y": self.y, "NFFT": 10,
"window": np.ones(9)}, # len(win) != NFFT.
]:
with pytest.raises(ValueError):
mlab._spectral_helper(x=self.y, **kwargs)
@pytest.mark.parametrize('mode', ['default', 'psd'])
def test_single_spectrum_helper_unsupported_modes(self, mode):
with pytest.raises(ValueError):
mlab._single_spectrum_helper(x=self.y, mode=mode)
@pytest.mark.parametrize("mode, case", [
("psd", "density"),
("magnitude", "specgram"),
("magnitude", "spectrum"),
])
def test_spectral_helper_psd(self, mode, case):
freqs = getattr(self, f"freqs_{case}")
spec, fsp, t = mlab._spectral_helper(
x=self.y, y=self.y,
NFFT=getattr(self, f"NFFT_{case}"),
Fs=self.Fs,
noverlap=getattr(self, f"nover_{case}"),
pad_to=getattr(self, f"pad_to_{case}"),
sides=self.sides,
mode=mode)
assert_allclose(fsp, freqs, atol=1e-06)
assert_allclose(t, getattr(self, f"t_{case}"), atol=1e-06)
assert spec.shape[0] == freqs.shape[0]
assert spec.shape[1] == getattr(self, f"t_{case}").shape[0]
def test_csd(self):
freqs = self.freqs_density
spec, fsp = mlab.csd(x=self.y, y=self.y+1,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=self.nover_density,
pad_to=self.pad_to_density,
sides=self.sides)
assert_allclose(fsp, freqs, atol=1e-06)
assert spec.shape == freqs.shape
def test_csd_padding(self):
"""Test zero padding of csd()."""
if self.NFFT_density is None: # for derived classes
return
sargs = dict(x=self.y, y=self.y+1, Fs=self.Fs, window=mlab.window_none,
sides=self.sides)
spec0, _ = mlab.csd(NFFT=self.NFFT_density, **sargs)
spec1, _ = mlab.csd(NFFT=self.NFFT_density*2, **sargs)
assert_almost_equal(np.sum(np.conjugate(spec0)*spec0).real,
np.sum(np.conjugate(spec1/2)*spec1/2).real)
def test_psd(self):
freqs = self.freqs_density
spec, fsp = mlab.psd(x=self.y,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=self.nover_density,
pad_to=self.pad_to_density,
sides=self.sides)
assert spec.shape == freqs.shape
self.check_freqs(spec, freqs, fsp, self.fstims)
@pytest.mark.parametrize(
'make_data, detrend',
[(np.zeros, mlab.detrend_mean), (np.zeros, 'mean'),
(np.arange, mlab.detrend_linear), (np.arange, 'linear')])
def test_psd_detrend(self, make_data, detrend):
if self.NFFT_density is None:
return
ydata = make_data(self.NFFT_density)
ydata1 = ydata+5
ydata2 = ydata+3.3
ydata = np.vstack([ydata1, ydata2])
ydata = np.tile(ydata, (20, 1))
ydatab = ydata.T.flatten()
ydata = ydata.flatten()
ycontrol = np.zeros_like(ydata)
spec_g, fsp_g = mlab.psd(x=ydata,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
detrend=detrend)
spec_b, fsp_b = mlab.psd(x=ydatab,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
detrend=detrend)
spec_c, fsp_c = mlab.psd(x=ycontrol,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides)
assert_array_equal(fsp_g, fsp_c)
assert_array_equal(fsp_b, fsp_c)
assert_allclose(spec_g, spec_c, atol=1e-08)
# these should not be almost equal
with pytest.raises(AssertionError):
assert_allclose(spec_b, spec_c, atol=1e-08)
def test_psd_window_hanning(self):
if self.NFFT_density is None:
return
ydata = np.arange(self.NFFT_density)
ydata1 = ydata+5
ydata2 = ydata+3.3
ycontrol1, windowVals = _apply_window(ydata1,
mlab.window_hanning,
return_window=True)
ycontrol2 = mlab.window_hanning(ydata2)
ydata = np.vstack([ydata1, ydata2])
ycontrol = np.vstack([ycontrol1, ycontrol2])
ydata = np.tile(ydata, (20, 1))
ycontrol = np.tile(ycontrol, (20, 1))
ydatab = ydata.T.flatten()
ydataf = ydata.flatten()
ycontrol = ycontrol.flatten()
spec_g, fsp_g = mlab.psd(x=ydataf,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
window=mlab.window_hanning)
spec_b, fsp_b = mlab.psd(x=ydatab,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
window=mlab.window_hanning)
spec_c, fsp_c = mlab.psd(x=ycontrol,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
window=mlab.window_none)
spec_c *= len(ycontrol1)/(np.abs(windowVals)**2).sum()
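        # mlab.psd normalizes by the window power sum(|window|**2); the control
        # run used window_none (all ones), so rescale it here to match the
        # hanning-windowed runs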
assert_array_equal(fsp_g, fsp_c)
assert_array_equal(fsp_b, fsp_c)
assert_allclose(spec_g, spec_c, atol=1e-08)
# these should not be almost equal
with pytest.raises(AssertionError):
assert_allclose(spec_b, spec_c, atol=1e-08)
def test_psd_window_hanning_detrend_linear(self):
if self.NFFT_density is None:
return
ydata = np.arange(self.NFFT_density)
ycontrol = np.zeros(self.NFFT_density)
ydata1 = ydata+5
ydata2 = ydata+3.3
ycontrol1 = ycontrol
ycontrol2 = ycontrol
ycontrol1, windowVals = _apply_window(ycontrol1,
mlab.window_hanning,
return_window=True)
ycontrol2 = mlab.window_hanning(ycontrol2)
ydata = np.vstack([ydata1, ydata2])
ycontrol = np.vstack([ycontrol1, ycontrol2])
ydata = np.tile(ydata, (20, 1))
ycontrol = np.tile(ycontrol, (20, 1))
ydatab = ydata.T.flatten()
ydataf = ydata.flatten()
ycontrol = ycontrol.flatten()
spec_g, fsp_g = mlab.psd(x=ydataf,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
detrend=mlab.detrend_linear,
window=mlab.window_hanning)
spec_b, fsp_b = mlab.psd(x=ydatab,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
detrend=mlab.detrend_linear,
window=mlab.window_hanning)
spec_c, fsp_c = mlab.psd(x=ycontrol,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
window=mlab.window_none)
spec_c *= len(ycontrol1)/(np.abs(windowVals)**2).sum()
assert_array_equal(fsp_g, fsp_c)
assert_array_equal(fsp_b, fsp_c)
assert_allclose(spec_g, spec_c, atol=1e-08)
# these should not be almost equal
with pytest.raises(AssertionError):
assert_allclose(spec_b, spec_c, atol=1e-08)
def test_psd_windowarray(self):
freqs = self.freqs_density
spec, fsp = mlab.psd(x=self.y,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=self.nover_density,
pad_to=self.pad_to_density,
sides=self.sides,
window=np.ones(self.NFFT_density_real))
assert_allclose(fsp, freqs, atol=1e-06)
assert spec.shape == freqs.shape
def test_psd_windowarray_scale_by_freq(self):
win = mlab.window_hanning(np.ones(self.NFFT_density_real))
spec, fsp = mlab.psd(x=self.y,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=self.nover_density,
pad_to=self.pad_to_density,
sides=self.sides,
window=mlab.window_hanning)
spec_s, fsp_s = mlab.psd(x=self.y,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=self.nover_density,
pad_to=self.pad_to_density,
sides=self.sides,
window=mlab.window_hanning,
scale_by_freq=True)
spec_n, fsp_n = mlab.psd(x=self.y,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=self.nover_density,
pad_to=self.pad_to_density,
sides=self.sides,
window=mlab.window_hanning,
scale_by_freq=False)
assert_array_equal(fsp, fsp_s)
assert_array_equal(fsp, fsp_n)
assert_array_equal(spec, spec_s)
assert_allclose(spec_s*(win**2).sum(),
spec_n/self.Fs*win.sum()**2,
atol=1e-08)
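        # the relation asserted above follows from the two normalizations:
        # scale_by_freq=True divides by Fs*sum(window**2), while
        # scale_by_freq=False divides by sum(window)**2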
@pytest.mark.parametrize(
"kind", ["complex", "magnitude", "angle", "phase"])
def test_spectrum(self, kind):
freqs = self.freqs_spectrum
spec, fsp = getattr(mlab, f"{kind}_spectrum")(
x=self.y,
Fs=self.Fs, sides=self.sides, pad_to=self.pad_to_spectrum)
assert_allclose(fsp, freqs, atol=1e-06)
assert spec.shape == freqs.shape
if kind == "magnitude":
self.check_maxfreq(spec, fsp, self.fstims)
self.check_freqs(spec, freqs, fsp, self.fstims)
@pytest.mark.parametrize(
'kwargs',
[{}, {'mode': 'default'}, {'mode': 'psd'}, {'mode': 'magnitude'},
{'mode': 'complex'}, {'mode': 'angle'}, {'mode': 'phase'}])
def test_specgram(self, kwargs):
freqs = self.freqs_specgram
spec, fsp, t = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides,
**kwargs)
if kwargs.get('mode') == 'complex':
spec = np.abs(spec)
specm = np.mean(spec, axis=1)
assert_allclose(fsp, freqs, atol=1e-06)
assert_allclose(t, self.t_specgram, atol=1e-06)
assert spec.shape[0] == freqs.shape[0]
assert spec.shape[1] == self.t_specgram.shape[0]
if kwargs.get('mode') not in ['complex', 'angle', 'phase']:
# using a single freq, so all time slices should be about the same
if np.abs(spec.max()) != 0:
assert_allclose(
np.diff(spec, axis=1).max() / np.abs(spec.max()), 0,
atol=1e-02)
if kwargs.get('mode') not in ['angle', 'phase']:
self.check_freqs(specm, freqs, fsp, self.fstims)
def test_specgram_warn_only1seg(self):
"""Warning should be raised if len(x) <= NFFT."""
with pytest.warns(UserWarning, match="Only one segment is calculated"):
mlab.specgram(x=self.y, NFFT=len(self.y), Fs=self.Fs)
def test_psd_csd_equal(self):
Pxx, freqsxx = mlab.psd(x=self.y,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=self.nover_density,
pad_to=self.pad_to_density,
sides=self.sides)
Pxy, freqsxy = mlab.csd(x=self.y, y=self.y,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=self.nover_density,
pad_to=self.pad_to_density,
sides=self.sides)
assert_array_almost_equal_nulp(Pxx, Pxy)
assert_array_equal(freqsxx, freqsxy)
@pytest.mark.parametrize("mode", ["default", "psd"])
def test_specgram_auto_default_psd_equal(self, mode):
"""
Test that mlab.specgram without mode and with mode 'default' and 'psd'
are all the same.
"""
speca, freqspeca, ta = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides)
specb, freqspecb, tb = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides,
mode=mode)
assert_array_equal(speca, specb)
assert_array_equal(freqspeca, freqspecb)
assert_array_equal(ta, tb)
@pytest.mark.parametrize(
"mode, conv", [
("magnitude", np.abs),
("angle", np.angle),
("phase", lambda x: np.unwrap(np.angle(x), axis=0))
])
def test_specgram_complex_equivalent(self, mode, conv):
specc, freqspecc, tc = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides,
mode='complex')
specm, freqspecm, tm = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides,
mode=mode)
assert_array_equal(freqspecc, freqspecm)
assert_array_equal(tc, tm)
assert_allclose(conv(specc), specm, atol=1e-06)
def test_psd_windowarray_equal(self):
win = mlab.window_hanning(np.ones(self.NFFT_density_real))
speca, fspa = mlab.psd(x=self.y,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=self.nover_density,
pad_to=self.pad_to_density,
sides=self.sides,
window=win)
specb, fspb = mlab.psd(x=self.y,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=self.nover_density,
pad_to=self.pad_to_density,
sides=self.sides)
assert_array_equal(fspa, fspb)
assert_allclose(speca, specb, atol=1e-08)
# extra test for cohere...
def test_cohere():
N = 1024
np.random.seed(19680801)
x = np.random.randn(N)
# phase offset
y = np.roll(x, 20)
# high-freq roll-off
y = np.convolve(y, np.ones(20) / 20., mode='same')
cohsq, f = mlab.cohere(x, y, NFFT=256, Fs=2, noverlap=128)
assert_allclose(np.mean(cohsq), 0.837, atol=1.e-3)
assert np.isreal(np.mean(cohsq))
#*****************************************************************
# These tests were taken from SciPy with some minor modifications.
# They can be retrieved from:
# https://github.com/scipy/scipy/blob/master/scipy/stats/tests/test_kdeoth.py
#*****************************************************************
class TestGaussianKDE:
def test_kde_integer_input(self):
"""Regression test for #1181."""
x1 = np.arange(5)
kde = mlab.GaussianKDE(x1)
y_expected = [0.13480721, 0.18222869, 0.19514935, 0.18222869,
0.13480721]
np.testing.assert_array_almost_equal(kde(x1), y_expected, decimal=6)
def test_gaussian_kde_covariance_caching(self):
x1 = np.array([-7, -5, 1, 4, 5], dtype=float)
xs = np.linspace(-10, 10, num=5)
# These expected values are from scipy 0.10, before some changes to
# gaussian_kde. They were not compared with any external reference.
y_expected = [0.02463386, 0.04689208, 0.05395444, 0.05337754,
0.01664475]
# set it to the default bandwidth.
kde2 = mlab.GaussianKDE(x1, 'scott')
y2 = kde2(xs)
        np.testing.assert_array_almost_equal(y_expected, y2, decimal=7)
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 23 09:42:24 2018
@author: William
"""
import re # regular expressions
import os
path_to_cpp = ''
#OS walk to find the compiled c++ module
for root, dirs, files in os.walk(".", topdown=False):
for branch in dirs:
if 'ssa_cpp' in branch:
path_to_cpp = os.path.join(root, branch)
if path_to_cpp != '':
try:
cwd = os.getcwd()
os.chdir(path_to_cpp)
import ssa_translation
os.chdir(cwd)
except:
os.chdir(cwd)
try:
from snapgene_reader import snapgene_file_to_dict, snapgene_file_to_seqrecord
except:
pass
import time
import json, codecs
from scipy import sparse
from scipy.stats import pearsonr
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.patches as mpatches
import matplotlib.animation as animation
from matplotlib.collections import PatchCollection
from matplotlib import cm
from matplotlib import gridspec
from matplotlib.patches import Ellipse
#import scipy.stats.trim_mean as tmean
from scipy.stats import kde
try:
from Bio import SeqIO
from Bio import Entrez
except:
print('BioPython is not installed, polling genbank will not be possible')
pass
import translation_models as models
class rSNAPsim():
"""
The Single Molecule Simulator (SMS) provides a python class for running
single molecule mRNA translation simulations
When presented with a valid protein sequence the SMS can find open reading frames
and simulate intensity trajectories from translation of the protein with given fluorescent tags.
*model description*
link to paper here / image
*main functions*
-open_seq_file(filepath), opens a txt or .gb file and gets the sequence
-get_orfs(nt_sequence, min_codons), returns open reading frames of a given
sequence and a minimum codon length per protein
-get_temporal_proteins(), gets the proteins after get_orfs
    -analyze_poi(aa_seq,nt_seq), analyzes the protein of interest for
    codon sensitivity and elongation rates
    -__.poi(), class to contain proteins of interest after analysis
    -run_default(), runs get_orfs, get_temporal_proteins, and analyze_poi
    with the first protein found in the sequence
*attributes*
**gene_sequence_str** = string of the nucleotide sequence
**tag_dict** = dictionary with various types of fluorescent tag epitopes
**tag_full** = dictionary of full tag sequences
**aa_keys** = amino acid single letter keys
**codon_types** = flag dictionary of which amino acids are set to Wild-type, fast, or slow
**aa_table** = dictionary of amino acids
**aa_table_r** = reverse dictionary (amino acid letters are the keys)
**strGeneCopy** = dictionary of wild-type tRNA copy numbers
**strGeneCopy_fast** = dictionary of fast tRNA copy numbers
**strGeneCopy_slow** = dictionary of slow tRNA copy numbers
**slow_codons_value** = list of slowest codon tRNA copy numbers
**fast_codons_value** = list of fastest codon tRNA copy numbers
**sensitivity_fast_slow** = list of sensitivity for amino acids
    **poi** = Class container for proteins of interest
**orfs** = dictionary of open reading frames with keys 1,2,3
**seq_str** = sequence string
**proteins** = dictionary of proteins detected in the sequence by ORF
**tagged_proteins** = dictionary of proteins that were detected and tagged
*POI*
    Protein of interest has the following attributes:
**aa_seq** = amino acid sequence
**nt_seq** = nucleotide sequence
**gene_length** = length of the gene
**tag_length** = length of the tags
**total_length** = total length of the full amino acid sequence
**name** = name of the gene
    **tag_types** = what types of tags the protein has
**tag_epitopes** = type of tags and epitope lists per tag
**codon_sensitivity** = how sensitive is the protein per amino acid sequence?
    **CAI** = codon adaptation index
    **CAI_codons** = mean codon adaptation index per codon
*ssa*
The ssa container class has the following attributes:
**no_ribosomes** = number of ribosomes
**n_traj** = number of trajectories
    **k** = all k_elongation rates (calculated from codon sequence)
**no_rib_per_mrna** = number of ribosomes per mRNA strand on average
**rib_density** = ribosome density
**rib_means** = ribosome means
**rib_vec** = raw ribosome location matrix for each trajectory
**intensity_vec** = fluorescence intensities
**time_vec_fixed** = the time vector
**start_time** = the time the simulation was started
**evaluating_inhibitor** = was there an inhibitor present?
**evaluating_frap** = was the simulation subjected to a FRAP test
**time_inhibit** = the time of the perturbation
**autocorr_vec** = autocorrelation vector of intensities
**mean_autocorr** = the average autocorrelations, averaged over trajectories
**error_autocorr** = the standard deviation of the autocorrelation
**dwell_time** = how long do the ribosomes stay on the mRNA strand calculated by the simulation
**ke_sim** = the calculated average elongation rate from the simulations
"""
def __init__(self):
self.gene_sequence_str = ''
self.tag_dict = {'T_SunTag':'EELLSKNYHLENEVARLKK',
'T_Flag':'DYKDDDDK',
'T_Hemagglutinin':'YPYDVPDYA'}
self.tag_colors = {'T_SunTag':'green',
'T_Flag':'blue',
'T_Hemagglutinin':'blue'}
self.tag_full = {'T_Flag':('ATGGACTACAAGGACGACGACGACAAAGGTGAC'
'TACAAAGATGATGACGATAAAGGCGACTATA'
'AGGACGATGACGACAAGGGCGGAAACTCACTGA'
'TCAAGGAAAACATGCGGATGAAGGTGGTGAT'
'GGAGGGCTCCGTGAATGGTCACCAGTTCAAGTG'
'CACCGGAGAGGGAGAGGGAAACCCGTACATG'
'GGAACTCAGACCATGCGCATTAAGGTCATCGAA'
'GGAGGTCCGCTGCCGTTCGCTTTCGATATCC'
'TGGCCACTTCGTTCGGAGGAGGGTCGCGCACGTTC'
'ATCAAGTACCCGAAGGGAATCCCGGACTT'
'CTTTAAGCAGTCATTCCCGGAAGGATTCACTTGGG'
'AACGGGTGACCCGGTATGAAGATGGAGGT'
'GTGGTGACTGTCATGCAAGATACTTCGCTGGAGGATGGG'
'TGCCTCGTGTACCACGTCCAAGTCC'
'GCGGAGTGAATTTCCCGTCCAACGGACCAGTGATGCAG'
'AAAAAGACGAAGGGTTGGGAACCTAA'
'TACTGAAATGATGTACCCCGCAGACGGAGGGCTGAGGG'
'GCTACACCCACATGGCGCTGAAGGTC'
'GACGGAGGAGATTACAAGGATGACGACGATAAGCAACAA'
'GATTACAAAGACGATGATGACAAGG'
'GCCAGCAGGGCGACTACAAGGACGACGACGACAAGCAG'
'CAGGACTACAAAGATGACGATGATAA'
'AGGAGGAGGACATCTGTCCTGTTCGTTCGTGACCACCT'
'ACAGATCAAAGAAAACCGTGGGAAAC'
'ATCAAGATGCCGGGCATTCATGCCGTCGACCACCGCCT'
'GGAGCGGCTCGAAGAATCAGACAATG'
'AGATGTTCGTCGTGCAAAGAGAACATGCCGTGGCCAAGTT'
'CGCGGGACTGGGAGGCGGTGGAGG'
'CGATTACAAAGACGATGATGACAAGGGTGACTATAAAGA'
'CGACGATGACAAAGGGGATTACAAG'
'GATGATGATGATAAGGGAGGCGGTGGATCAGGTGGAG'
'GAGGTTCACTGCAG')}
self.aa_keys = ['A', 'R', 'N', 'D', 'C', 'Q', 'E', 'G', 'H', 'I', 'L', 'K', 'M', 'F',
'P', 'S', 'T', 'W', 'Y', 'V', '*']
self.codon_types = dict(zip(self.aa_keys, np.ones((1, 21)).flatten().astype(int).tolist()))
self.aa_table = {
'ATA':'I', 'ATC':'I', 'ATT':'I', 'ATG':'M',
'ACA':'T', 'ACC':'T', 'ACG':'T', 'ACT':'T',
'AAC':'N', 'AAT':'N', 'AAA':'K', 'AAG':'K',
'AGC':'S', 'AGT':'S', 'AGA':'R', 'AGG':'R',
'CTA':'L', 'CTC':'L', 'CTG':'L', 'CTT':'L',
'CCA':'P', 'CCC':'P', 'CCG':'P', 'CCT':'P',
'CAC':'H', 'CAT':'H', 'CAA':'Q', 'CAG':'Q',
'CGA':'R', 'CGC':'R', 'CGG':'R', 'CGT':'R',
'GTA':'V', 'GTC':'V', 'GTG':'V', 'GTT':'V',
'GCA':'A', 'GCC':'A', 'GCG':'A', 'GCT':'A',
'GAC':'D', 'GAT':'D', 'GAA':'E', 'GAG':'E',
'GGA':'G', 'GGC':'G', 'GGG':'G', 'GGT':'G',
'TCA':'S', 'TCC':'S', 'TCG':'S', 'TCT':'S',
'TTC':'F', 'TTT':'F', 'TTA':'L', 'TTG':'L',
'TAC':'Y', 'TAT':'Y', 'TAA':'*', 'TAG':'*',
'TGC':'C', 'TGT':'C', 'TGA':'*', 'TGG':'W',
'AUA':'I', 'AUC':'I', 'AUU':'I', 'AUG':'M',
'ACA':'T', 'ACC':'T', 'ACG':'T', 'ACU':'T',
'AAC':'N', 'AAU':'N', 'AAA':'K', 'AAG':'K',
'AGC':'S', 'AGU':'S', 'AGA':'R', 'AGG':'R',
'CUA':'L', 'CUC':'L', 'CUG':'L', 'CUU':'L',
'CCA':'P', 'CCC':'P', 'CCG':'P', 'CCU':'P',
'CAC':'H', 'CAU':'H', 'CAA':'Q', 'CAG':'Q',
'CGA':'R', 'CGC':'R', 'CGG':'R', 'CGU':'R',
'GUA':'V', 'GUC':'V', 'GUG':'V', 'GUU':'V',
'GCA':'A', 'GCC':'A', 'GCG':'A', 'GCU':'A',
'GAC':'D', 'GAU':'D', 'GAA':'E', 'GAG':'E',
'GGA':'G', 'GGC':'G', 'GGG':'G', 'GGU':'G',
'UCA':'S', 'UCC':'S', 'UCG':'S', 'UCU':'S',
'UUC':'F', 'UUU':'F', 'UUA':'L', 'UUG':'L',
'UAC':'Y', 'UAU':'Y', 'UAA':'*', 'UAG':'*',
'UGC':'C', 'UGU':'C', 'UGA':'*', 'UGG':'W',}
self.aa_table_r = {'A':['GCA', 'GCC', 'GCG', 'GCT','GCU'],
'R':['CGA', 'CGC', 'CGG', 'CGT','AGG','AGA','CGU'],
'N':['AAC', 'AAT','AAU'],
'D':['GAC', 'GAT','GAU'],
'C':['TGC', 'TGT','UGC','UGU'],
'Q':['CAA', 'CAG'],
'E':['GAA', 'GAG'],
                           'G':['GGT', 'GGC', 'GGA', 'GGG', 'GGU'],
'H':['CAC', 'CAT','CAU'],
'I':['ATT', 'ATC', 'ATA','AUU','AUC','AUA'],
'L':['CTA', 'CTC', 'CTG', 'CTT', 'TTA', 'TTG','CUA', 'CUC', 'CUG', 'CUU', 'UUA', 'UUG'],
'K':['AAA', 'AAG'],
'M':['ATG','AUG'],
'F':['TTC', 'TTT','UUC','UUU'],
'P':['CCT', 'CCC', 'CCG', 'CCA','CCU'],
                           'S':['TCA', 'TCC', 'TCG', 'TCT', 'AGC', 'AGT', 'UCA', 'UCC', 'UCG', 'UCU', 'AGU'],
'T':['ACA', 'ACC', 'ACG', 'ACT','ACU'],
'W':['TGG','UGG'],
'Y':['TAT', 'TAC','UAC','UAU'],
'V':['GTA', 'GTC', 'GTT','GTG','GUG','GUU','GUC','GUA'],
'*':['TGA', 'TAG', 'TAA','UGA','UAG','UAA']
}
self.strGeneCopy = {'TTT': 17.6, 'TCT': 15.2, 'TAT': 12.2, 'TGT': 10.6, 'TTC': 20.3,
'TCC': 17.7, 'TAC': 15.3, 'TGC': 12.6, 'TTA': 7.7, 'TCA': 12.2,
'TAA': 1.0, 'TGA': 1.6, 'TTG': 12.9, 'TCG': 4.4, 'TAG': 0.8,
'TGG': 13.2, 'CTT': 13.2, 'CCT': 17.5, 'CAT': 10.9, 'CGT': 4.5,
'CTC': 19.6, 'CCC': 19.8, 'CAC': 15.1, 'CGC': 10.4, 'CTA': 7.2,
'CCA': 16.9, 'CAA': 12.3, 'CGA': 6.2, 'CTG': 39.6, 'CCG': 6.9,
'CAG': 34.2, 'CGG': 11.4, 'ATT': 16.0, 'ACT': 13.1, 'AAT': 17.0,
'AGT': 12.1, 'ATC': 20.8, 'ACC': 18.9, 'AAC': 19.1, 'AGC': 19.5,
'ATA': 7.5, 'ACA': 15.1, 'AAA': 24.4, 'AGA': 12.2, 'ATG': 22.0,
'ACG': 6.1, 'AAG': 31.9, 'AGG': 12.0, 'GTT': 11.0, 'GCT': 18.4,
'GAT': 21.8, 'GGT': 10.8, 'GTC': 14.5, 'GCC': 27.7, 'GAC': 25.1,
'GGC': 22.2, 'GTA': 7.1, 'GCA': 15.8, 'GAA': 29.0, 'GGA': 16.5,
'GTG': 28.1, 'GCG': 7.4, 'GAG': 39.6, 'GGG': 16.5}
# add the U codons
for key in list(self.strGeneCopy.keys()):
if 'T' in key:
val = self.strGeneCopy[key]
newkey = key.replace('T','U')
self.strGeneCopy[newkey] = val
self.strGeneCopy_fast = {'GCT': 27.7, 'GCC': 27.7, 'GCA': 27.7, 'GCG': 27.7, #A
'CGT': 12.2, 'CGC': 12.2, 'CGA': 12.2, 'CGG': 12.2,
'AGA': 12.2, 'AGG': 12.2, # R
'AAT': 19.1, 'AAC': 19.1, #N
'GAT': 25.1, 'GAC': 25.1, # D
'TGT': 12.6, 'TGC': 12.6, # C
'CAA': 34.2, 'CAG': 34.2, # Q
'GAA': 39.6, 'GAG': 39.6, #E
'GGT': 22.2, 'GGC': 22.2, 'GGA': 22.2, 'GGG': 22.2, # G
'CAT': 15.1, 'CAC': 15.1, # H
'ATT': 20.8, 'ATC': 20.8, 'ATA': 20.8, # I
'TTA': 39.6, 'TTG': 39.6, 'CTT': 39.6, 'CTC': 39.6,
'CTA': 39.6, 'CTG': 39.6, # L
'AAA': 31.9, 'AAG': 31.9, # K
'ATG': 22.0, #M
'TTT': 20.3, 'TTC': 20.3, # F
'CCT': 19.8, 'CCC': 19.8, 'CCA': 19.8, 'CCG': 19.8, # P
'TCT': 19.5, 'TCC': 19.5, 'TCA': 19.5, 'TCG': 19.5,
'AGT': 19.5, 'AGC': 19.5, # S
'ACT': 18.9, 'ACC': 18.9, 'ACA': 18.9, 'ACG': 18.9, # T
'TGG': 13.2, #W
'TAT': 15.3, 'TAC': 15.3, # Y
'GTT': 28.1, 'GTC': 28.1, 'GTA':28.1, 'GTG': 28.1, # V
'TAA': 1.6, 'TAG': 1.6, 'TGA':1.6 #STOP
}
for key in list(self.strGeneCopy_fast.keys()):
if 'T' in key:
val = self.strGeneCopy_fast[key]
newkey = key.replace('T','U')
self.strGeneCopy_fast[newkey] = val
self.strGeneCopy_slow = {'GCT': 7.4, 'GCC': 7.4, 'GCA': 7.4, 'GCG': 7.4, #A
'CGT': 4.5, 'CGC': 4.5, 'CGA': 4.5, 'CGG': 4.5,
'AGA':4.5, 'AGG':4.5, #R
'AAT': 17.0, 'AAC':17.0, #%N
'GAT': 21.8, 'GAC': 21.8, #D
'TGT': 10.6, 'TGC':10.6, #C
'CAA': 12.3, 'CAG': 12.3, #Q
'GAA': 29.0, 'GAG': 29.0, #E
'GGT': 10.8, 'GGC': 10.8, 'GGA': 10.8, 'GGG': 10.8, #G
'CAT': 10.9, 'CAC':10.9, #H
'ATT': 7.5, 'ATC': 7.5, 'ATA': 7.5, #I
'TTA': 7.2, 'TTG':7.2, 'CTT': 7.2, 'CTC': 7.2,
'CTA': 7.2, 'CTG': 7.2, #L
'AAA': 24.4, 'AAG': 24.4, #K
'ATG': 22.0, #M
'TTT': 17.6, 'TTC': 17.6, #F
'CCT': 6.9, 'CCC': 6.9, 'CCA': 6.9, 'CCG': 6.9, #P
'TCT': 4.4, 'TCC': 4.4, 'TCA': 4.4, 'TCG': 4.4,
'AGT': 4.4, 'AGC': 4.4, #S
'ACT': 6.1, 'ACC': 6.1, 'ACA': 6.1, 'ACG': 6.1,#T
'TGG': 13.2, #W
'TAT': 12.2, 'TAC': 12.2, #Y
'GTT': 7.1, 'GTC':7.1, 'GTA': 7.1, 'GTG': 7.1, # V
'TAA': 0.8, 'TAG': 0.8, 'TGA': 0.8 #STOP CODON}
}
for key in list(self.strGeneCopy_slow.keys()):
if 'T' in key:
val = self.strGeneCopy_slow[key]
newkey = key.replace('T','U')
self.strGeneCopy_slow[newkey] = val
self.fast_codons_value = [27.7, 12.2, 19.1, 25.1, 12.6, 34.2, 39.6, 22.2, 15.1,
20.8, 39.6, 31.9, 22, 20.3, 19.8, 19.5,
18.9, 13.2, 15.3, 28.1, 1.6]
self.slow_codons_value = [7.4, 4.5, 17, 21.8, 10.6, 12.3, 29, 10.8, 10.9, 7.5, 7.2,
24.4, 22, 17.6, 6.9, 4.4, 6.1, 13.2, 12.2, 7.1, .8]
fullcodonkeys = ['GCT', 'CGT', 'AAT', 'GAT', 'TGT', 'CAA', 'GAA', 'GGT', 'CAT',
'ATT', 'TTA', 'AAA', 'ATG', 'TTT', 'CCT', 'TCT',
'ACT', 'TGG', 'TAT', 'GTT', 'TAA',
'GCU', 'CGU', 'AAU', 'GAU', 'UGU', 'CAA', 'GAA', 'GGU', 'CAU',
                         'AUU', 'UUA', 'AAA', 'AUG', 'UUU', 'CCU', 'UCU',
'ACU', 'UGG', 'UAU', 'GUU', 'UAA', ]
codonkeys = ['GCT', 'CGT', 'AAT', 'GAT', 'TGT', 'CAA', 'GAA', 'GGT', 'CAT',
'ATT', 'TTA', 'AAA', 'ATG', 'TTT', 'CCT', 'TCT',
'ACT', 'TGG', 'TAT', 'GTT', 'TAA']
self.sensitivity_fast_slow = []
for i in range(len(codonkeys)):
self.sensitivity_fast_slow.append(self.strGeneCopy_fast[codonkeys[i]] / self.strGeneCopy_slow[codonkeys[i]])
def __update_sensitivity(self):
"""
updates sensitivities for the GUI implementation call
"""
self.fast_codons_value = []
for key in self.aa_keys:
values = []
codons = self.aa_table_r[key]
for codon in codons:
values.append(self.strGeneCopy[codon])
self.fast_codons_value.append(max(values))
for codon in codons:
self.strGeneCopy_fast[codon] = max(values)
self.slow_codons_value = []
for key in self.aa_keys:
values = []
codons = self.aa_table_r[key]
for codon in codons:
values.append(self.strGeneCopy_slow[codon])
self.slow_codons_value.append(min(values))
for codon in codons:
self.strGeneCopy_slow[codon] = min(values)
codonkeys = ['GCT', 'CGT', 'AAT', 'GAT', 'TGT', 'CAA', 'GAA', 'GGT', 'CAT', 'ATT',
'TTA', 'AAA', 'ATG', 'TTT', 'CCT', 'TCT', 'ACT', 'TGG', 'TAT', 'GTT', 'TAA']
self.sensitivity_fast_slow = []
for i in range(len(codonkeys)):
self.sensitivity_fast_slow.append(self.strGeneCopy_fast[codonkeys[i]] / self.strGeneCopy_slow[codonkeys[i]])
    def load_tags(self):
        '''
        loads custom tags previously saved to custom_tags.txt into tag_dict and tag_full
        '''
        with open("custom_tags.txt", "r") as f:
            raw = f.readlines()
        previous_tags = [line for line in raw if line != '\n']
        for line in previous_tags:
            custom_tag = line.strip('\n').split('---')
            if custom_tag[0] not in self.tag_dict.keys():
                self.tag_dict[custom_tag[0]] = custom_tag[2]
                self.tag_full[custom_tag[0]] = custom_tag[1]
    def add_tag(self, nt_seq, name):
        '''
        adds a custom tag sequence and saves it to custom_tags.txt as name---nt---aa
        '''
        with open("custom_tags.txt", "r") as f:
            raw = f.readlines()
        previous_tags = [line for line in raw if line != '\n']
        if not set(nt_seq.lower()).issubset(set(['a', 't', 'c', 'g', 'u'])):
            print('invalid NT sequence')
            return
        aa_seq = self.nt2aa(nt_seq)
        newtag = name + '---' + nt_seq.lower() + '---' + aa_seq.upper() + '\n'
        if newtag not in previous_tags:
            previous_tags.append(newtag)
        with open("custom_tags.txt", "w+") as f:
            for item in previous_tags:
                f.write('%s' % item)
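    # Usage sketch for the custom-tag helpers (hedged; the tag name and sequence are
    # hypothetical, and custom_tags.txt must exist and be writable in the working directory):
    #
    #   sms = rSNAPsim()
    #   sms.add_tag('gattacagattacagattaca', 'T_myTag')  # appends name---nt---aa to custom_tags.txt
    #   sms.load_tags()                                  # reloads tag_dict / tag_full with the new entry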
def nt2aa(self, nt_seq):
        '''
        Translates a nucleotide sequence into an amino acid sequence
        *args*
            **nt_seq**, nucleotide sequence as a string (length should be a multiple of 3)
        *returns*
            **aa_seq**, amino acid sequence as a string
        '''
aa = ''
for i in range(0, len(nt_seq), 3):
aa += self.aa_table[nt_seq[i:i+3]]
return aa
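    # Minimal example of nt2aa (hedged; works for DNA or RNA spellings because
    # aa_table carries both T- and U-codons, and assumes an in-frame sequence):
    #
    #   sms = rSNAPsim()
    #   sms.nt2aa('ATGGCTTAA')   # -> 'MA*'
    #   sms.nt2aa('AUGGCUUAA')   # -> 'MA*'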
def get_orfs(self, nt_seq='', min_codons=80):
        '''
        Returns the open reading frames of the given nucleotide sequence
        orfs = {'1':[proteins],
                '2':[proteins],
                '3':[proteins]}
        *keyword args*
            **nt_seq**, nucleotide sequence as a string. If left blank, uses
            self.sequence_str
            **min_codons**, minimum number of codons for a stretch to be considered
            a protein in the open reading frame
        '''
if nt_seq == '':
nt_seq = self.sequence_str
allstarts = np.array([m.start() for m in re.finditer('(?=A[TU]G((?:.{3})+?)[TU](?:AG|AA|GA))', nt_seq)])
#allsegments = re.findall('(?=A[TU]G((?:.{3})+?)[TU](?:AG|AA|GA))',self.sequence_str)
allstops = np.array([m.start() for m in re.finditer('(?=[TU](?:AG|AA|GA))', nt_seq)])
start_frames = allstarts%3
stop_frames = allstops%3
min_len = min_codons*3
orf1_starts = allstarts[np.where(start_frames == 0)]
orf2_starts = allstarts[np.where(start_frames == 1)]
orf3_starts = allstarts[np.where(start_frames == 2)]
orf1_stops = allstops[np.where(stop_frames == 0)]
orf2_stops = allstops[np.where(stop_frames == 1)]
orf3_stops = allstops[np.where(stop_frames == 2)]
self.starts = [orf1_starts, orf2_starts, orf3_starts]
self.stops = [orf1_stops, orf2_stops, orf3_stops]
        self.orfs = {'1':[], '2':[], '3':[]}
laststop = 0
for start in orf1_starts:
nextstop = orf1_stops[np.where(orf1_stops > start)[0][0]]
if (nextstop - start) > min_len:
if nextstop != laststop:
self.orfs['1'].append((start, nextstop))
laststop = nextstop
laststop = 0
for start in orf2_starts:
nextstop = orf2_stops[np.where(orf2_stops > start)[0][0]]
if (nextstop - start) > min_len:
if nextstop != laststop:
self.orfs['2'].append((start, nextstop))
laststop = nextstop
laststop = 0
for start in orf3_starts:
nextstop = orf3_stops[np.where(orf3_stops > start)[0][0]]
if (nextstop - start) > min_len:
if nextstop != laststop:
self.orfs['3'].append((start, nextstop))
laststop = nextstop
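    # Usage sketch for get_orfs (hedged; assumes a sequence was loaded first, e.g.
    # via open_seq_file, so that self.sequence_str is populated):
    #
    #   sms.get_orfs(min_codons=80)
    #   sms.orfs['1']   # [(start, stop), ...] index pairs for reading frame 1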
def get_k_construct(self, nt_seq, k_init, k_elong_mean, codon_types=None):
        '''
        Returns the k_elongation rates of a given nucleotide sequence under constructed conditions,
        given some sort of key describing which amino acids are slow, fast or natural
        *args*
            **nt_seq**, nucleotide sequence to get the propensities of
            **k_init**, initiation rate of starting translation
            **k_elong_mean**, average rate of elongation for the protein translation
        *keyword args*
            **codon_types**, a dictionary or identifier determining which amino acids are slow, fast or natural.
            self.codon_types is an example dictionary for the user to change / utilize; if codon_types is left blank
            get_k_construct uses this internal dictionary
            ex: codon_types = 'slow' or 'rare'    all amino acids set to slow
                codon_types = 'fast' or 'common'  all amino acids set to fast
                codon_types = 'natural'           all amino acids set to natural
                codon_types = {'A':[0], 'T':[2]}  A set to slow, T set to fast
                codon_types = {'rare':['A','R'], 'common':['L']}  A and R set to slow, L set to fast
        '''
        if codon_types is None:
codon_types = self.codon_types
else:
all_natural = dict(zip(self.aa_keys, np.ones((1, 20)).flatten().astype(int).tolist()))
if isinstance(codon_types, str):
if codon_types == 'rare' or codon_types == 'slow':
all_natural = dict(zip(self.aa_keys, np.zeros((1, 20)).flatten().astype(int).tolist()))
if codon_types == 'common' or codon_types == 'fast':
all_natural = dict(zip(self.aa_keys, (2*np.ones((1, 20))).flatten().astype(int).tolist()))
if isinstance(codon_types, dict):
for key in codon_types.keys():
if isinstance(key, str):
if key.lower() not in ['rare', 'common', 'natural']:
if key.upper() in self.aa_keys:
                                if codon_types[key] in [0, 1, 2]:
                                    all_natural[key] = codon_types[key]
if codon_types[key] in ['rare', 'common', 'natural']:
if codon_types[key] == 'rare':
all_natural[key] = 0
if codon_types[key] == 'common':
all_natural[key] = 2
if codon_types[key] == 'natural':
all_natural[key] = 1
else:
newkeys = codon_types[key]
for newkey in newkeys:
if newkey.upper() in self.aa_keys:
if key.lower() == 'rare':
all_natural[newkey.upper()] = 0
if key.lower() == 'common':
all_natural[newkey.upper()] = 2
if key.lower() == 'natural':
all_natural[newkey.upper()] = 1
if isinstance(key, int):
newkeys = codon_types[key]
for newkey in newkeys:
all_natural[newkey] = key
codon_types = all_natural
aa_seq = self.nt2aa(nt_seq)
tRNA_design = np.zeros((1, len(aa_seq)))
tRNA_norm = np.zeros((1, len(aa_seq)))
seperated_codons = [nt_seq[i:i+3] for i in range(0, len(nt_seq), 3)] #split codons by 3
for i in range(len(seperated_codons)):
tRNA_norm[0, i] = self.strGeneCopy[seperated_codons[i]]
for i in range(len(self.aa_keys)-1):
fs = codon_types[self.aa_keys[i]]
indexes = [m.start() for m in re.finditer(self.aa_keys[i], aa_seq)]
for index in indexes:
if fs == 0:
tRNA_design[0, index] = self.slow_codons_value[i]
if fs == 2:
tRNA_design[0, index] = self.fast_codons_value[i]
if fs == 1:
tRNA_design[0, index] = tRNA_norm[0, index]
tRNA_design[0, -1] = tRNA_norm[0, -1]
mean_tRNA_copynumber = np.mean(list(self.strGeneCopy.values()))
k_elongation_design = (tRNA_design / mean_tRNA_copynumber) * k_elong_mean
all_k_design = [k_init] + k_elongation_design.flatten().tolist() + [k_elong_mean]
return all_k_design
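    # Usage sketch for get_k_construct (hedged; assumes analyze_poi has populated
    # self.POI). The returned list is [k_init, k_codon_1, ..., k_codon_N, k_elong_mean]:
    #
    #   all_k = sms.get_k_construct(sms.POI.nt_seq, k_init=0.03, k_elong_mean=10,
    #                               codon_types={'rare': ['A', 'R'], 'common': ['L']})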
def get_ui(self, nt_seq):
        '''
        returns the ratio of the average tRNA gene copy number to each codon's copy number
        along the sequence (one normalization value per codon)
        '''
        mean_u = np.mean(list(self.strGeneCopy.values()))
ui = []
for i in range(0, len(nt_seq), 3):
ui.append(mean_u/ self.strGeneCopy[nt_seq[i:i+3]])
return ui
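    # Sketch of what get_ui returns (hedged): one normalization factor per codon,
    # mean copy number / codon copy number, so rare codons give values above 1 and
    # common codons (e.g. CTG at 39.6) give values below 1:
    #
    #   ui = sms.get_ui(sms.POI.nt_seq)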
    def get_k_3_frame(self, nt_seq, k_elong_mean):
        '''
        returns the elongation rates for all three reading frames of nt_seq,
        concatenated into a single list
        '''
kelongs = []
for n in range(3):
if n !=0:
codons = nt_seq[n:-(3-n)]
else:
codons = nt_seq
genelength = int(len(codons)/3)
seperated_codons = [codons[i:i+3] for i in range(0, len(codons), 3)] #split codons by 3
k_elongation = np.zeros((1, genelength))
tRNA_copynumber = np.zeros((1, genelength))
for i in range(len(seperated_codons)):
tRNA_copynumber[0, i] = self.strGeneCopy[seperated_codons[i]]
mean_tRNA_copynumber = np.mean(list(self.strGeneCopy.values()))
k_elongation = (tRNA_copynumber / mean_tRNA_copynumber) * k_elong_mean
            kelongs = kelongs + k_elongation.flatten().tolist()[:-1]
return kelongs
def get_k(self, nt_seq, k_init, k_elong_mean):
        '''
        returns all propensities for a given nucleotide sequence
        *args*
            **nt_seq**, nucleotide sequence as a string
            **k_init**, initiation rate of ribosome binding
            **k_elong_mean**, average elongation rate, experimentally determined
        '''
codons = nt_seq
genelength = int(len(codons)/3)
seperated_codons = [codons[i:i+3] for i in range(0, len(codons), 3)] #split codons by 3
k_elongation = np.zeros((1, genelength))
tRNA_copynumber = np.zeros((1, genelength))
for i in range(len(seperated_codons)):
tRNA_copynumber[0, i] = self.strGeneCopy[seperated_codons[i]]
mean_tRNA_copynumber = np.mean(list(self.strGeneCopy.values()))
k_elongation = (tRNA_copynumber / mean_tRNA_copynumber) * k_elong_mean
        all_k = [k_init] + k_elongation.flatten().tolist()[:-1] + [10]  # last entry is a fixed termination rate
return all_k
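    # Usage sketch for get_k (hedged): the last codon's elongation rate is dropped
    # and replaced by the hard-coded termination rate, so the result has the form
    # [k_init, k_1, ..., k_{N-1}, 10]:
    #
    #   all_k = sms.get_k(sms.POI.nt_seq, k_init=0.03, k_elong_mean=10)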
def get_temporal_proteins(self):
        '''
        gets all the temporal proteins after getting the ORFs
        __.tagged_proteins = dictionary with keys of tag types and lists of proteins
        __.pois = list of proteins of interest
        __.pois_seq = list of nucleotide sequences of the proteins of interest
        __.proteins = dictionary with keys of the 1, 2 or 3 ORFs
        '''
self.proteins = {'1':[], '2':[], '3':[]}
self.tagged_proteins = {a:[] for a in self.tag_dict.keys()}
self.tagged_protein_seq = {a:[] for a in self.tag_dict.keys()}
for i in range(len(self.orfs)):
for j in range(len(self.orfs[str(i+1)])):
pro = self.nt2aa(self.sequence_str[self.orfs[str(i+1)][j][0]:self.orfs[str(i+1)][j][1]+3])
nt_seq = self.sequence_str[self.orfs[str(i+1)][j][0]:self.orfs[str(i+1)][j][1]+3]
self.proteins[str(i+1)].append(pro)
for tag in self.tag_dict.keys():
if self.tag_dict[tag] in pro:
self.tagged_protein_seq[tag].append(nt_seq)
self.tagged_proteins[tag].append(pro)
tags = 0
for key in self.tagged_proteins.keys():
tags += len(self.tagged_proteins[key])
self.pois = []
self.pois_seq = []
for tag in self.tag_dict.keys():
for i in range(len(self.tagged_proteins[tag])):
if self.tagged_proteins[tag][i] not in self.pois:
self.pois.append(self.tagged_proteins[tag][i])
self.pois_seq.append(self.tagged_protein_seq[tag][i])
if len(self.pois) == 0:
POIs = []
pois_s = []
pois_nt = []
for i in range(len(self.gb_obj.features)):
try:
self.gb_obj.features[i].qualifiers['translation']
if tags == 0:
POIs.append(self.gb_obj.features[i])
pois_s.append(self.nt2aa(self.tag_full['T_Flag']) + self.gb_obj.features[i].qualifiers['translation'][0])
pois_nt.append(self.tag_full['T_Flag'] + str(self.gb_obj.seq)[int(self.gb_obj.features[i].location.start):int(self.gb_obj.features[i].location.end)])
else:
POIs.append(self.gb_obj.features[i])
pois_s.append(self.gb_obj.features[i].qualifiers['translation'][0])
pois_nt.append(str(self.gb_obj.seq)[int(self.gb_obj.features[i].location.start):int(self.gb_obj.features[i].location.end)])
                except KeyError:
                    pass
self.pois = pois_s
self.pois_seq = pois_nt
def analyze_poi(self, protein, sequence, epitope_loc = 'front'):
        '''
        Analyzes the protein of interest and stores it in __.POI
        *args*
            **protein**, amino acid sequence as a string
            **sequence**, nucleotide sequence that goes with the protein
            **epitope_loc**, consider the epitope location as the front, middle or back
            of the epitope, e.g. for DYKDDDDK: front: 0, middle: 4, back: 8
        '''
self.POI = poi()
self.POI.nt_seq = sequence
self.POI.aa_seq = protein
self.POI.name = self.sequence_name
self.POI.total_length = len(protein)
self.POI.tag_types = []
for tag in self.tag_dict.keys():
if self.tag_dict[tag] in protein:
self.POI.tag_types.append(tag)
self.POI.tag_epitopes = {a:[] for a in self.POI.tag_types}
gs = protein
for i in range(len(self.POI.tag_types)):
try:
nt_tag = self.tag_full[self.POI.tag_types[i]]
aa_tag = self.nt2aa(nt_tag)
            except KeyError:
epi = self.tag_dict[self.POI.tag_types[i]]
firstep = self.POI.aa_seq.find(epi)
lastep = len(self.POI.aa_seq) - self.POI.aa_seq[::-1].find(epi[::-1])
aa_tag = self.POI.aa_seq[firstep:lastep]
nt_tag = self.POI.nt_seq[3*firstep:3*lastep]
if epitope_loc == 'front':
offset = 0
if epitope_loc == 'middle':
offset = int(len(self.tag_dict[self.POI.tag_types[i]])/2)
if epitope_loc == 'back':
offset = len(self.tag_dict[self.POI.tag_types[i]])
self.POI.tag_epitopes[self.POI.tag_types[i]] = [m.start()+1+offset for m in re.finditer(self.tag_dict[self.POI.tag_types[i]], self.POI.aa_seq)]
gs = gs.replace(aa_tag, '')
self.POI.gene_seq = gs
self.POI.gene_length = len(gs)
codons = []
for i in range(0, len(sequence), 3):
codons.append(sequence[i:i+3])
self.POI.codons = codons
self.POI.codon_sensitivity, self.POI.CAI, self.POI.CAI_codons = self.codon_usage(self.POI.nt_seq)
def open_seq_file(self, seqfile):
'''
Reads a sequence file, either a .txt file or a .gb genbank file
*args*
**seqfile**, sequence file either in txt, gb, gbk format
'''
seq = seqfile
self.sequence_name = ''
        if '.dna' in seq:
            self.sequence_name = seq[:-4]
            try:
                seq_record = snapgene_file_to_seqrecord(seq)
                self.sequence_str = str(seq_record.seq)
            except Exception:
                print('To read .dna files please install snapgene_reader: pip install snapgene_reader - https://github.com/IsaacLuo/SnapGeneFileReader')
if '.txt' in seq:
with open(seq) as f:
raw = f.readlines()
raw = ''.join(raw)
onlychar = re.split(r'[^A-Za-z]', raw)
validt = ['A', 'G', 'T', 'C']
validu = ['A', 'G', 'U', 'C']
namelen = 0
self.sequence_str = ''
for i in range(len(onlychar)):
section = onlychar[i]
if set(section.upper()) == set(validt):
self.sequence_str += section.upper()
elif set(section.upper()) == set(validu):
self.sequence_str += section.upper()
else:
if len(section)>namelen:
self.sequence_name = section
namelen = len(section)
if '.gb' in seq:
gb_record = SeqIO.read(open(seq, "r"), "genbank")
self.sequence_str = str(gb_record.seq)
self.sequence_name = gb_record.name
self.gb_obj = gb_record
        if self.sequence_name == '':
            self.sequence_name = seqfile.replace('.txt', '').replace('.gb', '')
def codon_usage(self, nt_seq):
        '''
        Analyzes codon usage from the nucleotide sequence
        *args*
            **nt_seq**, nucleotide sequence as a string
        *returns*
            **codon_sensitivity**, a list of codon sensitivities for the nucleotide sequence
            **cai**, codon adaptation index of the sequence
            **cai_codons**, codon adaptation index per codon
        '''
codon_usage = np.zeros((1, 21))
gene_len = len(nt_seq)/3
aa_seq = self.nt2aa(nt_seq)
for i in range(len(self.aa_keys)-1):
codon_usage[0, i] = len(re.findall(self.aa_keys[i], aa_seq))
        codon_usage[0, 20] = len(re.findall(r'\*', aa_seq))
codon_norm = codon_usage/gene_len
codon_sensitivity = np.round(codon_norm*self.sensitivity_fast_slow, 2)
cai_codons = []
for i in range(0, len(nt_seq), 3):
cai_codons.append(self.strGeneCopy[nt_seq[i:i+3]] / self.strGeneCopy_fast[nt_seq[i:i+3]])
cai = self.geomean(cai_codons)
return codon_sensitivity, cai, cai_codons
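    # Sketch of the CAI computation above (hedged): each codon contributes the ratio of
    # its wild-type copy number to that of its fastest synonym, and self.geomean
    # (presumably a geometric-mean helper defined elsewhere in this class) combines them:
    #
    #   cs, cai, cai_codons = sms.codon_usage(sms.POI.nt_seq)
    #   # cai close to 1 means the sequence already uses the fastest synonymous codons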
def get_probvec(self):
        '''
        returns the probe vectors (epitope positions by codon position) associated with the tagged sequence stored in POI
        *returns*
            **probe_vec**, cumulative probe intensity vector by codon position, e.g. [0,0,0,0,1,1,1,1,2,2,2,3,3,3, ...]
            **probe_loc**, epitope positions as a binary vector, 1 at epitope positions, 0 everywhere else
        '''
probePositions = []
keylist = list(self.POI.tag_epitopes.keys())
for n in range(len(keylist)):
probePosition = []
key = keylist[n]
probePosition = probePosition + self.POI.tag_epitopes[key]
if probePosition != []:
probePosition = np.unique(probePosition).tolist()
probePositions.append(probePosition)
genelength = self.POI.total_length
pvfull = np.zeros((1, genelength+1)).astype(int).flatten()
if len(probePositions) > 1:
k = 0
for n in range(len(keylist)):
pv = np.zeros((1, genelength+1)).astype(int).flatten()
key = keylist[n]
probePosition = probePositions[k]
k+=1
if len(self.POI.tag_epitopes[key]) != 0:
for i in range(len(probePosition)):
pv[probePosition[i]:] = i+1
if n > 0:
pvfull = np.vstack((pvfull,pv))
else:
pvfull = pv
else:
probePosition = probePositions[0]
for n in range(len(keylist)):
pv = np.zeros((1, genelength+1)).astype(int).flatten()
key = keylist[n]
if len(self.POI.tag_epitopes[key]) != 0:
for i in range(len(probePosition)):
pv[probePosition[i]:] = i+1
if n > 0:
pvfull = np.vstack((pvfull,pv))
else:
pvfull = pv
numtags = 0
for key in keylist:
if len(self.POI.tag_epitopes[key]) != 0:
numtags += 1
ploc = np.zeros((numtags, self.POI.total_length+1)).astype(int)
numind = 0
for n in range(len(keylist)):
key = keylist[n]
if len(self.POI.tag_epitopes[key]) != 0:
ploc[numind][self.POI.tag_epitopes[key]] = 1
numind += 1
return pvfull, ploc
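    # Illustrative get_probvec output for one tag with epitopes at codons 5 and 9
    # (hedged, toy numbers only):
    #
    #   pv, ploc = sms.get_probvec()
    #   # pv   -> [0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, ...]  cumulative epitope count
    #   # ploc -> [0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, ...]  binary epitope positions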
def simple_model(self, poi, tag, ki,ke):
        '''
        Simplified analytical model:
        returns the analytical tau (autocovariance decay time), mean intensity,
        and intensity variance for the given tag, initiation rate ki and elongation rate ke
        '''
L = poi.total_length #get the total length of the gene
Lm = np.mean(poi.tag_epitopes[tag]) #the mean location of the tag epitopes
L_tag = int((poi.tag_epitopes[tag][-1] - poi.tag_epitopes[tag][0]) / 2)
ke_analytical = L*ke / np.sum(self.get_ui(poi.nt_seq[:-3]))
tau_analytical = L_tag/ke_analytical #analytical tau ie autocovariance time
mean_analytical = ki*tau_analytical* (1.-Lm/float(L)) # mean intensity
var_analytical = ki*tau_analytical* (1.-Lm/float(L))**2 #var intensity
return tau_analytical,mean_analytical,var_analytical
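    # Worked example of simple_model (hedged, round numbers): with ki = 0.03 /s, an
    # effective ke_analytical of 5 codons/s and a tag half-width L_tag of 100 codons,
    # tau = 100/5 = 20 s; with Lm/L = 0.1 the mean intensity is 0.03*20*0.9 = 0.54
    # and the variance is 0.03*20*0.9**2 = 0.486.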
def get_binned_k_emphasize_probes(self,k,bins,pl):
        '''
        evenly bins elongation rates as best it can, keeping probe regions in mind.
        '''
        probe_region_start = np.where(pl > 0)[0][0]
        probe_region_end = np.where(pl > 0)[0][-1]
binsize = int(np.floor(len(k)/bins))
binned_ks = []
k_binned = np.zeros(bins)
k_lens = np.ones(bins)*binsize
        to_redistribute = len(k) % bins
        if to_redistribute > 0:  # guard: a -0 slice would overwrite every bin length
            k_lens[-to_redistribute:] = binsize + 1
inds = np.hstack(([0.], np.cumsum(k_lens))).astype(int)
for i in range(0,bins):
binned_ks = binned_ks + [k[inds[i]:inds[i+1]].tolist(),]
for i in range(0,bins):
k_binned[i] = np.mean(binned_ks[i])/len(binned_ks[i])
return k_binned,k_lens
def get_binned_k(self,k,bins):
'''
evenly bins elongation rates as best it can.
'''
binsize = int(np.floor(len(k)/bins))
binned_ks = []
k_binned = np.zeros(bins)
k_lens = np.ones(bins)*binsize
        to_redistribute = len(k) % bins
        if to_redistribute > 0:  # guard: a -0 slice would overwrite every bin length
            k_lens[-to_redistribute:] = binsize + 1
inds = np.hstack(([0.], np.cumsum(k_lens))).astype(int)
for i in range(0,bins):
binned_ks = binned_ks + [k[inds[i]:inds[i+1]].tolist(),]
for i in range(0,bins):
k_binned[i] = 1/np.mean(1/np.array(binned_ks[i]))
return k_binned,k_lens
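    # Usage sketch for get_binned_k (hedged): each bin's rate is the harmonic mean of
    # its members, so the total transit time through a bin (sum of 1/k) is preserved:
    #
    #   k_binned, k_lens = sms.get_binned_k(np.array(all_k[1:-1]), bins=50)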
def get_binned_probe_vec(self,probe_loc,bins):
        '''
        bins the probe vector as evenly as possible
        '''
probe_loc = np.atleast_2d(probe_loc)
binsize = int(np.floor(probe_loc.shape[1]/bins))
probeloc_binned = np.zeros((np.atleast_2d(probe_loc).shape[0],bins))
probe_lens = np.ones((np.atleast_2d(probe_loc).shape[0],bins))*binsize
        to_redistribute = probe_loc.shape[1] % bins  # remainder of positions after even binning
        if to_redistribute > 0:  # guard: a -0 slice would overwrite every bin length
            probe_lens[:, -to_redistribute:] = binsize + 1
inds = np.hstack(([0.], np.cumsum(probe_lens,axis=1)[0,:])).astype(int)
for i in range(0,bins):
probeloc_binned[:,i] = np.sum(probe_loc[:,inds[i]:inds[i+1]],axis=1)
probevec_binned = np.cumsum(probeloc_binned,axis=1)
return probevec_binned.astype(int), probeloc_binned.astype(int)
def ssa_binned(self,nt_seq=None, bins = 50,all_k=None, k_elong_mean=10, k_initiation=.03, probePosition=[], n_traj=100, tf=1000, start_time=0, tstep=1000, time_inhibit=0, evaluating_frap=False, evaluating_inhibitor=False,force_python = False):
        if nt_seq is None:  # get sequence if none was passed
nt_seq = self.POI.nt_seq
genelength = int(len(nt_seq)/3)
if len(probePosition) == 0:
pv,probePosition = self.get_probvec()
        if all_k is None:  # build the k vector if one was not provided
codons = nt_seq
genelength = int(len(codons)/3)
seperated_codons = [codons[i:i+3] for i in range(0, len(codons), 3)] #split codons by 3
k_elongation = np.zeros((1, genelength))
tRNA_copynumber = np.zeros((1, genelength))
for i in range(len(seperated_codons)):
tRNA_copynumber[0, i] = self.strGeneCopy[seperated_codons[i]]
mean_tRNA_copynumber = np.mean(list(self.strGeneCopy.values()))
k_elongation = (tRNA_copynumber / mean_tRNA_copynumber) * k_elong_mean
all_k = [k_initiation] + k_elongation.flatten().tolist()[:-1] + [10]
kbin,klen = self.get_binned_k(k_elongation.flatten()[:-1],bins)
all_k = [k_initiation] + kbin.flatten().tolist() #
pv,probePosition = self.get_binned_probe_vec(probePosition,bins)
footprint = 0
if isinstance(probePosition,list):
probePosition = np.array([probePosition]).astype(int)
        ssa_obj = self.__solve_ssa(genelength, all_k, pv, probePosition, n_traj, tf, start_time, tstep, time_inhibit, evaluating_frap, evaluating_inhibitor, force_python, footprint, 200)  # N_rib=200, matching ssa_solver's default
return ssa_obj
def ssa_solver(self, nt_seq=None, all_k=None, k_elong_mean=10, k_initiation=.03, probePosition=[], n_traj=100, tf=1000, start_time=0, tstep=1000, time_inhibit=0, evaluating_frap=False, evaluating_inhibitor=False,force_python = False,N_rib=200):
        '''
        Solve the stochastic simulation algorithm (SSA) for the translation simulation.
        *keyword args*
            **nt_seq**, nucleotide sequence to simulate
            **all_k**, the propensity rates for each codon location (obtained via get_k)
            **k_elong_mean**, average elongation rate to normalize by
            **k_initiation**, rate of mRNA translation initiation
            **probePosition**, binary vector of probe positions, i.e. where the tag epitopes start by codon position
            **n_traj**, number of trajectories
            **tf**, final time point
            **tstep**, number of time steps to record from 0 to tf
            **time_inhibit**, inhibition time of translation, for either the harringtonine assay or FRAP
            **evaluating_frap**, True or False for evaluating the FRAP assay at time_inhibit
            **evaluating_inhibitor**, True or False for evaluating harringtonine at time_inhibit
        *returns*
            **ssa_obj**, an ssa() class containing the raw simulated ribosome positions and statistics such as intensity vectors from the SSA trajectory group
        '''
if len(probePosition) == 0:
pv,probePosition = self.get_probvec()
        if nt_seq is None:
nt_seq = self.POI.nt_seq
genelength = int(len(nt_seq)/3)
        if all_k is None:
codons = nt_seq
genelength = int(len(codons)/3)
seperated_codons = [codons[i:i+3] for i in range(0, len(codons), 3)] #split codons by 3
k_elongation = np.zeros((1, genelength))
tRNA_copynumber = np.zeros((1, genelength))
for i in range(len(seperated_codons)):
tRNA_copynumber[0, i] = self.strGeneCopy[seperated_codons[i]]
mean_tRNA_copynumber = np.mean(list(self.strGeneCopy.values()))
k_elongation = (tRNA_copynumber / mean_tRNA_copynumber) * k_elong_mean
all_k = [k_initiation] + k_elongation.flatten().tolist()[:-1] + [10]
if isinstance(probePosition,list):
probePosition = np.array([probePosition]).astype(int)
footprint = 9
ssa_obj = self.__solve_ssa(genelength, all_k,pv,probePosition,n_traj, tf, start_time, tstep, time_inhibit, evaluating_frap, evaluating_inhibitor,force_python, footprint, N_rib)
return ssa_obj
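    # End-to-end usage sketch (hedged; the file name is hypothetical and the compiled
    # ssa_translation backend must be available for the default C++ path):
    #
    #   sms = rSNAPsim()
    #   sms.open_seq_file('example_gene.txt')
    #   sms.get_orfs(sms.sequence_str, min_codons=80)
    #   sms.get_temporal_proteins()
    #   sms.analyze_poi(sms.pois[0], sms.pois_seq[0])
    #   ssa_obj = sms.ssa_solver(n_traj=50, tf=1000, tstep=1000)
    #   ssa_obj.intensity_vec   # fluorescence intensity trajectories
    #   ssa_obj.ke_sim          # average elongation rate recovered from the SSA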
    def build_ODE(self, k, t, kbind, pl):
        '''
        Builds and solves the moment equations from translation_models.TranslateCorrs
        for elongation rates k, time vector t, binding rate kbind and probe layout pl
        '''
m = models.TranslateCorrs()
m.N = len(k)
m.tf = t[-1]
m.ptimes = len(t)
m.ke = k
#m.ke = 13.567*np.ones(kelong[1:].shape[0])
# m.ke[0] = 0.0
#m.kb = kelong[0]
m.kb = kbind
m.fi = 1
m.ti = t[0]
print(m.__dict__)
# Solve correlations
print("*****SOLVING MOMENT EQUATIONS*****")
m.binary = pl
start = time.time()
m.csolve()
solve_time = time.time()-start
print("Time to solve: %f" %solve_time)
print("Done.")
mean_I = m.map_to_fluorescence3(m.mu_ss)
var_I = m.map_to_fluorescence(m.var_ss)
print(mean_I)
print(var_I)
return m.tvec,np.ravel((m.intensity)/var_I), m.soln,m
def __solve_ssa(self,genelength,all_k,pv,probePosition,n_traj, tf, start_time, tstep, time_inhibit, evaluating_frap, evaluating_inhibitor,force_python,footprint,N_rib):
non_consider_time = start_time
npoints = tstep #non_consider_time + tstep
time_vec_fixed = np.linspace(0, npoints-1, npoints, dtype=np.float64)
truetime = np.linspace(0, tf, tstep, dtype=np.float64)
rib_vec = []
solutions = []
evf = int(evaluating_frap)
evi = int(evaluating_inhibitor)
try:
intime = float(time_inhibit)
        except (ValueError, TypeError):
intime = 0
solutionssave = []
st = time.time()
        if force_python == True:
            # no pure-Python fallback is wired up in __solve_ssa, so fail with a clear
            # error instead of the obscure TypeError from indexing the float st
            raise NotImplementedError('force_python is not supported in __solve_ssa; the compiled ssa_translation backend is required')
rib_vec = []
solutions = []
solutionssave = []
#N_rib = 200
all_results = np.zeros((n_traj, N_rib*len(time_vec_fixed)), dtype=np.int32)
all_ribtimes = np.zeros((n_traj,int(1.3*all_k[0]*truetime[-1])),dtype=np.float64)
result = np.zeros((len(time_vec_fixed)*N_rib), dtype=np.int32)
nribs = np.array([0],dtype=np.int32)
k = np.array(all_k)
seeds = np.random.randint(0, 0x7FFFFFF, n_traj)
all_frapresults = np.zeros((n_traj,N_rib*len(time_vec_fixed)),dtype=np.int32)
all_collisions = np.zeros((n_traj,int(1.3*all_k[0]*truetime[-1])),dtype=np.int32)
all_nribs = np.zeros((n_traj,1))
all_col_points = []
x0 = np.zeros((N_rib),dtype=np.int32)
for i in range(n_traj):
result = np.zeros((len(time_vec_fixed)*N_rib), dtype=np.int32)
ribtimes = np.zeros((int(1.3*k[0]*truetime[-1])),dtype=np.float64)
frapresult = np.zeros((len(time_vec_fixed)*N_rib),dtype=np.int32)
coltimes = np.zeros((int(1.3*k[0]*truetime[-1])),dtype=np.int32)
colpointsx = np.zeros(len(k[1:-1])*(int(1.3*k[0]*truetime[-1])),dtype=np.int32)
colpointst = np.zeros(len(k[1:-1])*(int(1.3*k[0]*truetime[-1])),dtype=np.float64)
nribs = np.array([0],dtype=np.int32)
ssa_translation.run_SSA(result, ribtimes, coltimes, colpointsx,colpointst, k[1:-1],frapresult, truetime, k[0], k[-1], evf, evi, intime, seeds[i],nribs,x0,footprint, N_rib)
#ssa_translation.run_SSA(result, ribtimes, coltimes, k[1:-1],frapresult, truetime, k[0], k[-1], evf, evi, intime, seeds[i],nribs)
all_results[i, :] = result
all_frapresults[i,:] = frapresult
all_ribtimes[i,:] = ribtimes
all_collisions[i,:] = coltimes
all_nribs[i,:] = nribs
endcolrec = np.where(colpointsx == 0)[0][0]
colpoints = np.vstack((colpointsx[:endcolrec],colpointst[:endcolrec]))
all_col_points.append(colpoints.T)
for i in range(n_traj):
soln = all_results[i, :].reshape((N_rib, len(time_vec_fixed)))
validind = np.where(np.sum(soln,axis=1)!=0)[0]
if np.max(validind) != N_rib-1:
validind = np.append(np.where(np.sum(soln,axis=1)!=0)[0],np.max(validind)+1)
so = soln[(validind,)]
solutionssave.append(so)
solutions.append(soln)
collisions = np.array([[]])
watched_ribs = []
for i in range(n_traj):
totalrib = all_nribs[i]
if totalrib > all_collisions.shape[1]:
collisions = np.append(collisions, all_collisions[i][:])
watched_ribs.append(int(all_collisions.shape[1]))
else:
collisions = np.append(collisions, all_collisions[i][:int(totalrib[0])])
watched_ribs.append(int(totalrib[0]))
sttime = time.time() - st
no_ribosomes = np.zeros((n_traj, (genelength+1)))
startindex = np.where(truetime >= non_consider_time)[0][0]
#all_results = all_results[:,startindex*N_rib:]
for i in range(len(solutions)):
for j in range(len(solutions[0][0][startindex:])):
rib_pos = solutions[i][startindex:, j][np.nonzero(solutions[i][startindex:, j])]
no_ribosomes[i, rib_pos.astype(int)] += 1
no_ribosomes = no_ribosomes[:, 1:]
ribosome_means = np.mean(no_ribosomes, axis=0)
ribosome_density = ribosome_means/npoints
no_ribosomes_per_mrna = np.mean(no_ribosomes)
if probePosition.shape[0] <=1:
I = np.zeros((n_traj, len(time_vec_fixed[startindex:])))
else:
I = np.zeros((int(probePosition.shape[0]),n_traj, len(time_vec_fixed[startindex:])))
#I = np.zeros((1,tstep+1))
if evaluating_frap == False:
if probePosition.shape[0] <=1:
for i in range(n_traj):
traj = all_results[i, :].reshape((N_rib, len(time_vec_fixed))).T
I[i, :] = np.sum(np.multiply(pv.flatten()[traj], traj>0), axis=1)[startindex:].T
else:
for j in range(probePosition.shape[0]):
for i in range(n_traj):
traj = all_results[i, :].reshape((N_rib, len(time_vec_fixed))).T
I[j,i, :] = np.sum(pv[j][traj], axis=1)[startindex:].T
intensity_vec = I
else:
fraptime = time_inhibit
inds = np.where(truetime > fraptime)
inds2 = np.where(truetime < fraptime+20)
inds = np.intersect1d(inds,inds2)
endfrap = inds[-1]-1
for i in range(n_traj):
traj = all_results[i, :].reshape((N_rib, len(time_vec_fixed))).T
nribs = np.sum(solutionssave[i][:,endfrap]!=0)
#ribloc = solutionssave[i][:,endfrap]
#adj_pv = pv[solutionssave[i][:,inds[-1]][:nribs]]
frap_app = 20
revI = self.get_negative_intensity(traj,genelength,pv,truetime,fraptime+start_time,fraptime+start_time+frap_app)
I[i, :] = np.sum(pv[traj], axis=1)[startindex:].T
I[i,inds[0]:inds[0]+20] = 0
#I[i,endfrap-startindex:] = np.sum(pv[traj],axis=1)[endfrap-startindex:].T
I[i,inds[0]+frap_app:len(revI)+inds[0]+frap_app] = I[i,inds[0]+frap_app:len(revI)+inds[0]+frap_app] + revI
intensity_vec = I
ssa_obj = ssa()
ssa_obj.no_ribosomes = no_ribosomes
ssa_obj.n_traj = n_traj
ssa_obj.k = all_k
ssa_obj.no_rib_per_mrna = no_ribosomes_per_mrna
ssa_obj.rib_density = ribosome_density
ssa_obj.rib_means = ribosome_means
ssa_obj.rib_vec = rib_vec
ssa_obj.intensity_vec = intensity_vec
ssa_obj.time_vec_fixed = time_vec_fixed
ssa_obj.time = truetime
ssa_obj.time_rec = truetime[startindex:]
ssa_obj.start_time = non_consider_time
ssa_obj.watched_ribs = watched_ribs
try:
ssa_obj.col_points = all_col_points
except:
pass
ssa_obj.evaluating_inhibitor = evaluating_inhibitor
ssa_obj.evaluating_frap = evaluating_frap
ssa_obj.time_inhibit = time_inhibit
ssa_obj.solutions = solutionssave
ssa_obj.solvetime = sttime
ssa_obj.collisions = collisions
try:
ssa_obj.ribtimes = all_ribtimes[np.where(all_ribtimes > 0)]
except:
pass
#solt = solutions.T
fragmented_trajectories = []
fragtimes = []
maxlen = 0
fragmentspertraj= []
for k in range(n_traj):
ind = np.array([next(j for j in range(0,solutions[k].shape[0]) if int(solutions[k][j, i]) == 0 or int(solutions[k][j, i]) == -1) for i in range(0, solutions[k].shape[1])])
changes = ind[1:] - ind[:-1]
addindexes = np.where(changes > 0)[0]
subindexes = np.where(changes < 0)[0]
sub = solutions[k][:,1:] - solutions[k][:,:-1]
neutralindexes = np.unique(np.where(sub < 0)[1])
neutralindexes = np.setxor1d(neutralindexes, subindexes)
for index in neutralindexes:
pre = solutions[k][:,index]
post = solutions[k][:,index+1]
changecount = 0
while len(np.where(post - pre < 0)[0]) > 0:
post = np.append([genelength],post)
pre = np.append(pre,0)
changecount+=1
for i in range(changecount):
addindexes = np.sort(np.append(addindexes,index))
subindexes = np.sort(np.append(subindexes,index))
changes[index] = -changecount
ind[index] += changecount
for index in np.where(np.abs(changes)>1)[0]:
if changes[index] < 0:
for i in range(np.abs(changes[index])-1):
subindexes = np.sort(np.append(subindexes,index))
else:
for i in range(np.abs(changes[index])-1):
addindexes = np.sort(np.append(addindexes,index))
truefrags = len(subindexes)
if len(subindexes) < len(addindexes):
subindexes = np.append(subindexes, (np.ones((len(addindexes)-len(subindexes)))*(len(truetime)-1)).astype(int))
fragmentspertraj.append(len(subindexes))
for m in range(min(len(subindexes),len(addindexes))):
traj = solutions[k][:, addindexes[m]:subindexes[m]+1]
traj_ind = changes[addindexes[m]:subindexes[m]+1]
startind = ind[addindexes[m]]
minusloc = [0] + np.where(traj_ind < 0)[0].astype(int).tolist()
fragment = np.array([])
iterind = startind
if subindexes[m]-addindexes[m] > 0:
if len(minusloc) > 1:
if m <= truefrags:
for n in range(len(minusloc)-1):
iterind = iterind + min(0,traj_ind[minusloc[n]])
fragment = np.append(fragment, traj[iterind, minusloc[n]+1:minusloc[n+1]+1].flatten())
fragment = np.append(fragment, traj[0, minusloc[-1]+1:].flatten())
else:
for n in range(len(minusloc)-1):
iterind = iterind + min(0,traj_ind[minusloc[n]])
fragment = np.append(fragment, traj[iterind, minusloc[n]+1:minusloc[n+1]+1].flatten())
fragment = np.append(fragment, traj[m-truefrags, minusloc[-1]+1:].flatten())
else:
fragment = solutions[k][startind][addindexes[m]:subindexes[m]+1].flatten()
fragtimes.append(addindexes[m]+1)
fragmented_trajectories.append(fragment)
#if m <= truefrags:
#kes.append(genelength/truetime[len(fragment)])
if len(fragment) > maxlen:
maxlen = len(fragment)
fragarray = np.zeros((len(fragmented_trajectories), maxlen))
for i in range(len(fragmented_trajectories)):
fragarray[i][0:len(fragmented_trajectories[i])] = fragmented_trajectories[i]
ssa_obj.fragments = fragarray
ssa_obj.fragtimes = fragtimes
ssa_obj.frag_per_traj = fragmentspertraj
ssa_obj.full_frags = truefrags
ssa_obj.all_results = all_results
if probePosition.shape[0] > 1:
for i in range(probePosition.shape[0]):
if i > 0:
autocorr_vec2, mean_autocorr2, error_autocorr2, dwelltime2, ke_sim2 = self.get_autocorr(intensity_vec[i], truetime, 0, genelength)
autocorr_vec = np.vstack((autocorr_vec,autocorr_vec2))
mean_autocorr = np.vstack((mean_autocorr,mean_autocorr2))
error_autocorr = np.vstack((error_autocorr,error_autocorr2))
dwelltime.append(dwelltime2)
ke_sim.append(ke_sim2)
else:
autocorr_vec, mean_autocorr, error_autocorr, dwelltime, ke_sim = self.get_autocorr(intensity_vec[i], truetime, 0, genelength)
autocorr_vec_norm, mean_autocorr_norm, error_autocorr_norm, dwelltime, ke_sim = self.get_autocorr_norm(intensity_vec[i], truetime, 0, genelength)
dwelltime = [dwelltime]
ke_sim = [ke_sim]
else:
autocorr_vec, mean_autocorr, error_autocorr, dwelltime, ke_sim = self.get_autocorr(intensity_vec, truetime, 0, genelength)
autocorr_vec_norm, mean_autocorr_norm, error_autocorr_norm, dwelltime, ke_sim = self.get_autocorr_norm(intensity_vec, truetime, 0, genelength)
acov,nacov = self.get_all_autocovariances(intensity_vec,truetime,genelength )
ssa_obj.autocorr_vec = autocorr_vec
ssa_obj.mean_autocorr = mean_autocorr
ssa_obj.error_autocorr = error_autocorr
ssa_obj.autocorr_vec_norm = autocorr_vec_norm
ssa_obj.mean_autocorr_norm = mean_autocorr_norm
ssa_obj.error_autocorr_norm = error_autocorr_norm
ssa_obj.dwelltime = dwelltime
ssa_obj.ke_sim = ke_sim
ssa_obj.ke_true = float(genelength)/np.mean(ssa_obj.ribtimes)
ssa_obj.probe = probePosition
try:
ssa_obj.autocovariance_dict = acov
ssa_obj.autocovariance_norm_dict = nacov
except:
pass
return ssa_obj
def get_negative_intensity(self,solution,gene_length,pv,tvec,ti,stop_frap):
startindex = np.where(tvec >= ti)[0][0]
stop_frap = np.where(tvec >= stop_frap)[0][0]
solution = solution.T
fragmented_trajectories = []
fragtimes = []
endfragtimes = []
maxlen = 0
fragmentspertraj= []
ind = np.array([next(j for j in range(0,solution.shape[0]) if int(solution[j, i]) == 0 or int(solution[j, i]) == -1) for i in range(0, solution.shape[1])])
changes = ind[1:] - ind[:-1]
addindexes = np.where(changes > 0)[0]
subindexes = np.where(changes < 0)[0]
sub = solution[:,1:] - solution[:,:-1]
neutralindexes = np.unique(np.where(sub < 0)[1])
neutralindexes = np.setxor1d(neutralindexes, subindexes)
for index in neutralindexes:
pre = solution[:,index]
post = solution[:,index+1]
changecount = 0
while len(np.where(post - pre < 0)[0]) > 0:
post = np.append([gene_length],post)
pre = np.append(pre,0)
changecount+=1
for i in range(changecount):
addindexes = np.sort(np.append(addindexes,index))
subindexes = np.sort(np.append(subindexes,index))
changes[index] = -changecount
ind[index] += changecount
for index in np.where(np.abs(changes)>1)[0]:
if changes[index] < 0:
for i in range(np.abs(changes[index])-1):
subindexes = np.sort(np.append(subindexes,index))
else:
for i in range(np.abs(changes[index])-1):
addindexes = np.sort(np.append(addindexes,index))
truefrags = len(subindexes)
if len(subindexes) < len(addindexes):
subindexes = np.append(subindexes, (np.ones((len(addindexes)-len(subindexes)))*(len(tvec)-1)).astype(int))
fragmentspertraj.append(len(subindexes))
for m in range(min(len(subindexes),len(addindexes))):
traj = solution[:, addindexes[m]:subindexes[m]+1]
traj_ind = changes[addindexes[m]:subindexes[m]+1]
startind = ind[addindexes[m]]
minusloc = [0] + np.where(traj_ind < 0)[0].astype(int).tolist()
fragment = np.array([])
iterind = startind
if subindexes[m]-addindexes[m] > 0:
if len(minusloc) > 1:
if m <= truefrags:
for n in range(len(minusloc)-1):
iterind = iterind + min(0,traj_ind[minusloc[n]])
fragment = np.append(fragment, traj[iterind, minusloc[n]+1:minusloc[n+1]+1].flatten())
fragment = np.append(fragment, traj[0, minusloc[-1]+1:].flatten())
else:
for n in range(len(minusloc)-1):
iterind = iterind + min(0,traj_ind[minusloc[n]])
fragment = np.append(fragment, traj[iterind, minusloc[n]+1:minusloc[n+1]+1].flatten())
fragment = np.append(fragment, traj[m-truefrags, minusloc[-1]+1:].flatten())
else:
fragment = solution[startind][addindexes[m]:subindexes[m]+1].flatten()
fragtimes.append(addindexes[m]+1)
if addindexes[m]+1 + len(fragment) > len(tvec):
endfragtimes.append(len(tvec))
else:
endfragtimes.append(addindexes[m]+1 + len(fragment))
fragmented_trajectories.append(fragment)
#if m <= truefrags:
#kes.append(genelength/truetime[len(fragment)])
if len(fragment) > maxlen:
maxlen = len(fragment)
fragarray = np.zeros((len(fragmented_trajectories), maxlen))
for i in range(len(fragmented_trajectories)):
fragarray[i][0:len(fragmented_trajectories[i])] = fragmented_trajectories[i]
affected_frags = []
fragindexes = []
for i in range(len(fragtimes)):
if np.sum([fragtimes[i]> np.array([startindex, stop_frap]), endfragtimes[i] > np.array([startindex, stop_frap])]) in [1,2,3]:
affected_frags.append(i)
fragindexes.append([fragtimes[i],endfragtimes[i]])
#affected_frags = np.intersect1d(np.where(np.array(fragtimes) >= startindex), np.where(np.array(fragtimes)<= stop_frap))
if len(fragindexes)> 0:
findexes = np.array(fragindexes)
frange = findexes[:,1]-stop_frap
afterfrapribs = findexes[np.where(frange > 0 )]
relevantfrags = np.array(affected_frags)[np.where(frange > 0 )]
if len(relevantfrags) > 0:
cooked_ribs = 0#(len(affected_frags) - len(relevantfrags))*max(pv)
stopfrapindex = stop_frap - afterfrapribs[:,0]
rfrags = fragarray[relevantfrags]
                laglen = afterfrapribs[:, 1] - stop_frap
                positions_at_end_of_FRAP = np.diag(rfrags[:, stopfrapindex])
                offset = pv[positions_at_end_of_FRAP.astype(int)]
trailing_intensity = np.zeros((max(laglen)))
for i in range(len(laglen)):
trailing_intensity[:laglen[i]] -= offset[i]
trailing_intensity= trailing_intensity-cooked_ribs
else:
trailing_intensity = np.array([0])
else:
trailing_intensity = np.array([0])
return trailing_intensity
def ssa_solver_append(self, ssa_obj, n=100):
nRepetitions = ssa_obj.n_traj
all_k = ssa_obj.k
no_ribosomes_per_mrna = ssa_obj.no_rib_per_mrna
ribosome_density = ssa_obj.rib_density
ribosome_means = ssa_obj.rib_means
rib_vec = ssa_obj.rib_vec
intensity_vec = ssa_obj.intensity_vec
time_vec_fixed = ssa_obj.time_vec_fixed
non_consider_time = ssa_obj.start_time
evaluating_inhibitor = ssa_obj.evaluating_inhibitor
evaluating_frap = ssa_obj.evaluating_frap
time_inhibit = ssa_obj.time_inhibit
truetime = ssa_obj.time
tstep = len(ssa_obj.time)
npoints = tstep #non_consider_time + tstep
rib_vec = []
solutions = []
pv = ssa_obj.probe
genelength = len(pv[0])-1
evf = int(evaluating_frap)
evi = int(evaluating_inhibitor)
try:
intime = float(time_inhibit)
        except (ValueError, TypeError):
intime = 0
solutionssave = []
st = time.time()
n_traj = n
force_python = False
try:
            if force_python == True:
                st[0]  # intentional TypeError: indexing the float st jumps to the Python fallback in the except branch
rib_vec = []
solutions = []
solutionssave = []
N_rib = 200
all_results = np.zeros((n_traj, N_rib*len(time_vec_fixed)), dtype=np.int32)
all_ribtimes = np.zeros((n_traj,int(1.3*all_k[0]*truetime[-1])),dtype=np.float64)
result = np.zeros((len(time_vec_fixed)*N_rib), dtype=np.int32)
nribs = np.array([0],dtype=np.int32)
k = np.array(all_k)
seeds = np.random.randint(0, 0x7FFFFFF, n_traj)
all_frapresults = np.zeros((n_traj,N_rib*len(time_vec_fixed)),dtype=np.int32)
all_collisions = np.zeros((n_traj,int(1.3*all_k[0]*truetime[-1])),dtype=np.int32)
all_nribs = np.zeros((n_traj,1))
all_col_points = []
for i in range(n_traj):
result = np.zeros((len(time_vec_fixed)*N_rib), dtype=np.int32)
ribtimes = np.zeros((int(1.3*k[0]*truetime[-1])),dtype=np.float64)
frapresult = np.zeros((len(time_vec_fixed)*N_rib),dtype=np.int32)
coltimes = np.zeros((int(1.3*k[0]*truetime[-1])),dtype=np.int32)
colpointsx = np.zeros(len(k[1:-1])*(int(1.3*k[0]*truetime[-1])),dtype=np.int32)
colpointst = np.zeros(len(k[1:-1])*(int(1.3*k[0]*truetime[-1])),dtype=np.float64)
                nribs = np.array([0], dtype=np.int32)
                x0 = np.zeros((N_rib), dtype=np.int32)  # empty initial ribosome state, as in __solve_ssa
                footprint = 9                           # ribosome footprint in codons, matching ssa_solver
                ssa_translation.run_SSA(result, ribtimes, coltimes, colpointsx, colpointst, k[1:-1], frapresult, truetime, k[0], k[-1], evf, evi, intime, seeds[i], nribs, x0, footprint, N_rib)
all_results[i, :] = result
all_frapresults[i,:] = frapresult
all_ribtimes[i,:] = ribtimes
all_collisions[i,:] = coltimes
all_nribs[i,:] = nribs
endcolrec = np.where(colpointsx == 0)[0][0]
colpoints = np.vstack((colpointsx[:endcolrec],colpointst[:endcolrec]))
all_col_points.append(colpoints.T)
for i in range(n_traj):
soln = all_results[i, :].reshape((N_rib, len(time_vec_fixed)))
validind = np.where(np.sum(soln,axis=1)!=0)[0]
if np.max(validind) != N_rib-1:
validind = np.append(np.where(np.sum(soln,axis=1)!=0)[0],np.max(validind)+1)
so = soln[(validind,)]
solutionssave.append(so)
solutions.append(soln)
collisions = np.array([[]])
watched_ribs = []
for i in range(n_traj):
totalrib = all_nribs[i]
if totalrib > all_collisions.shape[1]:
collisions = np.append(collisions, all_collisions[i][:])
watched_ribs.append(int(all_collisions.shape[1]))
else:
collisions = np.append(collisions, all_collisions[i][:int(totalrib[0])])
watched_ribs.append(int(totalrib[0]))
sttime = time.time() - st
except:
print('C++ library failed, Using Python Implementation')
rib_vec = []
solutions = []
solutionssave = []
N_rib = 200
collisions = np.array([[]])
all_results = np.zeros((n_traj, N_rib*len(time_vec_fixed)), dtype=np.int32)
all_col_points = []
watched_ribs = []
for i in range(n_traj):
soln,all_ribtimes,Ncol,col_points = self.SSA(all_k, truetime, inhibit_time=time_inhibit+non_consider_time, FRAP=evaluating_frap, Inhibitor=evaluating_inhibitor)
#soln = soln.reshape((1, (len(time_vec_fixed)*N_rib)))
collisions = np.append(collisions,Ncol)
watched_ribs.append(int(len(collisions)))
validind = np.where(np.sum(soln,axis=1)!=0)[0]
all_col_points.append(np.array(col_points))
if np.max(validind) != N_rib-1:
validind = np.append(np.where(np.sum(soln,axis=1)!=0)[0],np.max(validind)+1)
so = soln[(validind,)]
solutionssave.append(so)
solutions.append(soln)
result = soln.reshape((1, (len(time_vec_fixed)*N_rib)))
all_results[i, :] = result
sttime = time.time() - st
no_ribosomes = np.zeros((n_traj, (genelength+1)))
startindex = np.where(truetime >= non_consider_time)[0][0]
#all_results = all_results[:,startindex*N_rib:]
for i in range(len(solutions)):
for j in range(len(solutions[0][0][startindex:])):
rib_pos = solutions[i][startindex:, j][np.nonzero(solutions[i][startindex:, j])]
no_ribosomes[i, rib_pos.astype(int)] += 1
no_ribosomes = no_ribosomes[:, 1:]
ribosome_means = np.mean(no_ribosomes, axis=0)
ribosome_density = ribosome_means/npoints
no_ribosomes_per_mrna = np.mean(no_ribosomes)
if pv.shape[0] <=1:
I = np.zeros((n_traj, len(time_vec_fixed[startindex:])))
else:
I = np.zeros((int(pv.shape[0]),n_traj, len(time_vec_fixed[startindex:])))
#I = np.zeros((1,tstep+1))
if evaluating_frap == False:
if pv.shape[0] <=1:
for i in range(n_traj):
traj = all_results[i, :].reshape((N_rib, len(time_vec_fixed))).T
I[i, :] = np.sum(pv[0][traj], axis=1)[startindex:].T
else:
for j in range(pv.shape[0]):
for i in range(n_traj):
traj = all_results[i, :].reshape((N_rib, len(time_vec_fixed))).T
I[j,i, :] = np.sum(pv[j][traj], axis=1)[startindex:].T
intensity_vec = I
else:
fraptime = time_inhibit
inds = np.where(truetime > fraptime)
inds2 = np.where(truetime < fraptime+20)
inds = np.intersect1d(inds,inds2)
endfrap = inds[-1]-1
for i in range(n_traj):
traj = all_results[i, :].reshape((N_rib, len(time_vec_fixed))).T
nribs = np.sum(solutionssave[i][:,endfrap]!=0)
#ribloc = solutionssave[i][:,endfrap]
#adj_pv = pv[solutionssave[i][:,inds[-1]][:nribs]]
frap_app = 20
                revI = self.get_negative_intensity(traj, genelength, pv, truetime, fraptime+non_consider_time, fraptime+non_consider_time+frap_app)  # non_consider_time is this ssa_obj's start_time
I[i, :] = np.sum(pv[traj], axis=1)[startindex:].T
I[i,inds[0]:inds[0]+20] = 0
#I[i,endfrap-startindex:] = np.sum(pv[traj],axis=1)[endfrap-startindex:].T
I[i,inds[0]+frap_app:len(revI)+inds[0]+frap_app] = I[i,inds[0]+frap_app:len(revI)+inds[0]+frap_app] + revI
intensity_vec = I
new_ssa_obj = ssa()
new_ssa_obj.no_ribosomes = np.vstack(( ssa_obj.no_ribosomes , no_ribosomes))
new_ssa_obj.n_traj = n_traj+ssa_obj.n_traj
new_ssa_obj.k = all_k
new_ssa_obj.no_rib_per_mrna = float(n_traj)/(n_traj+ssa_obj.n_traj) * no_ribosomes_per_mrna + float(ssa_obj.n_traj)/(n_traj+ssa_obj.n_traj) * ssa_obj.no_rib_per_mrna
new_ssa_obj.rib_density = ribosome_density
new_ssa_obj.rib_means = ribosome_means
new_ssa_obj.rib_means = np.mean(np.vstack((ssa_obj.rib_means,ribosome_means)),0)
new_ssa_obj.rib_vec = rib_vec
new_ssa_obj.intensity_vec = np.vstack((ssa_obj.intensity_vec,intensity_vec))
new_ssa_obj.time_vec_fixed = time_vec_fixed
new_ssa_obj.time = truetime
new_ssa_obj.time_rec = truetime[startindex:]
new_ssa_obj.start_time = non_consider_time
new_ssa_obj.watched_ribs = ssa_obj.watched_ribs + watched_ribs
try:
new_ssa_obj.col_points = ssa_obj.col_points + all_col_points
except:
pass
new_ssa_obj.evaluating_inhibitor = evaluating_inhibitor
new_ssa_obj.evaluating_frap = evaluating_frap
new_ssa_obj.time_inhibit = time_inhibit
new_ssa_obj.solutions = ssa_obj.solutions + solutionssave
new_ssa_obj.solvetime = sttime
new_ssa_obj.collisions = np.hstack((ssa_obj.collisions,collisions))
try:
new_ssa_obj.ribtimes = np.hstack((ssa_obj.ribtimes, all_ribtimes[np.where(all_ribtimes > 0)]))
except:
pass
#solt = solutions.T
fragmented_trajectories = []
fragtimes = []
maxlen = 0
fragmentspertraj= []
for k in range(n_traj):
ind = np.array([next(j for j in range(0,solutions[k].shape[0]) if int(solutions[k][j, i]) == 0 or int(solutions[k][j, i]) == -1) for i in range(0, solutions[k].shape[1])])
changes = ind[1:] - ind[:-1]
addindexes = np.where(changes > 0)[0]
subindexes = np.where(changes < 0)[0]
sub = solutions[k][:,1:] - solutions[k][:,:-1]
neutralindexes = np.unique(np.where(sub < 0)[1])
neutralindexes = np.setxor1d(neutralindexes, subindexes)
for index in neutralindexes:
pre = solutions[k][:,index]
post = solutions[k][:,index+1]
changecount = 0
while len(np.where(post - pre < 0)[0]) > 0:
post = np.append([genelength],post)
pre = np.append(pre,0)
changecount+=1
for i in range(changecount):
addindexes = np.sort(np.append(addindexes,index))
subindexes = np.sort(np.append(subindexes,index))
changes[index] = -changecount
ind[index] += changecount
for index in np.where(np.abs(changes)>1)[0]:
if changes[index] < 0:
for i in range(np.abs(changes[index])-1):
subindexes = np.sort(np.append(subindexes,index))
else:
for i in range(np.abs(changes[index])-1):
addindexes = np.sort(np.append(addindexes,index))
truefrags = len(subindexes)
if len(subindexes) < len(addindexes):
subindexes = np.append(subindexes, (np.ones((len(addindexes)-len(subindexes)))*(len(truetime)-1)).astype(int))
fragmentspertraj.append(len(subindexes))
for m in range(min(len(subindexes),len(addindexes))):
traj = solutions[k][:, addindexes[m]:subindexes[m]+1]
traj_ind = changes[addindexes[m]:subindexes[m]+1]
startind = ind[addindexes[m]]
minusloc = [0] + np.where(traj_ind < 0)[0].astype(int).tolist()
fragment = np.array([])
iterind = startind
if subindexes[m]-addindexes[m] > 0:
if len(minusloc) > 1:
if m <= truefrags:
for n in range(len(minusloc)-1):
iterind = iterind + min(0,traj_ind[minusloc[n]])
fragment = np.append(fragment, traj[iterind, minusloc[n]+1:minusloc[n+1]+1].flatten())
fragment = np.append(fragment, traj[0, minusloc[-1]+1:].flatten())
else:
for n in range(len(minusloc)-1):
iterind = iterind + min(0,traj_ind[minusloc[n]])
fragment = np.append(fragment, traj[iterind, minusloc[n]+1:minusloc[n+1]+1].flatten())
fragment = np.append(fragment, traj[m-truefrags, minusloc[-1]+1:].flatten())
else:
fragment = solutions[k][startind][addindexes[m]:subindexes[m]+1].flatten()
fragtimes.append(addindexes[m]+1)
fragmented_trajectories.append(fragment)
#if m <= truefrags:
#kes.append(genelength/truetime[len(fragment)])
if len(fragment) > maxlen:
maxlen = len(fragment)
fragarray = np.zeros((len(fragmented_trajectories), maxlen))
for i in range(len(fragmented_trajectories)):
fragarray[i][0:len(fragmented_trajectories[i])] = fragmented_trajectories[i]
fraglen_size = max(fragarray.shape[1],ssa_obj.fragments.shape[1])
if fragarray.shape[1] != fraglen_size:
fragarray = np.hstack((fragarray, np.zeros((fragarray.shape[0],fraglen_size-fragarray.shape[1]))) )
if ssa_obj.fragments.shape[1] != fraglen_size:
ssa_obj.fragments = np.hstack((ssa_obj.fragments, np.zeros((ssa_obj.fragments.shape[0],fraglen_size-ssa_obj.fragments.shape[1]))) )
new_ssa_obj.fragments = np.vstack((ssa_obj.fragments,fragarray))
new_ssa_obj.fragtimes = ssa_obj.fragtimes+fragtimes
new_ssa_obj.frag_per_traj = fragmentspertraj
new_ssa_obj.full_frags = ssa_obj.full_frags + truefrags
new_ssa_obj.all_results = np.vstack((ssa_obj.all_results,all_results))
if pv.shape[0] > 1:
for i in range(pv.shape[0]):
if i > 0:
autocorr_vec2, mean_autocorr2, error_autocorr2, dwelltime2, ke_sim2 = self.get_autocorr(intensity_vec[i], truetime, 0, genelength)
autocorr_vec = np.vstack((autocorr_vec,autocorr_vec2))
mean_autocorr = np.vstack((mean_autocorr,mean_autocorr2))
error_autocorr = np.vstack((error_autocorr,error_autocorr2))
dwelltime.append(dwelltime2)
ke_sim.append(ke_sim2)
else:
autocorr_vec, mean_autocorr, error_autocorr, dwelltime, ke_sim = self.get_autocorr(intensity_vec[i], truetime, 0, genelength)
autocorr_vec_norm, mean_autocorr_norm, error_autocorr_norm, dwelltime, ke_sim = self.get_autocorr_norm(intensity_vec[i], truetime, 0, genelength)
dwelltime = [dwelltime]
ke_sim = [ke_sim]
else:
autocorr_vec, mean_autocorr, error_autocorr, dwelltime, ke_sim = self.get_autocorr(intensity_vec, truetime, 0, genelength)
autocorr_vec_norm, mean_autocorr_norm, error_autocorr_norm, dwelltime, ke_sim = self.get_autocorr_norm(intensity_vec, truetime, 0, genelength)
acov,nacov = self.get_all_autocovariances(intensity_vec,truetime,genelength )
new_ssa_obj.autocorr_vec = autocorr_vec
new_ssa_obj.mean_autocorr = mean_autocorr
new_ssa_obj.error_autocorr = error_autocorr
new_ssa_obj.autocorr_vec_norm = autocorr_vec_norm
new_ssa_obj.mean_autocorr_norm = mean_autocorr_norm
new_ssa_obj.error_autocorr_norm = error_autocorr_norm
new_ssa_obj.dwelltime = dwelltime
new_ssa_obj.ke_sim = float(n_traj)/(n_traj+ssa_obj.n_traj) * ke_sim + float(ssa_obj.n_traj)/(n_traj+ssa_obj.n_traj) * ssa_obj.ke_sim
new_ssa_obj.ke_true = float(genelength)/np.mean( new_ssa_obj.ribtimes )
new_ssa_obj.probe = ssa_obj.probe
new_ssa_obj.autocovariance_dict = acov
new_ssa_obj.autocovariance_norm_dict = nacov
# try:
# probePosition = []
# for key in self.POI.tag_epitopes.keys():
# probePosition = probePosition + self.POI.tag_epitopes[key]
# probePosition = np.unique(probePosition).tolist()
# except:
# print('No POI found')
# #nt_seq = self.tag_full['T_flag'] + nt_seq
#
#
# nt_seq = self.POI.nt_seq
# genelength = int(len(nt_seq)/3)
#
#
#
# pv = np.zeros((1, genelength)).astype(int).flatten()
#
# for i in range(len(probePosition)):
# pv[probePosition[i]:] = i
#
#
#
#
#
# npoints = len(time_vec_fixed)
# tstep = npoints-non_consider_time
# for i in range(n):
#
# soln = self.SSA(all_k, time_vec_fixed, inhibit_time=time_inhibit+non_consider_time, FRAP=evaluating_frap, Inhibitor=evaluating_inhibitor)
#
# rb = sparse.lil_matrix((len(time_vec_fixed), genelength), dtype=int)
# for j in range(soln.shape[1]):
#
# #if len(np.where(soln[:,j]!=0)[0]) !=0:
# #print(np.where(soln[:,j]!=0)[0])
#
#
# #rb[j,np.where(soln[:,j]!=0)[0]] = 1
#
#
# for value in soln[:, j][np.where(soln[:, j] != 0 )[0]].astype(int):
#
# rb[j, value-1] = 1
#
# rib_vec.append(rb)
#
#
# no_ribosomes = np.zeros((len(rib_vec), genelength))
#
#
#
# for i in range(len(rib_vec)):
# no_ribosomes[i] = np.sum(rib_vec[i].todense()[non_consider_time:], axis=0).flatten()
#
# ribosome_means = np.mean(no_ribosomes, axis=0)
# ribosome_density = ribosome_means/npoints
#
# no_ribosomes_per_mrna = np.mean(no_ribosomes)
#
# intensity_vec = np.zeros((len(rib_vec), tstep+1))
#
# I = np.zeros((1, tstep+1))
# for i in range(len(rib_vec)):
# for j in range(tstep):
# temp_output = rib_vec[i][non_consider_time + j, :].todense()
#
# I[0, j] = np.sum(pv * temp_output.flatten().T)
# intensity_vec[i] = I
#
#
#
# ssa_obj = ssa()
#
# ssa_obj.n_traj = nRepetitions + n
# ssa_obj.k = all_k
# ssa_obj.no_rib_per_mrna = no_ribosomes_per_mrna
# ssa_obj.rib_density = ribosome_density
# ssa_obj.rib_means = ribosome_means
# ssa_obj.rib_vec = rib_vec
# ssa_obj.intensity_vec = intensity_vec
# ssa_obj.time_vec_fixed = time_vec_fixed
# ssa_obj.start_time = non_consider_time
# ssa_obj.probe = probePosition
# ssa_obj.evaluating_inhibitor = evaluating_inhibitor
# ssa_obj.evaluating_frap = evaluating_frap
# ssa_obj.time_inhibit = time_inhibit
#
#
#
# if evaluating_inhibitor == False:
# autocorr_vec, mean_autocorr, error_autocorr, dwelltime, ke_sim = self.get_autocorr(intensity_vec, time_vec_fixed, 0, genelength)
# ssa_obj.autocorr_vec = autocorr_vec
# ssa_obj.mean_autocorr = mean_autocorr
# ssa_obj.error_autocorr = error_autocorr
# ssa_obj.dwelltime = dwelltime
# ssa_obj.ke_sim = ke_sim
return new_ssa_obj
def multitau_acc(self, ivec, n, sampling_rate, sample_rate_seconds):
'''
Multi-tau autocorrelation of the intensity traces
'''
sigmas = 3
acc = np.array([[]])
for i in range(0, n):
tempdata = ivec[i, :].flatten()
# tmean is assumed to be scipy.stats.tmean; zero out points beyond +/- sigmas*std of the trimmed mean
tempdata[np.where(tempdata > tmean(tempdata, 10) + sigmas*np.std(tempdata))] = 0
tempdata[np.where(tempdata < tmean(tempdata, 10) - sigmas*np.std(tempdata))] = 0
if np.isnan(tempdata[0]):
tempdata = tempdata[1:]
if np.isnan(tempdata[-1]):
tempdata = tempdata[:-1]
outliers = np.where(tempdata == 0)[0]
if outliers[-1] == len(tempdata)-1:
outliers = outliers[:-1]
if outliers[0] == 0:
outliers = outliers[1:]
tempdata[outliers] = 1/2*(tempdata[outliers-1] + tempdata[outliers+1])
tempdata = tempdata-np.mean(tempdata)
preacc = self.get_acc2(tempdata)
if i == 0:
acc = preacc
else:
acc = np.hstack((acc, preacc))
for i in range(0, n):
data = acc[i]
data = data[sample_rate_seconds:] #ndarray slices can't be deleted in place; drop the leading points instead
binnedData_1 = data
def geomean(self, iterable):
'''geometric mean used for codon sensitivity calculations
'''
a = np.array(iterable)
return a.prod()**(1.0/len(a))
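# e.g. geomean([1, 3, 9]) -> 3.0, the cube root of 27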
def SSA(self, k, t_array, inhibit_time=0, FRAP=False, Inhibitor=False):
'''
mRNA Translation simulation python implementation
given a propensity vector k, time array to record, and inhibitory conditions, run a single trajectory of translation simulation
The simulation is described here: [PUT LINK HERE TO PAPER]
*args*
**k**, propensity vector of size gene length + 2, [initiation rate, codon dependent rates, completion rate / unbinding rate]
for reference, the codon dependent rates refer to the rate at which a ribosome moves on to the next codon
**t_array**, time points at which to record the ribosome positions
*keyword args*
**inhibit_time**, the time to start inhibition assays if FRAP or Inhibitor (harringtonine) is True
**FRAP**, True or False to apply Fluorescence Recovery After Photobleaching (FRAP) https://en.wikipedia.org/wiki/Fluorescence_recovery_after_photobleaching
**Inhibitor**, True or False to apply harringtonine at inhibit_time. Harringtonine acts as a protein translation initiation inhibitor
'''
#SSA params and propensities
R = 10 #exclusion volume (ribosome footprint); ribosomes can't be less than 10 codons apart because of their physical size
kelong = np.array([k[1:-1]]).T #rates for ribosomes moving to the next codon, based on tRNA concentrations
N = len(kelong) #Number of codons in the mRNA
kbind = k[0] #rate for a ribosome to bind and start translation
kcompl = k[-1] #rate for a ribosome at the end of the mRNA to unbind
X = np.array([0, 0], dtype=int) #the updating ribosome position vector that is changed in the simulation
Ncol = np.zeros((1,0))
#example X arrays and how they are formatted:
# X = [423 30 10 0 ] read from left to right: there's a ribosome at positions 423, 30, and 10, with a 0 kept as a buffer for the simulation
t = t_array[0] #time point
Nt = len(t_array) #number of time points to record over
tf = t_array[-1] #final time point
N_rib = 200 #Maximum number of ribosomes on a single mRNA (hard limit for the simulation not a physical constant)
col = np.zeros((1,N_rib))
X_array = np.zeros((N_rib, Nt)) #recording array that records the ribosome positions over the time array points
NR = 0 #number of ribosomes bound
it = 1 #number of iterations
Sn_p = np.eye(max(NR+1, 2), dtype=int) #stoichiometry for the SSA
wn_p = np.zeros((X.shape[0], 1)) # propensities for the SSA
T = np.array([0, 0], dtype=float)
ribtimes = np.array([[0,0]],dtype=float)
col_points = []
#wn_p = np.zeros((1,X.shape[0])).flatten()
wshape = len(wn_p)
Inhibit_condition = 1 #set up inhibitor flags
while t < tf:
if Inhibitor == True:
if t >= inhibit_time:
Inhibit_condition = 0
else:
Inhibit_condition = 1
else:
Inhibit_condition = 1
if FRAP == True : #if the Photobleaching is happening, "remove" ribosomes
if t >= inhibit_time and t < inhibit_time + 20:
#X = np.array([0, 0])
a=1
#T = np.array([0,0])
oldNR = NR
#other options for NR calc
#NR = len(np.where(X>0)[0])
#NR = len(np.where(X!=0)[0])
#NR = len(np.argwhere(X))
#NR = np.nonzero(X)[0].shape[0]
#NR = max(0,len(X)-1)
#NR = np.sum(X!=0)
#NR = np.where(X!=0)[0][-1]+1
#NR = np.flatnonzero(X).shape[0]
NR = len(np.flatnonzero(X)) #each iteration get the number of ribosomes on the mRNA
if X.shape[0] < NR+1: #if the last reaction added a ribosome put a 0 on the end of X vec
X = np.append(X, [0])
T = np.append(T, [0])
T[-2] = t
X[-1] = 0
T[-1] = 0
X = X[0:max(NR, 1)+1] #clear any additional 0's on the end
T = T[0:max(NR, 1)+1]
if oldNR != NR: #if the number of ribosomes has changed reallocate the sizes of stoich and propensities
Sn_p = np.eye(max(NR+1, 2), dtype=int)
wn_p = np.zeros((X.shape[0], 1))
wshape = len(wn_p)
Sn = Sn_p
wn = wn_p
#get indices of where X vecs are > 0 ie where the ribosome values are
inds = X > 0
wn[inds] = kelong[X[inds]-1] #update propensities
if X[0] == N: #if the ribosome in the 0 position is at the end of the mRNA set propensities to the reaction for completion
Sn[:, 0] = (np.append(X[1:], np.array([0]))-X[0:])
wn[0] = kcompl
#if there are no ribosomes or when there is enough room for a new ribosome to bind add the propensity for binding
if NR == 0:
wn[NR] = kbind*Inhibit_condition
if NR > 0 and X[NR-1] > R:
wn[NR] = kbind*Inhibit_condition
REST = np.less(X[1:]+10, X[0:-1]) #apply the footprint condition ie set any propensities where it violates the > 10 codons apart rule to 0
wn[1:] = (wn[1:].T*REST).T #apply that logical^ to propensities
w0 = sum(wn.flat) #get the sum of propensities
randnum = np.random.random_sample(2) #update time to point of next reaction (exponential waiting time distb)
t = (t-np.log(randnum[0])/w0)
while it < Nt and t > t_array[it]: #record state if past timepoint
X_array[0:len(X), it] = X
it += 1
if t < tf: #if still running simulation pick which reaction happened via random number and propensity sum
r2 = w0*randnum[1]
tmp = 0
for i in range(wshape):
tmp = tmp + wn[i]
if tmp >= r2:
event = i
break
X = (X + Sn[:, event].T) #update X vector for new ribosome state
if np.sum(Sn[:,event]) < 0 :
ribtimes = np.vstack((ribtimes,[T[0],t]))
T[:-1] = T[1:]
Ncol = np.append(Ncol,col[0][0] )
col = np.atleast_2d(np.append(col[:,1:],[0]))
else:
if X[event-1] == X[event] + R:
col[0][event] +=1
col_points.append( (X[event],t) )
return X_array,ribtimes[1:,:],Ncol,col_points #return the completed simulation
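# Minimal usage sketch (hypothetical rates; assumes an instance `m` of this class):
#   L = 100                                   # codons in the mRNA
#   k = [0.03] + [10.0]*L + [10.0]            # [initiation, per-codon elongation, termination]
#   t = np.linspace(0, 500, 501)
#   X_array, ribtimes, Ncol, col_points = m.SSA(k, t)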
def get_acc2(self, data, trunc=False):
'''
Get autocorrelation function
*NOT* multi-tau
'''
N = len(data)
fvi = np.fft.fft(data, n=2*N)
acf = fvi*np.conjugate(fvi)
acf = np.fft.ifft(acf)
acf = np.real(acf[:N])/float(N)
if trunc:
acf[acf < 0]=0
for i in range(1, len(acf)):
if acf[i] > acf[i-1]:
acf[i] = acf[i-1]
return acf
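# Sanity-check sketch: for zero-mean white noise the autocorrelation should be
# ~var(x) at lag 0 and near zero at later lags (assumes an instance `m` of this class):
#   x = np.random.randn(4096); x -= x.mean()
#   acf = m.get_acc2(x)   # acf[0] ~ np.var(x), acf[1:] ~ 0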
#
# def get_cc(self, data1,data2, trunc=False):
# '''
# Get crosscorrelation function fft version
#
# data1, data2 are 1xN vectors of signals to correlate
#
# '''
# N = len(data1)
# fvi_1 = np.fft.fft(data1, n=2*N)
# fvi_2 = np.fft.fft(data2, n=2*N)
#
# ccf = fvi_1*np.conjugate(fvi_2)
# ccf = np.fft.ifft(ccf)
# ccf = np.real(ccf)/float(N)
# #ccf = np.hstack((ccf[::-1][:-1],ccf))
#
# if trunc:
# ccf[ccf < 0]=0
# for i in range(1, len(ccf)):
# if ccf[i] > ccf[i-1]:
# ccf[i] = ccf[i-1]
# return ccf
def elongation_animation(self, ti=0, tf=1000, tstep=1000, cell_radius=50, imagesize=5, dpi=90, filename='simulated_cell', ssa_obj=None, fcolor='#00FF00' ,rnacolor='#FF0000', xkcd=False):
'''
function that creates an mRNA translation animation
'''
custom_cmap = ['#69dd42', '#e5361b', '#db11c7']
def rpts(x, y, angle):
nx = np.cos(angle)*x - np.sin(angle)*y
ny = np.sin(angle)*x + np.cos(angle)*y
return nx, ny
def update_line(num, xpos, ypos, line): #function for the FuncAnimation
if num != 0:
ax.get_lines()[-1].remove()
for child in ax.get_children(): #remove the previous patch collection (green spots)
if isinstance(child, PatchCollection):
child.remove()
patches = []
gp = []
ep = []
radi = np.ones(xpos[:, inds[num]].shape)*4 #fixed radius of 4 for the ribosome markers
ypos = np.ones(xpos[:, inds[num]].shape)*(ytop+3)
x = xpos[:, inds[num]]
x[np.where(x == 0)] = x[np.where(x == 0)] - 300
for x1, y1, r in zip(xpos[:, inds[num]], ypos, radi): #make circle objects of radius based on ivec
circle = mpatches.Circle((x1, y1), r, facecolor='#FF0000', edgecolor='k')
patches.append(circle)
pcolor = custom_cmap[0]
for i in range(len(x.flatten())):
if x[i] > 0:
xpts = np.linspace(0, int(x[i])-1, int(x[i]))
ypts = 5*np.sin(1/10*np.linspace(0, int(x[i])-1, int(x[i])))
xpts, ypts = rpts(ypts, xpts, 1)
ypts = ypts+ytop+3
xpts = xpts+x[i]
radi = np.ones(xpts.shape)*1
k = 0
ypts = np.fliplr(np.atleast_2d(ypts))
ypts = ypts.flatten()
xpts = np.fliplr(np.atleast_2d(xpts))
xpts = xpts.flatten()
for x2, y2, r2 in zip(xpts, ypts, radi):
probloc = False
j = 0
for key in epitopes.keys():
if k in epitopes[key]:
probloc = True
pcolor = custom_cmap[j]
j += 1
rx = np.random.rand()*2
ry = np.random.rand()*2
if probloc == False:
circle = mpatches.Circle((x2+rx, y2+ry), r2, facecolor='#0000FF', edgecolor='#FFFFFF', lw=2, ls='solid')
gp.append(circle)
else:
circle = mpatches.Circle((x2+rx, y2+ry), r2*3, facecolor='#00FF00', edgecolor='#000000', lw=2, ls='solid')
ep.append(circle)
k += 1
#fig.gca().add_artist(circle)
'''
xs = np.flip(np.sort(xpos[:,inds[num]][0].flatten()),axis=0)
for i in range(max_ribs):
line.set_data(xpos[:,inds[num]],ypos[inds[num]])
line.set_linewidth(0)
line.set_marker('o')
line.set_markersize(3)
'''
p = PatchCollection(patches, facecolors=('#FF0000',), zorder=5) #create a patch collection to add to axis
m = PatchCollection(gp, facecolors=('#0000FF',), lw=2, zorder=3) #create a patch collection to add to axis
e = PatchCollection(ep, facecolors=(pcolor,), zorder=4)
n = num
ax.plot(np.linspace(0, tag_length, len(ssa_obj.time_vec_fixed[ssa_obj.start_time:]))[:n], 3*ssa_obj.intensity_vec.flatten()[:n]+total_length, color=pcolor)
fldot = mpatches.Ellipse((total_length-30, total_length+40), width=ssa_obj.intensity_vec.flatten()[n], height=ssa_obj.intensity_vec.flatten()[n]*1.0, color=pcolor)
f = [fldot]
fe = PatchCollection(f, facecolors=(pcolor,), zorder=4)
ax.add_collection(p) #adds the circles to axis
ax.add_collection(m) #adds the circles to axis
ax.add_collection(e)
ax.add_collection(fe)
plt.xlabel(str(inds[num])) #update time label
return line,
if ssa_obj is None:
ssa_obj = self.ssa_solver(n_traj=1, tf=tf, tstep=tstep)
if xkcd == True:
plt.xkcd()
fig1 = plt.figure(figsize=(imagesize+5, imagesize), dpi=dpi) #make figure
fig1.tight_layout()
ax = fig1.add_subplot(111)
ax.set_aspect(1)
tag_length = self.POI.tag_length
total_length = self.POI.total_length
epitopes = self.POI.tag_epitopes
tag_length = total_length - self.POI.gene_length
ax.cla()
ybot = 90
ytop = 110
ax.plot([0, total_length], [ybot, ybot], color='white', zorder=3)
ax.plot([0, total_length], [ytop, ytop], color='white', zorder=3)
ax.plot([0, 0], [ybot, ytop], color='white', zorder=3)
ax.plot([total_length, total_length], [ybot, ytop], color='white', zorder=3)
ax.axis([-10, total_length+10, 80, total_length+np.max(ssa_obj.intensity_vec)*3+20])
ax.plot([tag_length, tag_length], [ybot, ytop], color='white', linewidth=1, zorder=3)
k = 0
for key in epitopes.keys():
for i in range(len(epitopes[key])):
ax.plot([epitopes[key][i], epitopes[key][i]], [ybot, ytop], color=custom_cmap[k], linewidth=2, zorder=3)
rect = mpatches.Rectangle(xy=(tag_length, ybot), width=total_length-tag_length, height=ytop-ybot, color='#0000FF')
#ax.fill_between([tag_length,tag_length,total_length,total_length],[ybot,ytop,ytop,ybot],color='#00FF00')
ax.add_patch(rect)
k += 1
ticks = np.linspace(0, total_length, 10).astype(int)
ax.set_xticks(ticks)
ax.set_xlabel('Codon Position')
ax.get_yaxis().set_visible(False)
ax.set_facecolor('k')
filename = 'elong.gif'
Writer = animation.writers['pillow']
print('making movie...')
max_ribs = np.max(np.nonzero(ssa_obj.solutions[0])[0])
l, = plt.plot([], [], 'r-')
t = ssa_obj.time_vec_fixed[ssa_obj.start_time:]
inds = np.linspace(0, len(t)-1, len(t)).astype(int)
xpos = np.zeros((max_ribs, len(ssa_obj.time_vec_fixed[ssa_obj.start_time:])))
ypos = np.ones((1, len(ssa_obj.time_vec_fixed[ssa_obj.start_time:]))).flatten()
xpos[:, :] = ssa_obj.solutions[0][:max_ribs, ssa_obj.start_time:len(ssa_obj.time_vec_fixed)]
writer = Writer(fps=30, metadata=dict(artist='Me'), bitrate=1800)
line_ani = animation.FuncAnimation(fig1, update_line, len(ssa_obj.time_vec_fixed[ssa_obj.start_time:]), fargs=(xpos, ypos, l),
interval=50, blit=True)
line_ani.save((filename), writer=writer) #save the animation
def simulate_cell(self, diffusion_constant, kon, koff, kRNA, kdecay, ti=0, tf=1000, tstep=1000, cell_radius=50, imagesize=5, dpi=90, filename='simulated_cell', ssa_obj=None, fcolor='#00FF00', rnacolor='#FF0000'):
'''
Reaction scheme (kon/koff toggle the RNA between inactive and translating states):
[DNA] ==kRNA==> [RNA] ==kon==> [RNA*] ==translation simulation==> [Protein] ===> null
[RNA] <==koff== [RNA*]
[RNA] ==kdecay==> null
'''
print('simulating RNA creation....')
t = np.linspace(ti, tf, tstep)
dna_s = np.array([[ 0, 0],
[ 1, -1]])
dna_w1 = np.array([[kRNA, 0],
[0, 0]],dtype=float)
dna_w0 = np.array([[0], [0]])
dna_si = GenericSSA(type='linear' )
dna_si.W1 = dna_w1
dna_si.W0 = dna_w0
dna_si.S = dna_s
dna_si.ti = t[0]
dna_si.tf = t[-1]
dna_si.n = 1
xi = np.zeros((2, 1))
xi[0] = 1
dna_si.xi = xi
dna_si.ptimes = len(t)
dna_si.time_variant = False
dna_si._solve(1)
rna_creation_data = dna_si.data
stoich = np.array([[ 0, 0, 1],
[ -1, 1, -1],
[ 1, -1, 0]])
propensity = np.array([[0, kon, 0],
[0, 0,koff],
[0,kdecay, 0]], dtype=float)
w0 = np.array([[0],[0],[0]])
solver_instance = GenericSSA(type='linear' )
solver_instance.W1 = propensity
solver_instance.W0 = w0
solver_instance.S = stoich
solver_instance.ti = t[0]
solver_instance.tf = t[-1]
solver_instance.n = 1
xi = np.zeros((3,1))
xi[1] = 1
solver_instance.xi = xi
solver_instance.ptimes = len(t)
solver_instance.time_variant = False
print('simulating RNA activation....')
R = cell_radius
squarelen = float(R/np.sqrt(2))
n_RNA_t = np.zeros((len(t),int(np.max(rna_creation_data[1]))))
nRNA = 0
nparticles = (int(np.max(rna_creation_data[1])))
for i in range(len(t)):
while nRNA != rna_creation_data[1][i]:
data = solver_instance._solve(1)
rnaonoff = data[2] + 1 - data[0]
n_RNA_t[i:, nRNA] = rnaonoff[:-i].flatten()
nRNA += 1
rna_particles = n_RNA_t.T
rna_exist = np.where(rna_particles >0,1,0)
rnaex = data
print('simulating RNA motion....')
rna_locations = np.empty((nparticles, len(t), 2))
dt = t[-1]/len(t)
delta = diffusion_constant
def linecirc(m, b, xc, yc, r):
if np.isinf(m) == False:
a = 1+m**2
e = 2*(m*(b-yc)-xc)
c = yc**2+xc**2 + b**2-2*yc*b-r**2
x = np.roots([a, e, c])
if np.isreal(x).all() == False:
x = [np.nan, np.nan]
y = [np.nan, np.nan]
else:
y = [b + m*x[0], b+m*x[1]]
elif abs(xc-b) > r:
x = [np.nan, np.nan]
else:
x = [b, b]
step = np.sqrt(r**2-(b-xc)**2)
y = [yc + step, yc-step]
return [x[0], y[0]], [x[1], y[1]]
def dist(x1, y1, x2, y2):
return np.sqrt((x1-x2)**2+(y1-y2)**2)
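# e.g. dist(0, 0, 3, 4) == 5.0; linecirc(0, 0, 0, 0, 1) gives the two points
# (+1, 0) and (-1, 0) where the x-axis crosses the unit circle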
for i in range(nparticles):
x = np.empty((2,len(t) - np.where(rna_exist[i] != 0 )[0][0] ))
centers = np.zeros(x.shape)
x[:,0] = np.random.random()*squarelen
x0 = [ ((R+squarelen/4) - (R-squarelen/4))*np.random.random() + (R-squarelen/4),((R+squarelen/4) - (R-squarelen/4))*np.random.random() + (R-squarelen/4) ]
x0 = x0 - np.array([R, R])
x[:,0] =x0
r = norm.rvs(size=np.array(x0).shape + (len(t) - np.where(rna_exist[i] !=0 )[0][0],), scale=delta*np.sqrt(dt))
out = np.empty(r.shape)
np.cumsum(r, axis=-1, out=out)
out += np.expand_dims(np.array(x0), axis=-1)
#out = np.array([[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36],
#[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36]])
centers = np.zeros(out.shape)
dists = np.zeros((x.shape[1], 1)).flatten()
incirc = np.hypot(out.T[:, 0]-centers.T[:, 0], out.T[:, 1]-centers.T[:, 1])
dists[np.where(out[0] != 0)] = incirc[np.where(out[0] != 0)]
while len(np.where(dists>R)[0]) != 0: #trajectory left the cell
out = out.T
left_cell = np.where(dists > R)[0][0]
pts = [[out[left_cell][0], out[left_cell][1]], [out[left_cell-1][0], out[left_cell-1][1]]]
p = np.polyfit([out[left_cell][0], out[left_cell-1][0]], [out[left_cell][1], out[left_cell-1][1]], 1)
m = p[0]
b = p[1]
intercepts = linecirc(m, b, 0, 0, R)
if dist(*tuple(intercepts[0])+tuple(pts[0])) > dist(*tuple(intercepts[1])+tuple(pts[0])):
inter = np.array(intercepts[1])
else:
inter = np.array(intercepts[0])
a = out[left_cell] - inter
out[left_cell-1:] = out[left_cell-1:] - 2*(np.dot(inter, a)/np.linalg.norm(inter)**2)*inter
dists = np.zeros((x.shape[1], 1)).flatten()
out = out.T
incirc = np.hypot(out.T[:, 0]-centers.T[:, 0], out.T[:, 1]-centers.T[:, 1])
dists[np.where(out[0] != 0)] = incirc[np.where(out[0] != 0)]
data = ((out.T).T*rna_exist[i][np.where(rna_exist[i] != 0)[0][0]:].T).T
data[np.where(rna_exist[i] != 0)[0][-1]- np.where(rna_exist[i] != 0)[0][0]+1 :] = -R
rna_locations[i, np.where(rna_exist[i] != 0)[0][0]:, :] = data
rna_locations[i, :np.where(rna_exist[i] != 0)[0][0], :] = -R
print(nparticles)
rna_loc_compressed = rna_locations[np.where(np.sum(np.sum(rna_locations+R, axis=1), axis=1) > 0)]
if ssa_obj is None:
print('no ssa data given')
print('simulating translation....')
print(int(rna_loc_compressed.shape[0]))
ssa_obj = self.ssa_solver(n_traj=int(rna_loc_compressed.shape[0]),tf=tf,tstep=tstep)
ivec = ssa_obj.intensity_vec/np.max(ssa_obj.intensity_vec)
ivec = ivec.T #get the intensity vec for the "fluorescence"
else:
print('Translation data given')
print('Given ' + str(ssa_obj.n_traj) + ' Needed '+str(int(rna_loc_compressed.shape[0])) )
if ssa_obj.n_traj < int(rna_loc_compressed.shape[0]):
print('simulating ' + str(int(rna_loc_compressed.shape[0]) - ssa_obj.n_traj) + ' additional trajectories....')
ssa_obj = self.ssa_solver_append(ssa_obj, n=int(rna_loc_compressed.shape[0]) - ssa_obj.n_traj)
ivec = ssa_obj.intensity_vec/np.max(ssa_obj.intensity_vec)
ivec = ivec.T #get the intensity vec for the "fluorescence"
else:
ivec = ssa_obj.intensity_vec[0:int(rna_loc_compressed.shape[0])]/np.max(ssa_obj.intensity_vec[0:int(rna_loc_compressed.shape[0])])
ivec = ivec[0:int(rna_loc_compressed.shape[0])].T #get the intensity vec for the "fluorescence"
print('making movie...')
#simulate brownian motion
def update_line(num, xpos,ypos, line): #function for the FuncAnimation
if num !=0:
for child in ax.get_children(): #remove the previous patch collection (green spots)
if isinstance(child, PatchCollection):
child.remove()
if isinstance(child, mpatches.Ellipse):
child.remove()
patches = []
radi = 3*ivec[inds[num]] #create a max radius of 3 for intensity vecs
for x1, y1, r in zip(xpos[inds[num]],ypos[inds[num]], radi): #make circle objects of radius based on ivec
circle = mpatches.Circle((x1, y1), r, color=fcolor)
patches.append(circle)
#fig.gca().add_artist(circle)
line.set_data(xpos[inds[num]], ypos[inds[num]])
line.set_linewidth(0)
line.set_marker('o')
line.set_markersize(3)
line.set_color(rnacolor)
p = PatchCollection(patches, zorder=3, facecolors=(fcolor,)) #create a patch collection to add to axis
ax.add_collection(p) #adds the circles to axis
p = mpatches.Circle((0,0), radius=R, color='black') #add the black circle
ax.add_patch(p)
whitep = mpatches.Ellipse((-R, -R), width=7, height=7, color='white', zorder=5) #add the black circle
ax.add_patch(whitep)
plt.xlabel(str(inds[num])) #update time label
return line,
xpos = rna_loc_compressed.T[0]
ypos = rna_loc_compressed.T[1]
filetype='.mov'
if filetype == '.gif':
Writer = animation.writers['pillow']
if filetype == '.html':
Writer = animation.writers['html']
if filetype == '.mov':
Writer = animation.writers['ffmpeg']
writer = Writer(fps=30, metadata=dict(artist='Me'), bitrate=1800)
fig1 = plt.figure(figsize=(imagesize, imagesize),dpi=dpi) #make figure
fig1.tight_layout()
ax = fig1.add_subplot(111)
plt.yticks([])
plt.xticks([])
p = mpatches.Circle((0, 0), radius=R, color='black') #add the black circle
ax.add_patch(p)
plt.gca().set_aspect('equal', adjustable='box')
l, = plt.plot([], [], 'r-')
plt.xlim(-R-10, R+10)
plt.ylim(-R-10, R+10)
plt.xlabel('0')
plt.title('Simulated Cell')
inds = np.linspace(0, len(t)-1, len(t)).astype(int)
#creates the animation
line_ani = animation.FuncAnimation(fig1, update_line, tstep, fargs=(xpos,ypos, l),
interval=50, blit=True)
line_ani.save((filename + filetype), writer=writer) #save the animation
#return solver_instance,n_RNA_t,rna_creation_data,data,rna_locations
return rna_locations, rna_loc_compressed, rna_particles, rna_creation_data, rna_exist, rnaonoff, rnaex
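# Usage sketch (hypothetical rate constants; assumes an instance `m` with an analyzed POI):
#   out = m.simulate_cell(diffusion_constant=1.0, kon=0.1, koff=0.1, kRNA=0.02, kdecay=0.005)
#   rna_locations = out[0]   # (n_particles, n_times, 2) RNA coordinates over time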
def get_simulated_mov(self, ssa_obj, filename, filetype):
'''
Create a gif or html file of the simulated circular cell from any sms ssa object
'''
R = 50 #set up the random circle and points
num = ssa_obj.n_traj
r1 = np.zeros((1, num)).flatten()
theta1 = np.zeros((1, num)).flatten()
x1 = np.zeros((1, num)).flatten()
y1 = np.zeros((1, num)).flatten()
r2 = np.zeros((1, num)).flatten()
theta2 = np.zeros((1, num)).flatten()
x2 = np.zeros((1, num)).flatten()
y2 = np.zeros((1, num)).flatten()
for n in range(0, num): #for all trajectories make initial points
r1[n] = R*np.sqrt(np.random.random(1))
r2[n] = R*np.sqrt(np.random.random(1))
theta1[n] = 2*np.pi*np.random.random(1)
theta2[n] = 2*np.pi*np.random.random(1)
x1[n] = np.cos(theta1[n])*r1[n]
x2[n] = np.cos(theta2[n])*r2[n]
y1[n] = np.sin(theta1[n])*r1[n]
y2[n] = np.sin(theta2[n])*r2[n]
movement = .7
xpos = np.zeros((len(ssa_obj.time_vec_fixed[ssa_obj.start_time:]), num))
ypos = np.zeros((len(ssa_obj.time_vec_fixed[ssa_obj.start_time:]), num))
#over time, perturb the simulated ribosome positions and save to xpos, ypos
for j in range(len(ssa_obj.time_vec_fixed[ssa_obj.start_time:])):
if j == 0:
xpos[0] = x1
ypos[0] = y1
else:
for i in range(0,num):
xpos[j, i] = xpos[j-1,i]-movement + 2*movement*np.random.random(1)
if xpos[j, i] > 52:
xpos[j, i] = 51
if xpos[j, i] < -52:
xpos[j, i] = -51
ypos[j, i] = ypos[j-1,i]-movement + 2*movement*np.random.random(1)
if ypos[j, i] > 52:
ypos[j, i] = 51
if ypos[j, i] < -52:
ypos[j, i] = -51
ivec = ssa_obj.intensity_vec/np.max(ssa_obj.intensity_vec)
ivec = ivec.T #get the intensity vec for the "fluorescence"
k = 0
def update_line(num, xpos, ypos, line): #function for the FuncAnimation
if num !=0:
for child in ax.get_children(): #remove the previous patch collection (green spots)
if isinstance(child, PatchCollection):
child.remove()
patches = []
radi = 3*ivec[inds[num]] #create a max radius of 3 for intensity vecs
for x1, y1, r in zip(xpos[inds[num]],ypos[inds[num]], radi): #make circle objects of radius based on ivec
circle = mpatches.Circle((x1, y1), r, color='#00FF00')
patches.append(circle)
#fig.gca().add_artist(circle)
line.set_data(xpos[inds[num]],ypos[inds[num]])
line.set_linewidth(0)
line.set_marker('o')
line.set_markersize(.5)
p = PatchCollection(patches, zorder=2, facecolors=('#00FF00',)) #create a patch collection to add to axis
ax.add_collection(p) #adds the circles to axis
plt.xlabel(str(inds[num])) #update time label
return line,
if filetype == '.gif':
Writer = animation.writers['pillow']
if filetype == '.html':
Writer = animation.writers['html']
writer = Writer(fps=30, metadata=dict(artist='Me'), bitrate=1800)
fig1 = plt.figure() #make figure
fig1.tight_layout()
ax = fig1.add_subplot(111)
plt.yticks([])
plt.xticks([])
p = mpatches.Circle((0, 0), radius=65, color='black') #add the black circle
ax.add_patch(p)
plt.gca().set_aspect('equal', adjustable='box')
l, = plt.plot([], [], 'r-')
plt.xlim(-70, 70)
plt.ylim(-70, 70)
plt.xlabel('0')
plt.title('Simulated Cell')
inds = np.linspace(0, xpos.shape[0]-1, 120).astype(int)
#creates the animation
line_ani = animation.FuncAnimation(fig1, update_line, 120, fargs=(xpos,ypos, l),
interval=50, blit=True)
line_ani.save((filename + filetype), writer=writer) #save the animation
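# Usage sketch (assumes an instance `m` and an existing ssa_obj):
#   m.get_simulated_mov(ssa_obj, 'cell_movie', '.gif')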
def analyze_seq_file(self, filename):
'''
General catch all to run all functions necessary before a SSA and store the first POI found from any given sequence
*args*
**filename** a txt or gb file to be read and analyzed
'''
self.open_seq_file(filename)
self.get_orfs(self.sequence_str, min_codons=80)
self.get_temporal_proteins()
self.analyze_poi(self.pois[0], self.pois_seq[0])
self.POI.k = self.get_k(self.POI.nt_seq, .03, 10)
probe_vec,probe_loc = self.get_probvec()
self.POI.probe_vec = probe_vec
self.POI.probe_loc = probe_loc
def run_default(self):
self.get_orfs(self.sequence_str, min_codons=80)
self.get_temporal_proteins()
self.analyze_poi(self.pois[0], self.pois_seq[0])
def get_gb_file(self, accession_number, savetofile=False):
'''
A function to poll genbank given an accession number and pull the relevant gb file
*args*
**accession_number**, the accession number of the sequence to find.
http://www.nslc.wustl.edu/elgin/genomics/bio4342/1archives/2006/AccReference.pdf
*keyword args*
**savetofile**, true or false to save the gb file in the same directory as sms for future use
'''
Entrez.email = "<EMAIL>"
Entrez.tool = 'SingleMoleculeSimulator'
er = False
try:
handle = Entrez.efetch(db="nucleotide", rettype="gb", retmode="text", id=accession_number)
gb_record = SeqIO.read(handle, "genbank") #using "gb" as an alias for "genbank"
handle.close()
except:
er = True
time.sleep(2)
if er == True:
print('HTTP Error: Could not find specified accession ID')
return
self.gb_rec = gb_record
self.gb_obj = gb_record
self.sequence_str = str(gb_record.seq)
self.sequence_name = gb_record.name
if savetofile:
filename = self.sequence_name
f = open(filename, 'w')
f.write(self.gb_rec.format('gb'))
f.close()
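# e.g. m.get_gb_file('NM_001101', savetofile=True)   # NM_001101: human beta-actin mRNA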
def tau_plot(self,ssa_obj,t,tau,plot_type='contour', plot_all = False):
'''
Plot the intensity at time t against the intensity at time tau for a given ssa object.
plot_type options: 'Average', 'window', 'density', 'set_tau'
'''
stime = ssa_obj.time_rec-ssa_obj.start_time
idx_t = (np.abs(stime - t)).argmin()
idx_tau = (np.abs(stime - tau)).argmin()
diff = idx_tau - idx_t
difftime = t-tau
if plot_type == 'Average':
fig,ax= plt.subplots()
for i in range(len(stime)-idx_tau,0,-4):
idx_tau = (np.abs(stime- (stime[i]+difftime ))).argmin()
Itau = ssa_obj.intensity_vec[:,idx_tau]
x,y = np.mean(ssa_obj.intensity_vec[:,idx_tau]/np.sum(ssa_obj.probe)),np.mean(ssa_obj.intensity_vec[:,idx_tau+diff]/np.sum(ssa_obj.probe))
ax.scatter(x, y, zorder=3, color=cm.viridis_r(1.*i/len(stime))) #assumed plotting step, mirroring the 'window' branch below
if plot_type == 'window':
minx = 10000000
maxx = 0
miny = 10000000
maxy = 0
fig,ax= plt.subplots()
for i in range(len(stime)-idx_tau,0,-10):
idx_tau = (np.abs(stime - (idx_t+i))).argmin()
Itau = ssa_obj.intensity_vec[:,idx_tau]
x,y = np.mean(ssa_obj.intensity_vec[:,idx_tau]/np.sum(ssa_obj.probe)),np.mean(ssa_obj.intensity_vec[:,idx_tau+diff]/np.sum(ssa_obj.probe))
minx = min(np.min(x),minx)
miny = min(np.min(y),miny)
maxx = max(np.max(x),maxx)
maxy = max(np.max(y),maxy)
ax.scatter(x, y,zorder=3,color= cm.viridis_r(1.*i/len(stime)))
c_map_ax = fig.add_axes([.95, 0.1, 0.1, 0.8])
c_map_ax.axes.get_xaxis().set_visible(False)
cbar = mpl.colorbar.ColorbarBase(c_map_ax, cmap=cm.viridis_r, orientation = 'vertical')
cbar.ax.set_yticklabels(np.linspace(idx_t,stime[-1],6).astype(int) )
cbar.ax.set_title('t')
ax.plot([min(minx,miny),max(maxx,maxy)],[min(minx,miny),max(maxx,maxy)], color='red',ls='--')
ax.set_ylabel(('<I(t=' + 't + tau'+')>'))
ax.set_xlabel(('<I(t=' +'t'+')>'))
ax.set_title(( 'Average I(t) vs Average I(t+tau) for tau = ' + str(diff) ) )
if plot_type == 'density':
fig,ax= plt.subplots()
nbins = int(np.max(ssa_obj.intensity_vec/np.sum(ssa_obj.probe)))+2
x, y = ssa_obj.intensity_vec[:,idx_t]/np.sum(ssa_obj.probe),ssa_obj.intensity_vec[:,idx_tau]/np.sum(ssa_obj.probe)
k = kde.gaussian_kde([x,y])
xi, yi = np.mgrid[x.min():x.max():nbins*1j, y.min():y.max():nbins*1j]
zi = k(np.vstack([xi.flatten(), yi.flatten()]))
R = pearsonr(x,y)[0]
ax.set_title(('Density Plot' + ' R = ' + str(np.round(R,3))))
ax.pcolormesh(xi, yi, zi.reshape(xi.shape), shading='gouraud', cmap=plt.cm.viridis)
ax.contour(xi, yi, zi.reshape(xi.shape) )
ax.set_ylabel(('I(t=' + str(tau)+')'))
ax.set_xlabel(('I(t=' + str(t)+')'))
fig.show()
if plot_type == 'set_tau':
fig,ax= plt.subplots()
for i in range(len(stime)-diff-idx_t):
idx_tau = (np.abs(stime - (idx_t+i))).argmin()
plt.scatter(ssa_obj.intensity_vec[:,i]/np.sum(ssa_obj.probe), ssa_obj.intensity_vec[:,i+diff]/np.sum(ssa_obj.probe)) #y-argument assumed: the intensity shifted by the fixed tau
# adding some initial comments to test git
# one more change here. Messing with git commits on local machine.
import requests
import json
import pandas as pd
import numpy as np
import time
import random
import re
import os
#NLTK
import nltk
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer, WordNetLemmatizer
from nltk.corpus import wordnet
from nltk.collocations import *
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import CountVectorizer
# stop words
stop_words = stopwords.words('english')
# bi gram and tri gram method assignment
bigram_measures = nltk.collocations.BigramAssocMeasures()
trigram_measures = nltk.collocations.TrigramAssocMeasures()
def ngram_helper(tokens, bi_or_tri, freq_filter, junk_words):
"""
Overview: Returns a sorted list of bi or tri gram scored results
Params:
tokens --> word tokens
bi_or_tri --> 'bi' or 'tri': string
freq_filter --> frequency requirement: int
junk_words --> list of words to filter out: list
Returns:
sorted list of bi or tri grams w/ score
"""
if bi_or_tri == 'tri':
# NLTK Bigram / Trigram finders and filters
finder = TrigramCollocationFinder.from_words(tokens)
# Omits junk words
finder.apply_word_filter(lambda x: x in junk_words)
# Frequency requirement
finder.apply_freq_filter(freq_filter)
scored = finder.score_ngrams(trigram_measures.raw_freq)
return sorted(trigram for trigram, score in scored)
elif bi_or_tri == 'bi':
# NLTK Bigram / Trigram finders and filters
finder = BigramCollocationFinder.from_words(tokens)
# Omits junk words
finder.apply_word_filter(lambda x: x in junk_words)
# Frequency requirement
finder.apply_freq_filter(freq_filter)
scored = finder.score_ngrams(bigram_measures.raw_freq)
return sorted(bigram for bigram, score in scored)
else:
return 'Error'
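# Usage sketch (assumes `tokens` produced by tokenize() below):
#   bigrams = ngram_helper(tokens, 'bi', freq_filter=3, junk_words=['would', 'could'])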
# initial tokenization and clean-up
def tokenize(text):
"""
Takes a sentence, returns a cleaned and tokenized list
Params:
text --> string
Returns:
Tokenized list --> list
"""
# tokenize
tokens = nltk.word_tokenize(text)
# regex non-words
tokens = [re.sub(pattern = r'[^\w]', repl = "", string = word ) for word in tokens]
# remove empty strings from regex ops
tokens = [i for i in tokens if i]
# lowercase first so capitalized stop words are caught
tokens = [word.lower() for word in tokens]
# remove stop words
tokens = [word for word in tokens if word not in stop_words]
return tokens
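# e.g. tokenize("The cats are sleeping!") -> ['cats', 'sleeping']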
def get_pos_tags(tokens, pos):
"""
POS tags a tokenized list
Params:
tokens --> list
pos --> part of speech: 'adjective', 'verb', 'noun'
Returns:
list of tokens within specified pos
"""
# tag the words
words = nltk.pos_tag(tokens)
# index 0: word
# index 1: pos tag
noun_abv = ['NN', 'NNS', 'NNP', 'NNPS']
verb_abv = ['VB','VBD', 'VBG', 'VBP', 'VBZ']
adjective_abv = ['JJ', 'JJR', 'JJS']
if pos == 'adjective':
return [i[0] for i in words if i[1] in adjective_abv]
elif pos == 'noun':
return [i[0] for i in words if i[1] in noun_abv]
elif pos == 'verb':
return [i[0] for i in words if i[1] in verb_abv]
else:
return 'ERROR: pos arg not acceptable, try: adjective, noun, verb'
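# e.g. get_pos_tags(['dogs', 'run', 'fast'], 'noun') would likely return ['dogs']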
def vectorized_df(tokens):
"""
Overview:
Vectorizes and builds df for given tokens
Params:
tokens --> list of tokens
Returns:
df
"""
# Design the Vocabulary
count_vectorizer = CountVectorizer()
# Create the Bag-of-Words Model
wordbag = count_vectorizer.fit_transform(tokens)
# Build dataframe
feature_names = count_vectorizer.get_feature_names()
word_df = pd.DataFrame(wordbag.toarray(), columns = feature_names)
return word_df
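# Usage sketch:
#   df = vectorized_df(['cats', 'sleep', 'cats'])
#   df.sum().sort_values(ascending=False)   # cats: 2, sleep: 1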
def get_top_words(n_words, word_list):
"""
Overview:
returns top n-number words by count
Params:
n_words --> # of words to return
wordlist --> list of tokens
Returns:
List of n-words
"""
# nested arrays with unique words and their counts
unique = np.unique(word_list, return_counts = True)
# words array
words = unique[0]
# counts array
counts = unique[1]
# find top n-number of counts
sorted_counts = sorted(counts)[-1*n_words:]
# retrieve index for sorted_counts
index_ret = [np.where(counts == item)[0][0] for item in sorted_counts]
# assumed completion (the source is cut off here): map the top-count indices back to their words
return [words[i] for i in index_ret]
import multiprocessing
import numpy as np
import copy
import sys
from multiprocessing.dummy import Pool as ThreadPool
from multiprocessing import Lock
#import matlab.engine
import MLprobs as MLp
import Coreset as CS
import Util
import Grapher
import itertools
import math, time
import os
from sklearn.datasets import dump_svmlight_file
from scipy.interpolate import pchip
from scipy import stats
import MergeAndReduceTree
from datetime import datetime
from scipy.io import savemat
from pathlib import Path
from scipy.io import loadmat
#eng = matlab.engine.start_matlab()
class Evaluator(object):
mutex = Lock()
NUM_THREADS = multiprocessing.cpu_count()
NUM_THREADS =1
NUM_THREADS_ALL = multiprocessing.cpu_count() // 4
DEFAULT_NUMBER_OF_ADDED_TESTS = 1
def __init__(self, dataName, fileName, opts):
dataDir = 'data'
normalize = opts['normalize']
center = opts['center']
numRepetitions = opts['numRepetitions']
streaming = opts['streaming']
evaluateCVM = opts['evaluateCVM']
if streaming:
self.NUM_THREADS = 1
self.normalize = normalize
self.center = center
self.fileName = fileName
self.numRepetitions = opts['numRepetitions']
self.streamable = streaming
self.fileNameFullPath = None # Put here path to dataset file
self.C = 1.0
self.classify = False
self.data = Util.createData(self.fileNameFullPath, normalize, center)
modelSpec = Util.LoadModelSpec(self.data, self.classify, self.C)
self.mlProbs = MLp.MLProbs(copy.deepcopy(modelSpec), fileName, normalize, center, streaming)
self.data['sensitivity_murad_cenk'] = self.mlProbs.computeSensitivity(use_k_median=True) if not self.streamable else None
# temp = np.load('HTRU_2.npz')
self.data['timeTaken_murad_cenk'] = self.mlProbs.coreset_time if not self.streamable else 0.0
self.data['sensitivity_murad_cenk_means'] = self.mlProbs.computeSensitivity() if not self.streamable else None
self.data['timeTaken_murad_cenk_means'] = self.mlProbs.coreset_time if not self.streamable else 0.0
self.N = self.data['trainingData'].shape[0]
# Load the matrix and save it as svm light file for CVM eval.
X = self.data['trainingData'][:, 0:-1]
Y = self.data['trainingData'][:, -1]
self.cvmX = X
self.cvmY = Y
self.cvmFileName = fileName.replace('.mat', '')
self.evaluateCVM = evaluateCVM
if self.evaluateCVM:
dump_svmlight_file(X, Y, self.cvmFileName)
self.lambdaPegasos = 1.0 / (float(self.N) * self.C)
self.iterPegasos = 10000
self.timeStampDiff = 1e-3
self.batchSize = math.ceil(math.log(self.N))
self.fileNamePath = os.path.abspath("{}/{}".format(dataDir, fileName))
print('Done generating sensitivities')
print('Sum of sensitivities using $k$-median is {}'.format(np.sum(self.data['sensitivity_murad_cenk'])))
print('Sum of sensitivities using $k$-means is {}'.format(np.sum(self.data['sensitivity_murad_cenk_means'])))
self.samples = Util.Sampler(self.N, fileName, streaming).samples
self.uniformCoresets = [CS.Coreset() for j in range(numRepetitions)]
self.coresets = [CS.Coreset() for j in range(numRepetitions)]
self.output = []
self.title = (fileName.replace('.mat', '') + " ($N = {}$)".format(self.N)).replace(dataDir, '')
self.pegasosOn = False
self.batchSizeOn = (self.pegasosOn and False)
self.weights = np.array(self.data['weights']).flatten()
self.numOfAddedTests = self.DEFAULT_NUMBER_OF_ADDED_TESTS
# self.legend = ['Uniform Sampling', 'Tukan et al., 2020', 'Our Coreset', 'All Data']
self.legend = ['Uniform Sampling', 'Our Coreset: $k$-median', 'Our Coreset: $k$-means', 'All Data']
self.algorithms = [lambda i, sampleSize, seed: self.uniformCoresets[i].computeCoreset(self.data['trainingData'], \
np.ones((self.N, 1)), sampleSize, self.weights, seed),
lambda i, sampleSize, seed: self.coresets[i].computeCoreset(self.data['trainingData'], \
self.data['sensitivity_murad_cenk'], sampleSize, self.weights, seed), \
lambda i, sampleSize, seed: self.coresets[i].computeCoreset(self.data['trainingData'], \
self.data['sensitivity_our'], sampleSize, self.weights, seed)] if not self.streamable else \
[lambda i, sampleSize, seed:
MergeAndReduceTree.MergeAndReduceTree(self.data['trainingData'], sampleSize, \
sampleSize, self.mlProbs, True).runMergeAndReduce(seed),
lambda i, sampleSize, seed: MergeAndReduceTree.MergeAndReduceTree(self.data['trainingData'], sampleSize, \
sampleSize, self.mlProbs, False).runMergeAndReduce(seed)]
self.algorithms = [lambda i, sampleSize, seed: self.uniformCoresets[i].computeCoreset(self.data['trainingData'], \
np.ones((self.N, 1)), sampleSize, self.weights, seed),
lambda i, sampleSize, seed: self.coresets[i].computeCoreset(self.data['trainingData'], \
self.data['sensitivity_murad_cenk'], sampleSize, self.weights, seed),
lambda i, sampleSize, seed: self.coresets[i].computeCoreset(self.data['trainingData'], \
self.data['sensitivity_murad_cenk_means'], sampleSize, self.weights, seed)] \
if not self.streamable else \
[lambda i, sampleSize, seed:
MergeAndReduceTree.MergeAndReduceTree(self.data['trainingData'], sampleSize, \
sampleSize, self.mlProbs, True).runMergeAndReduce(seed),
lambda i, sampleSize, seed: MergeAndReduceTree.MergeAndReduceTree(self.data['trainingData'], sampleSize, \
sampleSize, self.mlProbs, False).runMergeAndReduce(seed)]
resultsDir = 'results'
append = ''
if self.streamable:
append += '-streaming'
resultsDir += append
self.append = append
self.EvaluateDataSet()
def EvaluateCoreset(self, coresetFunc, sampleSize):
DEBUG = False
e = lambda x: self.mlProbs.evaluateRelativeError(x[0][:, :-1], x[0][:, -1], x[1])
if self.classify:
e = lambda x: self.mlProbs.evaluateAccuracy(x[0], x[1], x[2])
if not DEBUG and self.NUM_THREADS > 1:
pool = ThreadPool(self.NUM_THREADS)
coresets = pool.map(lambda i, sampleSize=sampleSize: coresetFunc(i, sampleSize, i),
range(self.numRepetitions))
results = pool.map(e, coresets)
pool.close()
pool.join()
else:
coresets = [coresetFunc(i, sampleSize, i) for i in range(
self.numRepetitions)]
results = [e(x) for x in coresets]
# Return the (mean accuracy, mean computation time)
stat = lambda x : np.mean(x)
#stat = lambda x: np.median(x)
return stat([x[0] for x in results]), \
stat(np.array([x[1] for x in results]) + np.array([x[-1] for x in coresets])), \
stat([x[0].shape[0] for x in coresets]), stats.median_absolute_deviation([x[0] for x in results]), \
stats.median_absolute_deviation(np.array([x[1] for x in results]) + np.array([x[-1] for x in coresets]))
# std libs
import warnings
# third-party libs
import numpy as np
from numpy.lib.stride_tricks import as_strided
def _checks(wsize, overlap, n, axis):
# checks
if wsize <= 0:
raise ValueError(f'Window size ({wsize}) should be greater than 0')
if not 0 <= overlap < wsize:
raise ValueError(f'Overlap ({overlap}) should be greater than or equal to 0 '
f'and smaller than window size ({wsize})')
# FIXME: does not always pad out to the correct length!
def fold(a, wsize, overlap=0, axis=0, pad='masked', **kws):
"""
Fold (window) an array along a given `axis` at given `size`, with successive
windows overlapping each previous by `overlap` elements. This method
works on masked arrays as well and will fold the mask identically to the
data. By default the array is padded out with masked elements so that the
step size evenly divides the array along the given axis.
Parameters
----------
a : array-like, the array to fold
wsize : int, window size along `axis`
overlap : int, number of elements each window shares with the previous one
axis : int, axis along which to fold
pad : str, padding mode ('masked' pads out with masked elements)
kws
Keywords are passed to `np.pad` which pads up the array to the required
length.
Returns
-------
The folded array, with successive windows stacked along a new dimension.
Notes
-----
When overlap is nonzero, the array returned by this function will have
multiple entries **with the same memory location**. Beware of this when
doing inplace arithmetic operations on the returned array.
eg.:
>>> n, size, overlap = 100, 10, 5
>>> q = fold(np.arange(n), size, overlap)
>>> k = 0
>>> q[0, overlap + k] *= 10
>>> q[1, k] == q[0, overlap + k]
True
"""
a = np.asanyarray(a)
shape = a.shape
n = shape[axis]
_checks(wsize, overlap, n, axis)
# short circuits
if (n == wsize) and (overlap == 0):
return a.reshape(np.insert(shape, axis, 1))
if n < wsize:
warnings.warn(
f'Window size larger than array size along dimension {axis}')
return a.reshape(np.insert(shape, axis, 1))
# pad out
if pad:
a, n_seg = padder(a, wsize, overlap, axis, **kws)
#
sa = get_strided_array(a, wsize, overlap, axis)
# deal with masked data
if np.ma.isMA(a):
# assumed completion (the source is truncated here): fold the mask identically to the data, per the docstring
sa = np.ma.MaskedArray(sa, get_strided_array(np.ma.getmaskarray(a), wsize, overlap, axis))
return sa
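# e.g. fold(np.arange(10), 4, overlap=2) yields the windows
# [0..3], [2..5], [4..7], [6..9] (step = wsize - overlap = 2)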
#!/usr/bin/env python
# ------------------------------------------------------------------------------------------------------%
# Created by "Thieu" at 04:34, 03/02/2021 %
# %
# Email: <EMAIL> %
# Homepage: https://www.researchgate.net/profile/Nguyen_Thieu2 %
# Github: https://github.com/thieu1995 %
# ------------------------------------------------------------------------------------------------------%
from numpy import all, any, ndarray, array, isfinite, isnan, zeros, where
from numpy import max as np_max
class Root:
"""
This class is Abstract class for all other class to inherit
"""
def __init__(self, pareto_front=None, reference_front=None):
"""
:param pareto_front: list/tuple or 2d-array (matrix) of non-dominated front (pareto front obtained from your test case)
:param reference_front: list/tuple or 2d-array (matrix) of True pareto-front or your appropriate front you wish to be reference front
"""
self.messages = []
self.flag = True
self.n_objs = 0
# When creating the object, you can pass a pareto front of a different size, or even None. It won't affect the program.
# But when calling a function, if you pass None or a front of a different size --> this flag will be triggered
self.pareto_front = self.check_convert_front(pareto_front)
self.reference_front = self.check_convert_front(reference_front)
def check_convert_front(self, front=None, converted_type="float64"):
if front is None:
return None
else:
if type(front) in [list, tuple]:
front_temp = array(front)
##
# \brief Student's T copula.
from __future__ import print_function, absolute_import, division
import numpy as np
from scipy import stats
from scipy.special import gammaln
# STARVINE IMPORTS
from starvine.bvcopula.copula.copula_base import CopulaBase
from starvine.bvcopula.copula.mvtdstpack import mvtdstpack as mvt
def ggamma(x):
# assumed helper used by _pdf below: gamma function computed as exp(gammaln) for numerical stability
return np.exp(gammaln(x))
class StudentTCopula(CopulaBase):
"""!
@brief Student T copula
2 parameter model
\f$ \theta_0 == \rho \f$ (shape param, related to pearson's corr coeff)
\f$ \theta_1 == \nu \f$ (degrees of freedom)
\f$ \theta_0 \in (-1, 1), \f$
\f$ \theta_1 \in (2, \infty) \f$
"""
def __init__(self, rotation=0, init_params=None):
super(StudentTCopula, self).__init__(rotation, params=init_params)
self.thetaBounds = ((-1 + 1e-9, 1 - 1e-9), (2.0, np.inf),)
self.theta0 = (0.7, 10.0)
self.name = 't'
self.rotation = 0
@CopulaBase._rotPDF
def _pdf(self, u, v, rotation=0, *theta):
"""!
@brief Probability density function of T copula.
@param u <b>np_1darary</b>
@param v <b>np_1darary</b>
@param rotation <b>int</b> Optional copula rotation.
@param theta <b>list of float</b> list of parameters to T-copula
[Shape, DoF]
"""
# Constants
rho2 = np.power(theta[0], 2.0)
h1 = 1.0 - rho2
h2 = theta[1] / 2.0
h3 = h2 + 0.5
h4 = h2 + 1.0
h5 = 1.0 / theta[1]
h6 = h5 / h1
# T random var with theta[1] DoF parameter (unit SD, centered at 0)
t_rv = stats.t(df=theta[1], scale=1.0, loc=0.0)
# u and v must be inside the unit square ie. in (0, 1)
# clipMask = ((v < 1.0) & (v > 0.0) & (u < 1.0) & (v > 0.0))
UU = np.array(u)
VV = np.array(v)
# Percentile point function eval
x = t_rv.ppf(UU)
y = t_rv.ppf(VV)
x2 = np.power(x, 2.0)
y2 = np.power(y, 2.0)
p = ggamma(h4)*ggamma(h2)/np.sqrt(h1)/np.power(ggamma(h3),2)*np.power(1+h5*x2,h3)* \
np.power(1+h5*y2,h3)/np.power(1+h6*(x2+y2-2*theta[0]*x*y),h4)
if np.any(np.isinf(p)):
print("WARNING: INF probability returned by PDF")
return p
@CopulaBase._rotCDF
def _cdf(self, u, v, rotation=0, *theta):
rho = theta[0]
dof = int(round(theta[1]))
t_rv = stats.t(df=theta[1], scale=1.0, loc=0.0)
UU = np.array(u)
VV = np.array(v)
# Output storage
p = np.zeros(UU.size)
lower = np.zeros((UU.size, 2))
upper = np.zeros((UU.size, 2))
upper[:, 0] = t_rv.ppf(UU)
upper[:, 1] = t_rv.ppf(VV)
for i in range(UU.size):
lowerb = lower[i, :]
upperb = upper[i, :]
inFin = np.zeros(upperb.size, dtype='int') # integration limit setting
delta = np.zeros(upperb.size, dtype='double') # non centrality params
error, value, status = mvt.mvtdst(dof, lowerb, upperb, inFin, rho, delta)
p[i] = value
return p
@CopulaBase._rotH
def _h(self, v, u, rotation=0, *theta):
"""!
@brief H function (Conditional distribution) of T copula.
TODO: CHECK UU and VV ordering!
"""
kT = self.kTau(*theta)
kTs = kT / abs(kT)
kTM = 1 if kTs < 0 else 0
h1 = 1.0 - np.power(theta[0], 2.0)
nu1 = theta[1] + 1.0
dist1 = stats.t(df=theta[1], scale=1.0, loc=0.0)
dist2 = stats.t(df=nu1, scale=1.0, loc=0.0)
UU = np.array(kTM + kTs * u) # TODO: check input bounds
VV = np.array(v)
# inverse CDF yields quantiles
x = dist1.ppf(UU)
y = dist1.ppf(VV)
# eval H function
uu = dist2.cdf((x - theta[0] * y) / np.sqrt((theta[1] + np.power(y, 2)) * h1 / nu1)) #tail assumed from the standard T-copula h-function
return uu
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Defines unit tests for :mod:`colour.models.ipt` module.
"""
from __future__ import division, unicode_literals
import numpy as np
import sys
if sys.version_info[:2] <= (2, 6):
import unittest2 as unittest
else:
import unittest
from colour.models import XYZ_to_IPT, IPT_to_XYZ, IPT_hue_angle
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013 - 2014 - Colour Developers'
__license__ = 'New BSD License - http://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '<EMAIL>'
__status__ = 'Production'
__all__ = ['TestXYZ_to_IPT',
'TestIPT_to_XYZ',
'TestIPTHueAngle']
class TestXYZ_to_IPT(unittest.TestCase):
"""
Defines :func:`colour.models.ipt.TestXYZ_to_IPT` definition unit tests
methods.
"""
def test_XYZ_to_IPT(self):
"""
Tests :func:`colour.models.ipt.XYZ_to_IPT` definition.
"""
np.testing.assert_almost_equal(
XYZ_to_IPT(np.array([0.96907232, 1, 1.12179215])),
np.array([1.00300825, 0.01906918, -0.01369292]),
decimal=7)
np.testing.assert_almost_equal(
XYZ_to_IPT(np.array([1.92001986, 1, -0.1241347])),
np.array([0.73974548, 0.95333412, 1.71951212]),
decimal=7)
np.testing.assert_almost_equal(
XYZ_to_IPT(np.array([1.0131677, 1, 2.11217686])),
np.array([1.06406598, -0.08075812, -0.39625384]),
decimal=7)
class TestIPT_to_XYZ(unittest.TestCase):
"""
Defines :func:`colour.models.ipt.IPT_to_XYZ` definition unit tests
methods.
"""
def test_IPT_to_XYZ(self):
"""
Tests :func:`colour.models.ipt.IPT_to_XYZ` definition.
"""
np.testing.assert_almost_equal(
IPT_to_XYZ(np.array([1.00300825, 0.01906918, -0.01369292])),
np.array([0.9689994, 0.99995764, 1.1218432]),
decimal=7)
np.testing.assert_almost_equal(
IPT_to_XYZ(np.array([0.73974548, 0.95333412, 1.71951212])),
np.array([1.91998253, 0.99988784, -0.12416715]),
decimal=7)
np.testing.assert_almost_equal(
IPT_to_XYZ(np.array([1.06406598, -0.08075812, -0.39625384])),
np.array([1.0130757, 0.9999554, 2.11229678]),
decimal=7)
class TestIPTHueAngle(unittest.TestCase):
"""
Defines :func:`colour.models.ipt.IPT_hue_angle` definition unit tests
methods.
"""
def test_IPT_hue_angle(self):
"""
Tests :func:`colour.models.ipt.IPT_hue_angle` definition.
"""
np.testing.assert_almost_equal(
IPT_hue_angle(np.array([0.96907232, 1, 1.12179215])),
0.84273584954373859,
decimal=7)
np.testing.assert_almost_equal(
IPT_hue_angle(np.array([1.92001986, 1, -0.1241347])),
-0.12350291631562464,
decimal=7)
np.testing.assert_almost_equal(
IPT_hue_angle(np.array([1.0131677, 1, 2.11217686])),
# 2.
# Using the splitting of a Stieltjes matrix corresponding to its incomplete factorization by the ILU(k) method,
# implement a stationary iterative process and study its convergence rate
#
# p. 65 - basic stationary iterative processes
# p. 75 - accelerating the convergence of stationary iterative processes
#
# http://mathworld.wolfram.com/StationaryIterativeMethod.html
# Stationary iterative methods are methods for solving a linear system of equations Ax=b
#
import numpy as np
ITERATION_LIMIT = 1000
# initialize the matrix
A = np.array([[10., -1., 2., 0.],
[-1., 11., -1., 3.],
[2., -1., 10., -1.],
[0., 3., -1., 8.]])
# initialize the RHS vector
b = np.array([6., 25., -11., 15.])
def jacobi_method(A: np.ndarray, b: np.ndarray):
x = np.zeros_like(b)
for it_count in range(ITERATION_LIMIT):
# print("Current solution:", x)
x_new = np.zeros_like(x)
for i in range(A.shape[0]):
s1 = np.dot(A[i, :i], x[:i])
s2 = np.dot(A[i, i + 1:], x[i + 1:])
x_new[i] = (b[i] - s1 - s2) / A[i, i]
if np.allclose(x, x_new, atol=1e-8):
break
x = x_new
return x
def gauss_seidel(A: np.ndarray, b: np.ndarray):
x = np.zeros_like(b)
for it_count in range(ITERATION_LIMIT):
# print("Current solution:", x)
x_new = np.zeros_like(x)
for i in range(A.shape[0]):
s1 = np.dot(A[i, :i], x_new[:i])
s2 = np.dot(A[i, i + 1:], x[i + 1:])
x_new[i] = (b[i] - s1 - s2) / A[i, i]
if np.allclose(x, x_new, rtol=1e-8):
break
x = x_new
return x
def sor_method(A: np.ndarray, b: np.ndarray, w=1.0):
x = np.zeros_like(b)
for it_count in range(ITERATION_LIMIT):
# print("Current solution:", x)
x_new = np.zeros_like(x)
for i in range(A.shape[0]):
s1 = np.dot(A[i, :i], x_new[:i])
s2 = np.dot(A[i, i + 1:], x[i + 1:])
x_new[i] = (1.0 - w)*x[i] + w * (b[i] - s1 - s2) / A[i, i]
if np.allclose(x, x_new, rtol=1e-8):
break
x = x_new
return x
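# Quick comparison sketch using the A and b defined above (w=1.1 is a
# hypothetical relaxation factor for SOR):
if __name__ == "__main__":
    x_exact = np.linalg.solve(A, b)
    for solve in (jacobi_method, gauss_seidel, lambda A, b: sor_method(A, b, w=1.1)):
        x = solve(A, b)
        print(x, "error:", np.linalg.norm(x - x_exact))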
import sys, time, os, json
import numpy as np
import matplotlib.pylab as plt
from PIL import Image
from keras.models import *
from keras.layers import *
from keras.optimizers import *
from google.colab import drive
def Generator(img_size, noise_dim, class_num):
def deconv2d(x, filters):
x = UpSampling2D(2)(x)
x = Conv2D(filters, 4, padding='same', activation='relu')(x)
x = BatchNormalization(momentum=0.8)(x)
return x
#include the label in the input as well
noise = Input((noise_dim,))
if class_num:
label = Input((1,), dtype=np.int32)
label_embedding = Flatten()(Embedding(class_num, 100)(label))
x = multiply([noise, label_embedding])
else:
x = noise
# Expand the noise into a 3D tensor
t = img_size//16
x = Dense(512 * t * t, activation="relu", input_dim=noise_dim)(x)
x = Reshape((t, t, 512))(x)
x = BatchNormalization(momentum=0.8)(x)
# Deconvolve up to the image size
x = deconv2d(x, 512)
x = deconv2d(x, 256)
x = deconv2d(x, 128)
x = deconv2d(x, 64)
# Output as color channels
x = Conv2D(3, 4, padding='same', activation='tanh')(x)
if class_num:
return Model([noise, label], x)
else:
return Model(noise, x)
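# A usage sketch (not part of the original script): img_size must be divisible
# by 16, since the Dense layer starts from an (img_size//16)**2 grid and
# upsamples 2x four times. class_num=0 builds a plain generator; a nonzero
# class count enables the label-conditioned (cGAN-style) input path above.
# gen = Generator(img_size=64, noise_dim=100, class_num=0)
# cgen = Generator(img_size=64, noise_dim=100, class_num=10)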
def Discriminator(img_shape, class_num, cgan, acgan):
def d_layer(x, filters, bn=True, drop=0.0):
x = Conv2D(filters, 4, strides=2, padding='same')(x)
x = LeakyReLU(0.2)(x)
if drop:
x = Dropout(drop)(x)
if bn:
x = BatchNormalization(momentum=0.8)(x)
return x
# Include the label in the input as well
img = Input(img_shape)
if cgan:
label = Input((1,), dtype=np.int32)
label_embedding = Flatten()(Embedding(class_num, np.prod(img_shape))(label))
flat_img = Flatten()(img)
x = multiply([flat_img, label_embedding])
else:
x = img
# Convolve down to the PatchGAN size
x = d_layer(x, 64, bn=False, drop=0.25)
x = d_layer(x, 128, drop=0.25)
x = d_layer(x, 256, drop=0.25)
x = d_layer(x, 512, drop=0.25)
# Output a label in [0, 1]
x = Conv2D(1, 4, padding='same')(x)
if cgan:
return Model([img, label], x)
elif acgan:
# Include the label in the output as well
valid = Dense(1, activation="sigmoid")(x)
label = Dense(class_num, activation="softmax")(x)
return Model(img, [valid, label])
else:
return Model(img, x)
def create_models(gen_path, disc_path, noise_dim, class_num, cgan, acgan, img_size, img_shape):
opt = Adam(0.0002, 0.5)
# Generator and discriminator models
if os.path.isfile(disc_path):
gen = load_model(gen_path)
disc = load_model(disc_path)
else:
gen = Generator(img_size, noise_dim, class_num)
disc = Discriminator(img_shape, class_num, cgan, acgan)
disc.compile(loss='binary_crossentropy', optimizer=opt)
# Generator training model
disc.trainable = False
noise = Input((noise_dim,))
if cgan:
noise_label = Input((1,))
fake_img = gen([noise, noise_label])
valid = disc([fake_img, noise_label])
g_trainer = Model([noise, noise_label], valid)
elif acgan:
noise_label = Input((1,))
fake_img = gen([noise, noise_label])
valid, label = disc(fake_img)
g_trainer = Model([noise, noise_label], [valid, label])
else:
fake_img = gen(noise)
valid = disc(fake_img)
g_trainer = Model(noise, valid)
g_trainer.compile(loss='binary_crossentropy', optimizer=opt)
return gen, disc, g_trainer
def train(train_num, img_size, cgan=False, acgan=False):
# Mount the drive and create folders
drive_root = '/content/drive'
drive.mount(drive_root)
my_drive = "%s/My Drive"%drive_root
datasets_dir = "%s/datasets"%my_drive
train_dir = "%s/train/face%d_%d"%(my_drive,img_size,train_num)
if cgan: train_dir += "_cgan"
if acgan: train_dir += "_acgan"
imgs_dir = "%s/imgs"%train_dir
save_dir = "%s/save"%train_dir
os.makedirs(imgs_dir, exist_ok=True)
os.makedirs(save_dir, exist_ok=True)
# Training data
img_shape = (img_size,img_size,3)
x_train = np.memmap("%s/face%d_%d.npy"%(datasets_dir,img_size,train_num), dtype=np.uint8, mode="r", shape=(train_num,)+img_shape)
if cgan or acgan:
y_train = np.memmap("%s/tags%d_%d.npy"%(datasets_dir,img_size,train_num), dtype=np.uint32, mode="r", shape=(train_num))
class_num = np.max(y_train)
else:
class_num = 0
# Number of training iterations
epochs = 200
batch_size = 100
batch_num = train_num // batch_size
# Training info carried over from the previous run
info_path = "%s/info.json"%train_dir
info = json.load(open(info_path)) if os.path.isfile(info_path) else {"epoch":0}
last_epoch = info["epoch"]
# Models
noise_dim = 100
gen_path = "%s/gen.h5"%train_dir
disc_path = "%s/disc.h5"%train_dir
gen, disc, g_trainer = create_models(gen_path, disc_path, noise_dim, class_num, cgan, acgan, img_size, img_shape)
if last_epoch:
print_img(gen, img_size, noise_dim, class_num, imgs_dir, last_epoch)
#PatchGAN
patch_shape = (img_size//16, img_size//16, 1)
real = np.ones((batch_size,) + patch_shape)
# main.py
# ---------------------------
# main.py connects segmentation, stitching, and output into a single pipeline. It prints metadata about
# the run, and then initializes a segmenter and stitcher. Looping over all image files in the directory,
# each image is segmented, stitched, grown, and overlaps resolved. The data is concatenated if outputting
# as quantifications, and outputted per file for other output methods. This file can be run by itself by
# invoking python main.py or the main function imported.
import os
import sys
from src.cvsegmenter import CVSegmenter
from src.cvstitch_plane import CVMaskStitcher
from src.cvmask import CVMask
from src import cvutils
from src import cvvisualize
from src.my_fcswrite import write_fcs
from PIL import Image
import skimage
import tifffile
import json
import numpy as np
import pandas as pd
import tensorflow as tf
from collections import defaultdict
import matplotlib.pyplot as plt
from timeit import default_timer as timer
from time import sleep
def show(img):
fig = plt.figure()
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
ax.imshow(img, aspect='equal')
plt.show()
def main(indir, region_index=None, increase_factor=None, growth_plane=None, growth_quant_A=None, growth_quant_M=None, border_quant_M=None):
print('Starting CellSeg-CRISP')
sys.path.insert(0, indir)
from CellSeg_config import CSConfig
physical_devices = tf.config.experimental.list_physical_devices('GPU')
try:
for dev in physical_devices:
tf.config.experimental.set_memory_growth(dev, True)
except: # Invalid device or cannot modify virtual devices once initialized.
pass
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
cf = CSConfig(indir, increase_factor, growth_plane, growth_quant_A, growth_quant_M, border_quant_M)
print('Initializing CVSegmenter at', cf.DIRECTORY_PATH)
stitcher = CVMaskStitcher(overlap=cf.OVERLAP, min_area=cf.MIN_AREA)
if cf.OUTPUT_METHOD not in ['imagej_text_file', 'statistics', 'images', 'all']:
raise NameError('Output method is not supported. Check the OUTPUT_METHOD variable in CellSeg_config.py.')
growth = '_us{:.1f}_grow{:.1f}x{:.1f}x{:.1f}b{:.1f}'.format(cf.INCREASE_FACTOR, cf.GROWTH_PIXELS_MASKS, cf.GROWTH_PIXELS_PLANE, cf.GROWTH_PIXELS_QUANT_M, cf.BORDER_PIXELS_QUANT_M)
if region_index is not None and cf.FILENAMES:
cf.FILENAMES = [cf.FILENAMES[region_index]] if len(cf.FILENAMES) > region_index else []
for count, filename in enumerate(cf.FILENAMES):
t0_file = t0 = timer()
print(f'Processing image: {filename}')
path = os.path.join(cf.DIRECTORY_PATH, filename)
drc0, drc1, drca = None, None, None
with tifffile.TiffFile(path) as tif:
if 'ImageDescription' in tif.pages[0].tags:
try:
desc = json.loads(tif.pages[0].tags['ImageDescription'].value)
drcv = desc['drcv']
drc0 = desc['drc0']
drc1 = desc['drc1']
drca = desc['drca']
assert(drcv == 1) # no other versions have been implemented
print('Using DRC values:', drc0, drc1, drca)
except: pass
image = np.array(cf.READ_METHOD(path))
print('Load image: {:.1f}s'.format(timer()-t0)); t0=timer()
if cf.IS_CODEX_OUTPUT and image.ndim == 4:
image = np.transpose(image, (2, 3, 0, 1))
'''
Created on 29 Nov 2021
@author: <NAME> : <EMAIL>
TODO: Old cubes vs new cubes. Need to handle both. For example, the cal factor matrix.
TODO: Handle fits, sav and PDS files
TODO: Handle cube files with only one datastruct (hdu)
TODO: Discussion on why original cubes removed ymin offset from all images and backplanes?
Shouldn't we maintain pixel position throughout? For now, I'm just copying
things over, leaving the images at y=0
'''
import numpy as np
from numpy import genfromtxt
from astropy.io import fits
from astropy.table import Table
from uvis_template_factory import UVISFitsTemplateFactory
from pathlib import Path
from datetime import datetime
import pvl
from spiceypy import spiceypy as cspice
import subprocess
def cassini_uvis_euv_wavelengths(xbin):
"""
This function was originally written in IDL by the Cassini UVIS processing team.
The IDL source code can be found at https://github.com/Cassini-UVIS/tools
Ported to Python by <NAME> : https://github.com/emmalieb/SALSA
:param xbin: Number of wavelength elements to bin together.
:type xbin: int
:return: Array of wavelength values of size 1024/xbin
:rtype: numpy.array, float64
TODO: This should be moved to a general library of UVIS functions.
"""
RAD=180.0/np.pi
D=1.E7/1371.
ALP=8.03/RAD+.00032-.0000565
BET=(np.linspace(0,1023,1024)-511.5)*0.025*.9987/300.0
BET=np.arctan(BET)-1.19/RAD+.00032-.0000565
LAM=D*(np.sin(ALP)+np.sin(BET))
e_wavelength=LAM
if xbin == 1:
return e_wavelength
e_wavelength=np.zeros(shape=(1024//xbin,))
for k in range(0,1024//xbin):
e_wavelength[k]=np.sum(LAM[k*xbin:(k+1)*xbin])/xbin  # exclusive upper bound, matching the FUV variant; IDL's inclusive slice would drop one sample here
return e_wavelength
def cassini_uvis_fuv_wavelengths(xbin):
"""
This function was originally written in IDL by the Cassini UVIS processing team.
The IDL source code can be found at https://github.com/Cassini-UVIS/tools
Ported to Python by <NAME> : https://github.com/emmalieb/SALSA
:param xbin: Number of wavelength elements to bin together.
:type xbin: int
:return: Array of wavelength values of size 1024/xbin
:rtype: numpy.array, float64
TODO: This should be moved to a general library of UVIS functions.
"""
RAD=180./np.pi
D=1.E7/1066
ALP=(9.22+.032)/RAD
ALP=ALP+3.46465e-5
BET=(np.linspace(0,1023,1024)-511.5)*0.025*0.99815/300.0
BET=np.arctan(BET)+0.032/RAD+3.46465e-5
lam=D*(np.sin(ALP)+np.sin(BET))
e_wavelength=lam
if xbin == 1:
return e_wavelength
e_wavelength=np.zeros(shape=(1024//xbin,))
for k in range(0,1024//xbin):
e_wavelength[k]=np.sum(lam[k*xbin:(k+1)*xbin])/xbin
return e_wavelength
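# A small usage sketch, assuming the two functions above: with xbin=1 each
# channel returns the full 1024-element wavelength grid (nominally in
# Angstroms); binning with xbin=2 halves it to 512 averaged values.
# fuv_wl = cassini_uvis_fuv_wavelengths(1)  # shape (1024,)
# euv_wl = cassini_uvis_euv_wavelengths(2)  # shape (512,)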
class CGCubeToFITS(object):
"""
This class maps an existing Cassini UVIS Cube file into the new FITS format
given a template.
"""
def __init__(self):
"""
Constructor.
"""
pass
def convert_to_fits(self, template_file, cube_file, output_file, pds_label_file,
kernel_list=None):
'''
Convert a UVIS Cube file into the new FITS format.
:param template_file: Path to spreadsheet containing the template.
:type template_file: pathLib.Path object.
:param cube_file: Path to original cube file.
:type cube_file: pathLib.Path object.
:param output_file: Path to output FITS file.
:type output_file: pathLib.Path object.
:param pds_label_file: Path to PDS Label file for this observation
:type pds_label_file: pathLib.Path object.
:param kernel_list: List of kernel files used (for metadata purposes).
:type kernel_list: List of strings.
TODO: Fields that are not present in the original cube:
NUMBER_OF_SAMPLES
OBS_SECONDS
OBS_TICKS
Fields present in the spreadsheet, not in the cube
PIXEL_CENTER_OCCULTATION_LATITUDE
PIXEL_CENTER_OCCULTATION_LONGITUDE
'''
# Create the template factory
factory = UVISFitsTemplateFactory()
self.factory = factory
with fits.open(cube_file) as hdu_list:
n_readouts = len(hdu_list[1].data['XBIN'][0])
new_hdu_list = factory.construct_fits(template_file, n_samples=n_readouts,
n_sp_kernels=3) # TODO: Need to dynamically set this.
self.new_hdu_list = new_hdu_list
#Populate Primary Header
self.populate_primary_header(new_hdu_list[0], output_file, pds_label_file)
channel = new_hdu_list[0].header['CHANNEL']
# Get the Pandas data frame that contains the template information
template = factory.get_template()
template = template[~template['HDU NAME'].str.contains('PRIMARY')]
hdu_names = template['HDU NAME']
field_names = template['FIELD NAME']
cg_field_names = template['CG FIELD NAME']
# Copy data from the cube file to the new fits file.
for cg_field_name, hdu_name, field_name in zip(cg_field_names, hdu_names, field_names):
if type(cg_field_name) is not str:
continue
new_data = new_hdu_list[hdu_name].data[field_name]
# Insert cube data into the new fits file. Dimensions will need to be reordered.
if hdu_name == 'DATA':
for hdu_number in (1, 2, 3):
cube_data = np.moveaxis(hdu_list[hdu_number].data[cg_field_name],
(0, 1, 2, 3), (1, 0, 2, 3))
new_data[:, hdu_number-1, :, :] = np.squeeze(cube_data)
elif (hdu_name == 'TIME') or (hdu_name == 'SC_GEOM'):
for hdu_number in (1, 2, 3):
if cg_field_name == 'UTC':
array_shape = new_data[:, hdu_number-1].shape
value = np.chararray(shape=array_shape, itemsize=26)
time = hdu_list[hdu_number].data[cg_field_name].squeeze()
for i in range(time.shape[0]):
value[i] = datetime.strptime(time[i], "%Y %b %d %H:%M:%S.%f").isoformat()
else:
value = np.moveaxis(hdu_list[hdu_number].data[cg_field_name],
(0,1), (1,0))
new_data[:, hdu_number-1] = value.squeeze()
elif hdu_name == 'CAL':
pass # TODO: The cal matrix has dimensions of (1, 20, 10240, 64)?
elif hdu_name == 'TARGET_GEOM':
for hdu_number in (1, 2, 3):
cube_shape = hdu_list[hdu_number].data[cg_field_name].shape
cube_data = hdu_list[hdu_number].data[cg_field_name].squeeze()
if len(cube_shape) == 2:
new_data[:, hdu_number-1] = cube_data
else:
new_data[:, hdu_number-1, :] = cube_data
elif hdu_name == 'FOV_GEOM':
field_shape = new_data.shape
for hdu_number in (1, 2, 3):
if len(field_shape) == 4:
cg_corner_field_names = cg_field_name.split('|')
corner_index = 0
for cg_fn in cg_corner_field_names:
cube_data = hdu_list[hdu_number].data[cg_fn]
new_data[:, hdu_number-1, :, corner_index] = np.squeeze(cube_data)
corner_index += 1
else:
cube_data = hdu_list[hdu_number].data[cg_field_name].squeeze()
new_data[:, hdu_number-1, :] = np.squeeze(cube_data)
else:
# All values in the array should be the same. Do a quick
# check of that here and raise an exception if it's not.
data_min = hdu_list[1].data[cg_field_name].min()
data_max = hdu_list[1].data[cg_field_name].max()
if data_min != data_max:
raise ValueError("All values in " + cg_field_name + " are not the same!")
new_data[0] = data_max
# Add wavelengths, since they are not in the original cube
xbin = hdu_list[1].data['XBIN'][0][0]
# wavelengths = cassini_uvis_fuv_wavelengths(xbin)
# new_hdu_list['WAVELENGTH'].data['WAVELENGTH_FUV'] = wavelengths
# wavelengths = cassini_uvis_euv_wavelengths(xbin)
# new_hdu_list['WAVELENGTH'].data['WAVELENGTH_EUV'] = wavelengths
table = Table(new_hdu_list['WAVELENGTH'].data)
name = new_hdu_list['WAVELENGTH'].name
header = new_hdu_list['WAVELENGTH'].header
if channel == 'FUV':
wavelengths = cassini_uvis_fuv_wavelengths(xbin)
new_hdu_list['WAVELENGTH'].data['WAVELENGTH_FUV'] = wavelengths
table = Table(new_hdu_list['WAVELENGTH'].data)
table.remove_column('WAVELENGTH_EUV')
else:
wavelengths = cassini_uvis_euv_wavelengths(xbin)
new_hdu_list['WAVELENGTH'].data['WAVELENGTH_EUV'] = wavelengths
table = Table(new_hdu_list['WAVELENGTH'].data)
table.remove_column('WAVELENGTH_FUV')
new_hdu_list['WAVELENGTH'] = fits.BinTableHDU(table, name=name, header=header)
# Kernel files
if kernel_list is not None:
self.populate_kernels_hdu(kernel_list)
def populate_kernels_hdu(self, kernel_list):
"""
Populate the spice kernels HDU.
:param kernel_list: List of spice kernels used for this observation
:type kernel_list: List of strings.
"""
kernels = np.array(kernel_list)
map = {}
map['LS_KRN'] = '.tls'
map['SCL_KRN'] = '.tsc'
map['SP_KRN'] = '.bsp'
map['F_KRN'] = '.tf'
map['PC_KRN'] = '.tpc'
map['C_KRN'] = '.bc'
map['INST_KRN'] = '.ti'
table = Table(self.new_hdu_list['KERNELS'].data)
name = self.new_hdu_list['KERNELS'].name
header = self.new_hdu_list['KERNELS'].header
for key, value in map.items():
w = np.flatnonzero(np.core.defchararray.find(kernels, value) != -1)
ks = np.transpose(kernels[w])
n = len(ks)
new_kernel_array = np.reshape(ks, (1, n))
table.replace_column(key, new_kernel_array)
self.new_hdu_list['KERNELS'] = fits.BinTableHDU(table, name=name, header=header)
def write_to_file(self, output_file, overwrite=True):
"""
Write to output file.
:param output_file: Path to output FITS file.
:type output_file: pathLib.Path object.
:param overwrite: Set to overwrite output file.
:type overwrite: boolean
"""
self.factory.write_to_file(output_file, overwrite=overwrite)
def populate_primary_header(self, hdu, cube_file, pds_label_file):
"""
Populate the primary header object.
:param hdu:
:type hdu:
:param cube_file:
:type cube_file:
:param pds_label_file:
:type pds_label_file:
"""
pds_label = pvl.load(pds_label_file)
names = []
values = []
names.append('FILENAME')
values.append(cube_file.name)
names.append('PROD_ID')
values.append(pds_label['PRODUCT_ID'])
names.append('DATE')
date_string = datetime.utcnow().isoformat()
values.append(date_string)
names.append('MISSION')
values.append('Cassini')
names.append('INSTRUME')
values.append(pds_label['INSTRUMENT_NAME'])
names.append('OBS_ID')
values.append(pds_label['OBSERVATION_ID'])
names.append('MPHASE')
values.append(pds_label['MISSION_PHASE_NAME'])
names.append('TRGTNAME')
values.append(pds_label['TARGET_NAME'])
names.append('OBS_UTC')
utc = pds_label['START_TIME'].isoformat()
values.append(utc)
names.append('OBS_ET')
cspice.furnsh('../kernels/lsk/naif0012.tls') # TODO: Dynamically download?
t = cspice.str2et(utc.split('+')[0])
values.append(t)
cspice.unload('../kernels/lsk/naif0012.tls')
names.append('END_UTC')
values.append(pds_label['STOP_TIME'].isoformat())
names.append('CHANNEL')
values.append(pds_label['PRODUCT_ID'][:3])
#TODO: Read version number from template or something.
names.append('VERSION')
values.append(1.0)
#TODO: Get ORBNUM dynamically. For now, this is just an example.
names.append('ORBNUM')
values.append(208)
for name, value in zip(names, values):
hdu.header[name] = value
class TitanCubeToFITS(CGCubeToFITS):
"""
This class maps an existing Cassini UVIS Cube file into the new FITS format
for the Titan data products.
"""
def convert_to_fits(self, template_file, cube_file, output_file, pds_label_file,
spice_dir,
detector_image_files=None,
kernel_list=None):
'''
Convert a UVIS Cube file into the new FITS format.
:param template_file: Path to spreadsheet containing the template.
:type template_file: pathLib.Path object.
:param cube_file: Path to original cube file.
:type cube_file: pathLib.Path object.
:param output_file: Path to output FITS file.
:type output_file: pathLib.Path object.
:param pds_label_file: Path to PDS Label file for this observation
:type pds_label_file: pathLib.Path object.
:param kernel_list: List of kernel files used (for metadata purposes).
:type kernel_list: List of strings.
TODO: Fields that are not present in the original cube:
NUMBER_OF_SAMPLES
OBS_SECONDS
OBS_TICKS
Fields present in the spreadsheet, not in the cube
PIXEL_CENTER_OCCULTATION_LATITUDE
PIXEL_CENTER_OCCULTATION_LONGITUDE
'''
# Call the superclass method to set up the FITS file general HDUs
super().convert_to_fits(template_file, cube_file, output_file, pds_label_file,
kernel_list=kernel_list)
# Get HDUList defined in superclass.
new_hdu_list = self.new_hdu_list
# Based on the channel, exclude irrelevant fields.
channel = new_hdu_list[0].header['CHANNEL']
if channel == 'FUV':
new_hdu_list.pop('DETECTOR_IMG_EUV')
else:
new_hdu_list.pop('DETECTOR_IMG_FUV')
# Detector Images
ymin = new_hdu_list['CONFIG'].data['IMG_YMIN'][0]
ymax = new_hdu_list['CONFIG'].data['IMG_YMAX'][0]
if detector_image_files is not None:
for file in detector_image_files:
# Read the data from the detector image
detector_image = genfromtxt(file, delimiter=',')
# Get the field name
field_name = file.name[:-4].upper()
# Write to the HDU
hdu_name = 'DETECTOR_IMG_' + channel
new_hdu_list[hdu_name].data[field_name][:, :ymax-ymin] = detector_image
# Compute the SATURN_LOCAL_TIME value
self.compute_saturn_local_time(spice_dir)
def compute_saturn_local_time(self, spice_dir):
# Get the time from the header
ephemeris_time = self.new_hdu_list['TIME'].data['TIME_ET']
# Get the kernels
leap_second_kernel = spice_dir / 'lsk' / self.new_hdu_list['KERNELS'].data['LS_KRN'][0,0]
planetary_constants_kernels = self.new_hdu_list['KERNELS'].data['PC_KRN']
condition1 = np.core.defchararray.startswith(planetary_constants_kernels, 'pck')
condition2 = np.core.defchararray.startswith(planetary_constants_kernels, 'cpck_rock')
w = np.where(~(condition1 | condition2))
# -*- coding: utf-8 -*-
# *****************************************************************************
# Copyright (c) 2020, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
import numpy as np
import pandas as pd
import unittest
from itertools import (combinations_with_replacement, product, chain, )
import numba
from sdc.tests.indexes.index_datagens import (
test_global_index_names,
_generate_positional_range_params,
_generate_positional_indexes_fixed,
get_sample_index,
)
from sdc.tests.test_base import TestCase
from sdc.extensions.indexes.positional_index_ext import init_positional_index
from sdc.datatypes.indexes import *
class TestPositionalIndex(TestCase):
def test_positional_index_type_inferred(self):
for params in _generate_positional_range_params():
start, stop, step = params
for name in test_global_index_names:
index = pd.RangeIndex(start, stop, step, name=name)
with self.subTest(index=index):
native_index_type = numba.typeof(index)
self.assertIsInstance(native_index_type, PositionalIndexType)
def test_positional_index_create_and_box(self):
@self.jit
def sdc_func(stop, name):
return init_positional_index(stop, name=name)
for size, name in product([1, 5, 17], test_global_index_names):
with self.subTest(size=size, name=name):
result = sdc_func(size, name)
expected_res = pd.RangeIndex(size, name=name)
pd.testing.assert_index_equal(result, expected_res)
def test_positional_index_unbox_and_box(self):
def test_impl(index):
return index
sdc_func = self.jit(test_impl)
for params in _generate_positional_range_params():
start, stop, step = params
for name in test_global_index_names:
index = pd.RangeIndex(start, stop, step, name=name)
with self.subTest(index=index):
result = sdc_func(index)
result_ref = test_impl(index)
pd.testing.assert_index_equal(result, result_ref)
def test_positional_index_create_param_name_literal_str(self):
@self.jit
def sdc_func(stop):
return init_positional_index(stop, name='index')
n = 11
result = sdc_func(n)
expected_res = pd.RangeIndex(n, name='index')
pd.testing.assert_index_equal(result, expected_res)
def test_positional_index_attribute_start(self):
def test_impl(index):
return index.start
sdc_func = self.jit(test_impl)
for params in _generate_positional_range_params():
index = pd.RangeIndex(*params)
with self.subTest(index=index):
result = sdc_func(index)
result_ref = test_impl(index)
self.assertEqual(result, result_ref)
def test_positional_index_attribute_stop(self):
def test_impl(index):
return index.stop
sdc_func = self.jit(test_impl)
for params in _generate_positional_range_params():
index = pd.RangeIndex(*params)
with self.subTest(index=index):
result = sdc_func(index)
result_ref = test_impl(index)
self.assertEqual(result, result_ref)
def test_positional_index_attribute_step(self):
def test_impl(index):
return index.step
sdc_func = self.jit(test_impl)
for params in _generate_positional_range_params():
index = pd.RangeIndex(*params)
with self.subTest(index=index):
result = sdc_func(index)
result_ref = test_impl(index)
self.assertEqual(result, result_ref)
def test_positional_index_attribute_dtype(self):
def test_impl(index):
return index.dtype
sdc_func = self.jit(test_impl)
index = pd.RangeIndex(11)
result = sdc_func(index)
result_ref = test_impl(index)
self.assertEqual(result, result_ref)
def test_positional_index_attribute_name(self):
def test_impl(index):
return index.name
sdc_func = self.jit(test_impl)
n = 11
for name in test_global_index_names:
with self.subTest(name=name):
index = pd.RangeIndex(n, name=name)
result = sdc_func(index)
result_ref = test_impl(index)
self.assertEqual(result, result_ref)
def test_positional_index_len(self):
def test_impl(index):
return len(index)
sdc_func = self.jit(test_impl)
for params in _generate_positional_range_params():
index = pd.RangeIndex(*params)
with self.subTest(index=index):
result = sdc_func(index)
result_ref = test_impl(index)
self.assertEqual(result, result_ref)
def test_positional_index_attribute_values(self):
def test_impl(index):
return index.values
sdc_func = self.jit(test_impl)
for params in _generate_positional_range_params():
index = pd.RangeIndex(*params)
with self.subTest(index=index):
result = sdc_func(index)
result_ref = test_impl(index)
np.testing.assert_array_equal(result, result_ref)
def test_positional_index_contains(self):
def test_impl(index, value):
return value in index
sdc_func = self.jit(test_impl)
index = pd.RangeIndex(11)
values_to_test = [-5, 15, 1, 11, 5, 6]
for value in values_to_test:
with self.subTest(value=value):
result = sdc_func(index, value)
result_ref = test_impl(index, value)
np.testing.assert_array_equal(result, result_ref)
def test_positional_index_copy(self):
def test_impl(index, new_name):
return index.copy(name=new_name)
sdc_func = self.jit(test_impl)
for params in _generate_positional_range_params():
start, stop, step = params
for name, new_name in product(test_global_index_names, repeat=2):
index = pd.RangeIndex(start, stop, step, name=name)
with self.subTest(index=index, new_name=new_name):
result = sdc_func(index, new_name)
result_ref = test_impl(index, new_name)
pd.testing.assert_index_equal(result, result_ref)
def test_positional_index_getitem_scalar(self):
def test_impl(index, idx):
return index[idx]
sdc_func = self.jit(test_impl)
for params in _generate_positional_range_params():
index = pd.RangeIndex(*params)
n = len(index)
if not n: # test only non-empty ranges
continue
values_to_test = [-n, n // 2, n - 1]
for idx in values_to_test:
with self.subTest(index=index, idx=idx):
result = sdc_func(index, idx)
result_ref = test_impl(index, idx)
self.assertEqual(result, result_ref)
def test_positional_index_getitem_scalar_idx_bounds(self):
def test_impl(index, idx):
return index[idx]
sdc_func = self.jit(test_impl)
n = 11
index = pd.RangeIndex(n, name='abc')
values_to_test = [-(n + 1), n]
for idx in values_to_test:
with self.subTest(idx=idx):
with self.assertRaises(Exception) as context:
test_impl(index, idx)
pandas_exception = context.exception
with self.assertRaises(type(pandas_exception)) as context:
sdc_func(index, idx)
sdc_exception = context.exception
self.assertIsInstance(sdc_exception, type(pandas_exception))
self.assertIn("out of bounds", str(sdc_exception))
def test_positional_index_getitem_slice(self):
def test_impl(index, idx):
return index[idx]
sdc_func = self.jit(test_impl)
index_len = 17
slices_params = combinations_with_replacement(
[None, 0, -1, index_len // 2, index_len, index_len - 3, index_len + 3, -(index_len + 3)],
2,
)
index = pd.RangeIndex(0, index_len, 1, name='abc')
for slice_start, slice_stop in slices_params:
for slice_step in [1, -1, 2]:
idx = slice(slice_start, slice_stop, slice_step)
with self.subTest(index=index, idx=idx):
result = sdc_func(index, idx)
result_ref = test_impl(index, idx)
pd.testing.assert_index_equal(result, result_ref)
def test_positional_index_iterator_1(self):
def test_impl(index):
res = []
for i, label in enumerate(index):
res.append((i, label))
return res
sdc_func = self.jit(test_impl)
index = pd.RangeIndex(0, 21, 1)
result = sdc_func(index)
result_ref = test_impl(index)
self.assertEqual(result, result_ref)
def test_positional_index_iterator_2(self):
def test_impl(index):
res = []
for label in index:
if not label % 2:
res.append(label)
return res
sdc_func = self.jit(test_impl)
index = pd.RangeIndex(0, 21, 1)
result = sdc_func(index)
result_ref = test_impl(index)
self.assertEqual(result, result_ref)
def test_positional_index_nparray(self):
def test_impl(index):
return np.array(index)
sdc_func = self.jit(test_impl)
n = 11
index = get_sample_index(n, PositionalIndexType)
result = sdc_func(index)
result_ref = test_impl(index)
np.testing.assert_array_equal(result, result_ref)
def test_positional_index_operator_eq_index_1(self):
""" Verifies operator.eq implementation for pandas PositionalIndex in a case of equal range sizes """
def test_impl(index1, index2):
return index1 == index2
sdc_func = self.jit(test_impl)
n = 11
for index1, index2 in product(_generate_positional_indexes_fixed(n), repeat=2):
with self.subTest(index1=index1, index2=index2):
result = np.asarray(sdc_func(index1, index2)) # FIXME_Numba#5157: remove np.asarray
result_ref = test_impl(index1, index2)
np.testing.assert_array_equal(result, result_ref)
def test_positional_index_operator_eq_index_2(self):
""" Verifies operator.eq implementation for pandas PositionalIndex in a case of non equal range sizes """
def test_impl(index1, index2):
return index1 == index2
sdc_func = self.jit(test_impl)
index1 = pd.RangeIndex(11)
index2 = pd.RangeIndex(22)
with self.assertRaises(Exception) as context:
test_impl(index1, index2)
pandas_exception = context.exception
with self.assertRaises(type(pandas_exception)) as context:
sdc_func(index1, index2)
sdc_exception = context.exception
self.assertIn(str(sdc_exception), str(pandas_exception))
def test_positional_index_operator_eq_scalar(self):
""" Verifies operator.eq implementation for pandas PositionalIndex and a scalar value """
def test_impl(A, B):
return A == B
sdc_func = self.jit(test_impl)
n = 11
A = pd.RangeIndex(n)
scalars_to_test = [
A.start,
float(A.start),
A.start + 1,
(A.start + A.stop) / 2,
A.stop,
]
for B in scalars_to_test:
for swap_operands in (False, True):
if swap_operands:
A, B = B, A
with self.subTest(left=A, right=B):
result = np.asarray(sdc_func(A, B)) # FIXME_Numba#5157: remove np.asarray
result_ref = test_impl(A, B)
np.testing.assert_array_equal(result, result_ref)
def test_positional_index_operator_eq_nparray(self):
""" Verifies operator.eq implementation for pandas PositionalIndex and a numpy array """
def test_impl(A, B):
return A == B
sdc_func = self.jit(test_impl)
n = 11
for A, B in product(
_generate_positional_indexes_fixed(n),
map(lambda x: np.array(x), _generate_positional_indexes_fixed(n))
):
for swap_operands in (False, True):
if swap_operands:
A, B = B, A
with self.subTest(left=A, right=B):
result = np.asarray(sdc_func(A, B)) # FIXME_Numba#5157: remove np.asarray
result_ref = test_impl(A, B)
np.testing.assert_array_equal(result, result_ref)
def test_positional_index_operator_ne_index(self):
""" Verifies operator.ne implementation for pandas PositionalIndex in a case of non equal range sizes """
def test_impl(index1, index2):
return index1 != index2
sdc_func = self.jit(test_impl)
n = 11
for index1, index2 in product(_generate_positional_indexes_fixed(n), repeat=2):
with self.subTest(index1=index1, index2=index2):
result = np.asarray(sdc_func(index1, index2)) # FIXME_Numba#5157: remove np.asarray
result_ref = test_impl(index1, index2)
np.testing.assert_array_equal(result, result_ref)
def test_positional_index_operator_is_nounbox(self):
def test_impl_1(*args):
index1 = pd.RangeIndex(*args)
index2 = index1
return index1 is index2
sdc_func_1 = self.jit(test_impl_1)
def test_impl_2(*args):
index1 = pd.RangeIndex(*args)
index2 = pd.RangeIndex(*args)
return index1 is index2
sdc_func_2 = self.jit(test_impl_2)
# positive testcase
params = 1, 21, 3
with self.subTest(subtest="same indexes"):
result = sdc_func_1(*params)
result_ref = test_impl_1(*params)
self.assertEqual(result, result_ref)
self.assertEqual(result, True)
# negative testcase
with self.subTest(subtest="not same indexes"):
result = sdc_func_2(*params)
result_ref = test_impl_2(*params)
self.assertEqual(result, result_ref)
self.assertEqual(result, False)
def test_positional_index_getitem_by_mask(self):
def test_impl(index, mask):
return index[mask]
sdc_func = self.jit(test_impl)
n = 11
np.random.seed(0)
mask = np.random.choice([True, False], n)
for index in _generate_positional_indexes_fixed(n):
result = sdc_func(index, mask)
result_ref = test_impl(index, mask)
pd.testing.assert_index_equal(result, result_ref)
def test_positional_index_getitem_by_array(self):
def test_impl(index, idx):
return index[idx]
sdc_func = self.jit(test_impl)
n, k = 11, 7
np.random.seed(0)
import copy
from math import ceil
from typing import Callable, List, Tuple
from matplotlib import pyplot as plt
import numpy as np
from baselines.ga.direct_ga_fitness import DirectFitness
from baselines.ga.general_ga_pcg import _GeneralGAIndividual
from common.methods.pcg_method import PCGMethod
from games.game import Game
from games.level import Level
from games.maze.maze_game import MazeGame
from games.maze.maze_level import MazeLevel
from gym_pcgrl.envs.helper import calc_longest_path, get_tile_locations
from skimage import morphology as morph
import scipy
from metrics.horn.leniency import LeniencyMetric
from novelty_neat.fitness.entropy import EntropyFitness
from novelty_neat.fitness.fitness import CombinedFitness
from novelty_neat.maze.neat_maze_fitness import SolvabilityFitness
from novelty_neat.novelty.distance_functions.distance import visual_diversity
from novelty_neat.novelty.novelty_metric import NoveltyMetric
class SingleElementGAIndividual:
def __init__(self, level: Level, min: int, max: int, init: int = 0) -> None:
"""A single individual that handles a single aspect of a level.
Args:
level (Level): Level whose width sets the genome length.
min (int): The smallest value that these tiles can take on
max (int): The largest value (inclusive) that these tiles can take on
init (int, optional): The initial value. Defaults to 0.
"""
self.level = level
self.genome = np.zeros(level.width, dtype=np.int32) + init
self.min = min
self.max = max
def crossover(self, other: "SingleElementGAIndividual") -> "SingleElementGAIndividual":
k = np.random.randint(0, len(self.genome) - 1)
new_genome = np.zeros_like(self.genome)
new_genome2 = np.zeros_like(self.genome)
new_genome[:k] += self.genome[:k]
new_genome[k:] += other.genome[k:]
new_genome2[:k] += other.genome[:k]
new_genome2[k:] += self.genome[k:]
new_agent = SingleElementGAIndividual(self.level, min=self.min, max=self.max)
new_agent.genome = new_genome
new_agent2 = SingleElementGAIndividual(self.level, min=self.min, max=self.max)
new_agent2.genome = new_genome2
return new_agent, new_agent2
def mutate(self, prob):
indices = np.random.rand(len(self.genome)) < prob
random_ints = np.random.randint(self.min, self.max + 1, size=indices.sum())
self.genome[indices] = random_ints
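# A minimal usage sketch (assumes MazeLevel() is default-constructible; only
# its width is used to size the genome):
# parent_a = SingleElementGAIndividual(MazeLevel(), min=0, max=3, init=1)
# parent_b = SingleElementGAIndividual(MazeLevel(), min=0, max=3, init=2)
# child1, child2 = parent_a.crossover(parent_b)  # one-point crossover
# child1.mutate(0.1)  # each gene resampled with probability 0.1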
class SingleElementFitnessFunction:
pass
def calc_fitness(self, individuals: List[SingleElementGAIndividual]) -> List[float]:
raise NotImplementedError("")
def reset(self):
pass
def __repr__(self) -> str:
return f"{self.__class__.__name__}()"
class MultiPopGAPCG(PCGMethod):
"""
This is a method that attempts to replicate
<NAME>., <NAME>., & <NAME>. (2014, July). A multi-population genetic algorithm for procedural generation of levels for platform games. In Proceedings of the Companion Publication of the 2014 Annual Conference on Genetic and Evolutionary Computation (pp. 45-46).
Specifically using multiple populations, and at the end we combine them into one level
"""
def __init__(self, game: Game, init_level: Level,
individual_funcs: List[Callable[[Level], SingleElementGAIndividual]],
fitness_functions: List[SingleElementFitnessFunction],
population_size: int = 50,
number_of_generations: int = 100) -> None:
super().__init__(game, init_level)
self.population_size = population_size
self.individual_funcs = individual_funcs
self.level = init_level
self.number_of_generations = number_of_generations
self.fitness_functions = fitness_functions
self.reset()
assert len(self.fitness_functions) == len(self.individual_funcs) == len(self.populations)
def reset(self):
self.populations: List[List[SingleElementGAIndividual]] = \
[
[individual_func(copy.deepcopy(self.level)) for i in range(self.population_size)]
for individual_func in self.individual_funcs
]
self.gen_count = 0
self.best_agent = None
for f in self.fitness_functions:
f.reset()
def one_gen(self):
"""Performs one generation, evaluates and breeds.
"""
for index, (pop, fit) in enumerate(zip(self.populations, self.fitness_functions)):
probs = self.evaluate(pop, fit)
self.populations[index] = self.breed(pop, probs)
self.gen_count += 1
def breed(self, pop: List[SingleElementGAIndividual], probs: List[float]):
"""Breeds the current population to form the next one.
The individual with the best score is kept unchanged, and the others are from crossover with parents.
Args:
probs (List[float]): With what probability should each parent be chosen.
"""
best_agent = np.argmax(probs)
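# The source row is truncated here. As an illustration only (not the original
# code), an elitist continuation would keep the best individual and refill the
# population with mutated crossover offspring sampled according to `probs`:
# new_pop = [pop[best_agent]]
# while len(new_pop) < len(pop):
#     p1, p2 = np.random.choice(pop, size=2, p=probs)
#     c1, c2 = p1.crossover(p2)
#     c1.mutate(0.1); c2.mutate(0.1)
#     new_pop.extend([c1, c2])
# return new_pop[:len(pop)]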
"""
Testing what the fastest way is to create a 1D Array with 2 values
"""
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
import random
import numpy as np
x, y = random.uniform(0, 300), random.uniform(0, 300)
def numpy_array(x, y):
# Calculate distances between each of the points
return np.array((x, y), dtype=float)  # np.float was removed in NumPy 1.24
def numpy_array_tuple(my_tuple):
# Calculate distances between each of the points
return np.array(my_tuple, dtype=float)
def numpy_asarray(x, y):
# Calculate distances between each of the points
return np.asarray((x, y), dtype=float)
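# A small benchmark sketch using timeit against the three variants above:
if __name__ == "__main__":
    import timeit
    for stmt in ("numpy_array(x, y)",
                 "numpy_array_tuple((x, y))",
                 "numpy_asarray(x, y)"):
        t = timeit.timeit(stmt, number=100_000, globals=globals())
        print(f"{stmt:<28}{t:.3f}s")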
import numpy as np
import scipy
import scipy.special
import scipy.optimize
import scipy.ndimage
import scipy.fftpack as fftpack
import thimbles as tmb
from thimbles import speed_of_light
from functools import reduce
sqrt2pi = np.sqrt(2*np.pi)
sqrt2 = np.sqrt(2)
# ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
"""
Test the usage of ng.constant
"""
from __future__ import print_function
import numpy as np
import ngraph as ng
from ngraph.testing import executor
import pytest
pytestmark = [pytest.mark.transformer_dependent, pytest.mark.separate_execution]
def test_constant_init():
"""TODO."""
a = ng.constant(5)
with executor(a) as ex:
result = ex()
print(result)
assert (result == 5)
print("pass constant initialization")
nparray = np.array(range(5))
a = ng.constant(nparray)
with executor(a) as ex:
result = ex()
ng.testing.assert_allclose(result, nparray)
def test_constant_add():
"""TODO."""
a = ng.constant(1)
b = ng.constant(2)
c = a + b
with executor(c) as ex:
result = ex()
print(result)
assert result == 3
def test_constant_multiply():
"""TODO."""
a = ng.constant(4)
b = ng.constant(2)
c = ng.multiply(a, b)
with executor(c) as ex:
result = ex()
assert result == 8
def test_cputensor_add():
"""TODO."""
Y = ng.make_axis(length=2)
M = ng.make_axis(length=2)
N = ng.make_axis(length=2)
a = ng.constant(np.array([3, 5], dtype=np.float32), [Y])
b = ng.constant(np.array([3, 5], dtype=np.float32), [Y])
c = a + b
with executor(c) as ex:
result = ex()
assert np.array_equal(result, [6, 10])
np_a = np.array([[1, 2], [3, 4]], dtype=np.float32)
np_b = np.array([[1, 2], [3, 4]], dtype=np.float32)
np_c = np_a + np_b
a = ng.constant(np_a, [M, N])
b = ng.constant(np_b, [M, N])
c = a + b
with executor(c) as ex:
result = ex()
assert np.array_equal(result, np_c)
def test_cputensor_dot():
Y = ng.make_axis(length=2)
M = ng.make_axis(length=1)
N = ng.make_axis(length=3)
np_a = np.array([[1, 2, 3]], dtype=np.float32)
np_b = np.array([[1, 2], [2, 3], [3, 4]], dtype=np.float32)
np_c = np.dot(np_a, np_b)
import numpy as np
from scipy.optimize import linear_sum_assignment
from collections import defaultdict
from utils.utils import parse_camera_param
def global2pixel(person_coords, camera_id, camera_param_dict):
# det : X Y Z
world_coord = person_coords / camera_param_dict['discretization_factor'] + camera_param_dict['min_volume']
trans_coord = world_coord - camera_param_dict[camera_id]['Translation']
uvw = np.linalg.inv(camera_param_dict[camera_id]['Rotation']) @ trans_coord.transpose(1, 0)
uvw = uvw.transpose(1, 0)
pixel_coords = uvw / camera_param_dict[camera_id]['FInv'] / uvw[:, 2:3] + camera_param_dict[camera_id]['C']
return pixel_coords[:, :2]
def batch_euc_dist(point1, point2):
point1_reshape = point1[:, np.newaxis, :]
point2_reshape = point2[np.newaxis, :, :]
sub = point1_reshape - point2_reshape
dist = np.linalg.norm(sub, ord=2, axis=-1)
return dist
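# A quick check of the broadcasting above: point1 (N, D) against point2 (M, D)
# yields an (N, M) distance matrix.
# p1 = np.array([[0.0, 0.0], [1.0, 0.0]])
# p2 = np.array([[0.0, 1.0]])
# batch_euc_dist(p1, p2)  # -> [[1.0], [1.41421356]]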
import numpy as np
import scipy as sp
def reshape_dims(M,dims=None):
num_dim = np.ndim(M)
if num_dim ==3:
M1r= M.reshape((np.prod(dims[:2]),dims[2]),order='F')
elif num_dim ==2:
M1r = M.reshape(dims,order='F')
return M1r
def remove_trend(Y_rm,detrend_option='linear'):
mean_pixel = Y_rm.mean(axis=1, keepdims=True)
Y_rm2 = Y_rm - mean_pixel
# Detrend
if detrend_option=='linear':
detr_data = sp.signal.detrend(Y_rm2,axis=1,type='l')
#elif detrend_option=='quad':
#detr_data = detrend(Y_rm)
else:
print('Add option')
Y_det = detr_data + mean_pixel
offset = Y_rm - Y_det
return Y_det, offset
def unpad(x):
"""
Given padded matrix with nan
Get rid of all nan in order (row, col)
Parameters:
----------
x: np.array
array to unpad (all nan values)
Outputs:
-------
x: np.array
unpaded array (will not contain nan values)
dimension might be different from input array
"""
x = x[:, ~np.isnan(x).all(0)]
x = x[~np.isnan(x).all(1)]
return x
def pad(array, reference_shape, offsets, array_type=np.nan):
"""
Pad array wrt reference_shape exlcluding offsets with dtype=array_type
Parameters:
----------
array: np.array
array to be padded
reference_shape:tuple
size of narray to create
offsets: tuple
list of offsets (number of elements must be equal
to the dimension of the array)
will throw a ValueError if offsets is too big and the
reference_shape cannot handle the offsets
array_type: dtype
data type to pad array with.
Outputs:
-------
result: np.array (reference_shape)
padded array given input
"""
# Create an array of zeros with the reference shape
result = np.ones(reference_shape) * array_type
# Create a list of slices from offset to offset + shape in each dimension
insertHere = [slice(offsets[dim], offsets[dim] + array.shape[dim])
for dim in range(array.ndim)]
# Insert the array in the result at the specified offsets
result[tuple(insertHere)] = array  # index with a tuple; a list of slices is rejected by modern NumPy
return result
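# A round-trip sketch for the two helpers above: embed a 2x2 block into a
# 4x4 nan frame at offset (1, 1), then recover it with unpad.
# a = np.arange(4.0).reshape(2, 2)
# padded = pad(a, (4, 4), (1, 1))  # nan everywhere except the 2x2 block
# recovered = unpad(padded)        # equal to a again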
def nextpow2(value):
"""
Extracted from
caiman.source_extraction.cnmf.deconvolution import axcov
Find exponent such that 2^exponent is >= abs(value).
Parameters:
----------
value : int
Returns:
-------
exponent : int
"""
exponent = 0
avalue = np.abs(value)
while avalue > np.power(2, exponent):
exponent += 1
return exponent
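# Example of the loop above: nextpow2(5) == 3 because 2**3 = 8 is the first
# power of two >= 5; nextpow2(8) == 3 as well, since 2**3 >= 8.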
# Copyright (c) 2019, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
import numpy as np
import cuml
import cuml.svm
from numba import cuda
from sklearn import svm
from sklearn.datasets import load_iris, make_blobs
from sklearn.datasets.samples_generator import make_classification, \
make_gaussian_quantiles
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from cuml.test.utils import to_nparray, np_to_cudf
import cudf
def array_equal(a, b, tol=1e-6, relative_diff=True, report_summary=False):
diff = np.abs(a-b)
if relative_diff:
idx = np.nonzero(abs(b) > tol)
diff[idx] = diff[idx] / abs(b[idx])
equal = np.all(diff <= tol)
if not equal and report_summary:
idx = np.argsort(diff)
print("Largest diffs")
a = a.ravel()
b = b.ravel()
diff = diff.ravel()
for i in idx[-5:]:
if (diff[i] > tol):
print(diff[i], "at", i, "values", a[i], b[i])
print('Avgdiff:', np.mean(diff), 'stddev:', np.std(diff), 'avgval:',
np.mean(b))
return equal
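# A short usage sketch of the relative comparison above:
# a = np.array([1.000001, 2.0])
# b = np.array([1.0, 2.0])
# array_equal(a, b, tol=1e-5)  # True: the relative difference is ~1e-6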
def compare_svm(svm1, svm2, X, y, n_sv_tol=None, b_tol=None, coef_tol=None,
cmp_sv=False, dcoef_tol=None, accuracy_tol=None,
report_summary=False):
""" Compares two svm classifiers
Parameters:
-----------
svm1 : svm classifier
svm2 : svm classifier
accuracy_tol : float, default 0.1%
tolerance while comparing the prediction accuracy
b_tol : float
tolerance while comparing the constant in the decision functions
coef_tol: float
tolerance used while comparing coef_ attribute for linear SVM
cmp_sv : boolean, default false
whether to compare SVs and their indices
dcoef_tol: float, default: do not compare dual coefficients
tolerance used to compare dual coefs
"""
n = X.shape[0]
svm1_y_hat = to_nparray(svm1.predict(X))
svm1_n_wrong = np.sum(np.abs(y - svm1_y_hat))
accuracy1 = (n-svm1_n_wrong)*100/n
svm2_y_hat = to_nparray(svm2.predict(X))
svm2_n_wrong = np.sum(np.abs(y - svm2_y_hat))
accuracy2 = (n-svm2_n_wrong)*100/n
if accuracy_tol is None:
if n >= 250 and (accuracy1 + accuracy2)/2 <= 75:
# 1% accuracy tolerance for not so accurate SVM on "large" dataset
accuracy_tol = 1
else:
accuracy_tol = 0.1
assert abs(accuracy1 - accuracy2) <= accuracy_tol
print(accuracy1, n)
n_support1 = np.sum(svm1.n_support_)
n_support2 = np.sum(svm2.n_support_)
if n_sv_tol is None:
n_sv_tol = max(2, n_support1*0.02)
if b_tol is None:
b_tol = 30*svm1.tol
if accuracy1 < 50:
# Increase error margin for classifiers that are not accurate.
# Although analytically the classifier should always be the same,
# we fit only until we reach a certain numerical tolerance, and
# therefore the resulting SVM's can be different. We increase the
# tolerance in these cases.
#
# A good example is the gaussian dataset with linear classifier:
# the classes are concentric blobs, and we cannot separate that with a
# straight line. When we have a large number of data points, then
# any separating hyperplane that goes through the center would be good.
n_sv_tol *= 10
b_tol *= 10
if n >= 250:
coef_tol = 2 # allow any direction
else:
coef_tol *= 10
assert abs(n_support1-n_support2) <= n_sv_tol
if abs(svm2.intercept_) > 1e-6:
assert abs((svm1.intercept_-svm2.intercept_)/svm2.intercept_) <= b_tol
else:
assert abs((svm1.intercept_-svm2.intercept_)) <= b_tol
if coef_tol is None:
coef_tol = 1e-5
if svm1.kernel == 'linear':
cs = np.dot(svm1.coef_, svm2.coef_.T) / \
(np.linalg.norm(svm1.coef_) * np.linalg.norm(svm2.coef_))
assert cs > 1 - coef_tol
if cmp_sv or (dcoef_tol is not None):
sidx1 = np.argsort(to_nparray(svm1.support_))
sidx2 = np.argsort(to_nparray(svm2.support_))
if cmp_sv:
support_idx1 = to_nparray(svm1.support_)[sidx1]
support_idx2 = to_nparray(svm2.support_)[sidx2]
assert np.all(support_idx1-support_idx2) == 0
sv1 = to_nparray(svm1.support_vectors_)[sidx1, :]
sv2 = to_nparray(svm2.support_vectors_)[sidx2, :]
assert np.all(sv1 - sv2 == 0)
from time import time
import numpy
class Trainer(object):
def __init__(self, agent, env):
"""
An object to facilitate agent training and evaluation.
Parameters
----------
agent : :class:`NumpyAgent` instance
The agent to train.
env : ``gym.wrappers`` or ``gym.envs`` instance
The environment to run the agent on.
"""
self.env = env
self.agent = agent
self.rewards = {"total": [], "smooth_total": [], "n_steps": [], "duration": []}
def _train_episode(self, max_steps, render_every=None):
t0 = time()
if "train_episode" in dir(self.agent):
# online training updates over the course of the episode
reward, n_steps = self.agent.train_episode(max_steps)
else:
# offline training updates upon completion of the episode
reward, n_steps = self.agent.run_episode(max_steps)
self.agent.update()
duration = time() - t0
return reward, duration, n_steps
def train(
self,
n_episodes,
max_steps,
seed=None,
plot=True,
verbose=True,
render_every=None,
smooth_factor=0.05,
):
"""
Train an agent on an OpenAI gym environment, logging training
statistics along the way.
Parameters
----------
n_episodes : int
The number of episodes to train the agent across.
max_steps : int
The maximum number of steps the agent can take on each episode.
seed : int or None
A seed for the random number generator. Default is None.
plot : bool
Whether to generate a plot of the cumulative reward as a function
of training episode. Default is True.
verbose : bool
Whether to print intermediate run statistics to stdout during
training. Default is True.
smooth_factor : float in [0, 1]
The amount to smooth the cumulative reward across episodes. Larger
values correspond to less smoothing.
"""
if seed:
numpy.random.seed(seed)
self.env.seed(seed=seed)
t0 = time()
render_every = numpy.inf if render_every is None else render_every
sf = smooth_factor
for ep in range(n_episodes):
tot_rwd, duration, n_steps = self._train_episode(max_steps)
smooth_tot = tot_rwd if ep == 0 else (1 - sf) * smooth_tot + sf * tot_rwd
if verbose:
print(
f"[Ep. {ep + 1:2}] {n_steps:<6.2f} Steps | Total Reward: {tot_rwd:<7.2f} | Smoothed "
f"Total: {smooth_tot:<7.2f} | Duration: {duration:<6.2f}s"
)
if (ep + 1) % render_every == 0:
total, n_steps = self.agent.greedy_policy(max_steps)
print(
f"\tGreedy policy total reward: {total:.2f}, n_steps: {n_steps:.2f}"
)
self.rewards["total"].append(tot_rwd)
self.rewards["n_steps"].append(n_steps)
self.rewards["duration"].append(duration)
self.rewards["smooth_total"].append(smooth_tot)
train_time = (time() - t0) / 60
print(
f"Training took {train_time:.2f} mins [{ | numpy.mean(self.rewards['duration']) | numpy.mean |
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import keras
from keras.callbacks import Callback
from sklearn.metrics import confusion_matrix, classification_report
import os
from scipy import misc
class SPARCS_Callback(keras.callbacks.Callback):
def __init__(self, valid_datasets, valid_datagens, steps_per_epoch=float('inf'), frequency=1):
keras.callbacks.Callback.__init__(self)
self.datasets = valid_datasets
self.datagens = valid_datagens
self.steps_per_epoch = steps_per_epoch
self.frequency = frequency
return
def on_train_begin(self, logs={}):
return
def on_train_end(self, logs={}):
return
def on_epoch_begin(self, epoch, logs={}):
return
def on_epoch_end(self, epoch, logs={}):
if epoch % self.frequency == self.frequency-1:
print('|{0: ^12}|{1: ^12}|{2: ^12}|{3: ^12}|{4: ^12}|{5: ^12}\n -----------------------------------------------------------------'.
format('Biome', '% Correct', '% Omission', '% Comission', '% Cloud', '%Shadow'))
for dataset, gen in zip(self.datasets, self.datagens):
y_true, y_pred = np.array([]), np.array([])
biome = os.path.split(dataset.dirs)[-1]
stepper = 0
n_samples = 1
n_pixels = 1
n_cloud = 1
n_clear = 1
n_shadow = 1
percent_cloud = 0
percent_shadow = 0
percent_correct = 0
percent_omission = 0
percent_comission = 0
while n_samples < len(dataset) and stepper < self.steps_per_epoch:
x, y_t = next(gen)
y_p = self.model.predict(x)
y_true = y_t.reshape([-1, y_t.shape[-1]]).argmax(axis=-1)
y_pred = y_p.reshape([-1, y_p.shape[-1]]).argmax(axis=-1)
total = len(y_true)
total_shadow = y_true == 0
total_clear = y_true == 1
total_cloud = y_true == 2
cloud_as_shadow = (y_true == 2) * (y_pred == 0)
cloud_as_clear = (y_true == 2) * (y_pred == 1)
cloud_as_cloud = (y_true == 2) * (y_pred == 2)
clear_as_shadow = (y_true == 1) * (y_pred == 0)
clear_as_clear = (y_true == 1) * (y_pred == 1)
clear_as_cloud = (y_true == 1) * (y_pred == 2)
shadow_as_shadow = (y_true == 0) * (y_pred == 0)
shadow_as_clear = (y_true == 0) * (y_pred == 1)
shadow_as_cloud = (y_true == 0) * (y_pred == 2)
i_percent_cloud = 100*np.sum(total_cloud)/np.sum(total)
i_percent_shadow = 100*np.sum(total_shadow)/np.sum(total)
i_percent_correct = 100 * \
(np.sum(shadow_as_shadow)+np.sum(cloud_as_cloud) +
np.sum(clear_as_clear))/np.sum(total)
if np.sum(total_shadow):  # guard the shadow denominator used below
i_percent_omission = 100 * \
(np.sum(total_shadow) - np.sum(shadow_as_shadow)) / \
np.sum(total_shadow)
else:
i_percent_omission = 0
if np.sum(total_clear):
i_percent_comission = 100 * \
np.sum(clear_as_shadow+cloud_as_shadow) / \
(np.sum(total_clear)+np.sum(total_cloud))
else:
i_percent_comission = 0
percent_cloud = (
percent_cloud*n_pixels + i_percent_cloud*np.sum(total))/(n_pixels+np.sum(total))
percent_shadow = (
percent_shadow*n_pixels + i_percent_shadow*np.sum(total))/(n_pixels+np.sum(total))
percent_correct = (
percent_correct*n_pixels + i_percent_correct*np.sum(total))/(n_pixels+np.sum(total))
percent_omission = (percent_omission*n_shadow + i_percent_omission*np.sum(
total_shadow))/(n_shadow+np.sum(total_shadow))
percent_comission = (percent_comission*(n_clear+n_cloud) + i_percent_comission*(np.sum(
total_clear)+np.sum(total_cloud)))/(n_clear+n_cloud+np.sum(total_clear)+np.sum(total_cloud))
n_pixels += np.sum(total)
n_cloud += np.sum(total_cloud)
n_clear += np.sum(total_clear)
"""
Similar to RunModel, but fine-tunes over time on openpose output.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .ops import keypoint_l1_loss
from .models import Encoder_resnet, Encoder_fc3_dropout
from .tf_smpl.batch_lbs import batch_rodrigues
from .tf_smpl.batch_smpl import SMPL
from .tf_smpl.projection import batch_orth_proj_idrot
from .util.renderer import SMPLRenderer, draw_skeleton
from .util.image import unprocess_image
import time
from os.path import exists
import tensorflow as tf
import numpy as np
class Refiner(object):
def __init__(self, config, num_frames, sess=None):
"""
Args:
config,,
"""
# Config + path
if not config.load_path:
raise Exception(
"[!] You should specify `load_path` to load a pretrained model"
)
if not exists(config.load_path + '.index'):
print('%s doesnt exist..' % config.load_path)
# import ipdb
# ipdb.set_trace()
self.config = config
self.load_path = config.load_path
self.num_frames = num_frames
self.data_format = config.data_format
self.smpl_model_path = config.smpl_model_path
# Visualization for fitting
self.viz = config.viz
self.viz_sub = 10
# Loss & Loss weights:
self.e_lr = config.e_lr
self.e_loss_weight = config.e_loss_weight
self.shape_loss_weight = config.shape_loss_weight
self.joint_smooth_weight = config.joint_smooth_weight
self.camera_smooth_weight = config.camera_smooth_weight
self.keypoint_loss = keypoint_l1_loss
self.init_pose_loss_weight = config.init_pose_loss_weight
# Data
self.batch_size = num_frames
self.img_size = config.img_size
input_size = (self.batch_size, self.img_size, self.img_size, 3)
self.images_pl = tf.placeholder(tf.float32, shape=input_size)
self.img_feat_pl = tf.placeholder(tf.float32,
shape=(self.batch_size, 2048))
self.img_feat_var = tf.get_variable("img_feat_var",
dtype=tf.float32,
shape=(self.batch_size, 2048))
kp_size = (self.batch_size, 19, 3)
self.kps_pl = tf.placeholder(tf.float32, shape=kp_size)
# Camera type!
self.num_cam = 3
self.proj_fn = batch_orth_proj_idrot
self.num_theta = 72 # 24 * 3
self.total_params = self.num_theta + self.num_cam + 10
# Model spec
# For visualization
if self.viz:
self.renderer = SMPLRenderer(img_size=self.img_size,
face_path=config.smpl_face_path)
# Instantiate SMPL
self.smpl = SMPL(self.smpl_model_path)
self.theta0_pl_shape = [self.batch_size, self.total_params]
self.theta0_pl = tf.placeholder_with_default(
self.load_mean_param(), shape=self.theta0_pl_shape, name='theta0')
# Optimization space.
self.refine_inpose = config.refine_inpose
if self.refine_inpose:
self.theta_pl = tf.placeholder(tf.float32,
shape=self.theta0_pl_shape,
name='theta_pl')
self.theta_var = tf.get_variable("theta_var",
dtype=tf.float32,
shape=self.theta0_pl_shape)
# For ft-loss
self.shape_pl = tf.placeholder_with_default(tf.zeros(10),
shape=(10, ),
name='beta0')
# For stick-to-init-pose loss:
self.init_pose_pl = tf.placeholder_with_default(tf.zeros(
[num_frames, 72]),
shape=(num_frames, 72),
name='pose0')
self.init_pose_weight_pl = tf.placeholder_with_default(
tf.ones([num_frames, 1]),
shape=(num_frames, 1),
name='pose0_weights')
# For camera loss
self.scale_factors_pl = tf.placeholder_with_default(
tf.ones([num_frames]), shape=(num_frames,), name='scale_factors')
self.offsets_pl = tf.placeholder_with_default(tf.zeros([num_frames,
2]),
shape=(num_frames, 2),
name='offsets')
# Build model!
self.ief = config.ief
if self.ief:
self.num_stage = config.num_stage
self.build_refine_model()
else:
print('never here')
import ipdb
ipdb.set_trace()
all_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
# Exclude the new variable.
all_vars_filtered = [
v for v in all_vars
if ('img_feat_var' not in v.name) and ('theta_var' not in v.name)
]
self.saver = tf.train.Saver(all_vars_filtered)
if sess is None:
self.sess = tf.Session()
else:
self.sess = sess
new_vars = [
v for v in all_vars
if ('img_feat_var' in v.name) or ('theta_var' in v.name)
]
self.sess.run(tf.variables_initializer(new_vars))
# coord = tf.train.Coordinator()
# threads = tf.train.start_queue_runners(sess=self.sess, coord=coord)
self.prepare()
def load_mean_param(self):
mean = np.zeros((1, self.total_params))
mean[0, 0] = 0.9 # This is scale.
mean = tf.constant(mean, tf.float32)
self.mean_var = tf.Variable(mean,
name="mean_param",
dtype=tf.float32,
trainable=True)
# self.E_var.append(self.mean_var)
init_mean = tf.tile(self.mean_var, [self.batch_size, 1])
# 85D consists of [cam (3), pose (72), shapes (10)]
# cam is [scale, tx, ty]
return init_mean
def prepare(self):
print('Restoring checkpoint %s..' % self.load_path)
self.saver.restore(self.sess, self.load_path)
self.mean_value = self.sess.run(self.mean_var)
def build_refine_model(self):
img_enc_fn = Encoder_resnet
threed_enc_fn = Encoder_fc3_dropout
self.img_feat, self.E_var = img_enc_fn(self.images_pl,
is_training=False,
reuse=False)
self.set_img_feat_var = self.img_feat_var.assign(self.img_feat_pl)
# Start loop
self.all_verts = []
self.all_kps = []
self.all_cams = []
self.all_Js = []
self.all_Jsmpl = []
self.final_thetas = []
theta_prev = self.theta0_pl
for i in np.arange(self.num_stage):
print('Iteration %d' % i)
# ---- Compute outputs
state = tf.concat([self.img_feat_var, theta_prev], 1)
if i == 0:
delta_theta, threeD_var = threed_enc_fn(
state,
num_output=self.total_params,
is_training=False,
reuse=False)
self.E_var.append(threeD_var)
else:
delta_theta, _ = threed_enc_fn(state,
num_output=self.total_params,
is_training=False,
reuse=True)
# Compute new theta
theta_here = theta_prev + delta_theta
# cam = N x 3, pose N x self.num_theta, shape: N x 10
cams = theta_here[:, :self.num_cam]
poses = theta_here[:, self.num_cam:(self.num_cam + self.num_theta)]
shapes = theta_here[:, (self.num_cam + self.num_theta):]
# Rs_wglobal is Nx24x3x3 rotation matrices of poses
verts, Js, pred_Rs = self.smpl(shapes, poses, get_skin=True)
Jsmpl = self.smpl.J_transformed
# Project to 2D!
pred_kp = self.proj_fn(Js, cams, name='proj_2d_stage%d' % i)
self.all_verts.append(verts)
self.all_kps.append(pred_kp)
self.all_cams.append(cams)
self.all_Js.append(Js)
self.all_Jsmpl.append(Jsmpl)
# save each theta.
self.final_thetas.append(theta_here)
# Finally, update theta to end the iteration.
theta_prev = theta_here
# Compute everything with the final theta.
if self.refine_inpose:
self.set_theta_var = self.theta_var.assign(self.theta_pl)
theta_final = self.theta_var
else:
theta_final = theta_here
cams = theta_final[:, :self.num_cam]
poses = theta_final[:, self.num_cam:(self.num_cam + self.num_theta)]
shapes = theta_final[:, (self.num_cam + self.num_theta):]
# Rs_wglobal is Nx24x3x3 rotation matrices of poses
verts, Js, pred_Rs = self.smpl(shapes, poses, get_skin=True)
Jsmpl = self.smpl.J_transformed
# Project to 2D!
pred_kp = self.proj_fn(Js, cams, name='proj_2d_stage%d' % i)
self.all_verts.append(verts)
self.all_kps.append(pred_kp)
self.all_cams.append(cams)
self.all_Js.append(Js)
self.all_Jsmpl.append(Jsmpl)
# save each theta.
self.final_thetas.append(theta_final)
# Compute new losses!!
self.e_loss_kp = self.e_loss_weight * self.keypoint_loss(
self.kps_pl, pred_kp)
# Beta variance should be low!
self.loss_shape = self.shape_loss_weight * shape_variance(
shapes, self.shape_pl)
self.loss_init_pose = self.init_pose_loss_weight * init_pose(
pred_Rs, self.init_pose_pl, weights=self.init_pose_weight_pl)
# Endpoints should be smooth!!
self.loss_joints = self.joint_smooth_weight * joint_smoothness(Js)
# Camera should be smooth
self.loss_camera = self.camera_smooth_weight * camera_smoothness(
cams,
self.scale_factors_pl,
self.offsets_pl,
img_size=self.config.img_size)
self.total_loss = self.e_loss_kp + self.loss_shape + self.loss_joints + self.loss_init_pose + self.loss_camera
# Setup optimizer
print('Setting up optimizer..')
self.optimizer = tf.train.AdamOptimizer
e_optimizer = self.optimizer(self.e_lr)
if self.refine_inpose:
self.e_opt = e_optimizer.minimize(self.total_loss,
var_list=[self.theta_var])
else:
self.e_opt = e_optimizer.minimize(self.total_loss,
var_list=[self.img_feat_var])
print('Done initializing the model!')
def predict(self, images, kps, scale_factors, offsets):
"""
images: num_batch, img_size, img_size, 3
kps: num_batch x 19 x 3
Preprocessed to range [-1, 1]
scale_factors, offsets: used to preprocess the bbox
Runs the model with images.
"""
## Initially, get the encoding of images:
feed_dict = {self.images_pl: images}
fetch_dict = {'img_feats': self.img_feat}
img_feats = self.sess.run(self.img_feat, feed_dict)
feed_dict = {
self.img_feat_pl: img_feats,
self.kps_pl: kps,
}
self.sess.run(self.set_img_feat_var, feed_dict)
if self.refine_inpose:
# Take -2 because that's the actual theta (-1 is still the placeholder)
use_res = -2
else:
use_res = -1
fetch_dict = {
'theta': self.final_thetas[use_res],
'joints': self.all_kps[use_res],
'verts': self.all_verts[use_res],
}
init_result = self.sess.run(fetch_dict, feed_dict)
shapes = init_result['theta'][:, -10:]
# Save mean shape of this trajectory:
mean_shape = np.mean(shapes, axis=0)
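# Illustrative sketch: build_refine_model() above uses iterative error
# feedback (IEF) -- the network predicts a correction delta_theta from
# [image_features, current_theta] rather than theta itself, repeated for
# num_stage steps. A toy numpy version of that loop; regressor() is a
# hypothetical stand-in for Encoder_fc3_dropout:
import numpy as np

def regressor(state):
    return -0.1 * state[:, -85:]  # toy correction pulling theta toward zero

feat = np.random.randn(4, 2048)  # batch of ResNet image features
theta = np.zeros((4, 85))        # [cam (3), pose (72), shape (10)]
theta[:, 0] = 0.9                # mean scale, as in load_mean_param()
for _ in range(3):               # num_stage refinement iterations
    state = np.concatenate([feat, theta], axis=1)
    theta = theta + regressor(state)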
bl_info = {
"name": "Import Planar Code",
"author": "<NAME>",
"version": (1, 0),
"blender": (2, 80, 0),
"location": "File > Import > Planar Code",
"description": "Import planar code and construct mesh by assigning vertex positions.",
"warning": "",
"support": "TESTING",
"wiki_url": "",
"tracker_url": "",
"category": "Import-Export",
}
import bpy
import bmesh
import numpy as np
import mathutils as mu
from bpy.props import StringProperty, IntProperty, BoolProperty
import struct
import collections
import os
import random
class PlanarCodeReader:
def __init__(self, filename, index, embed2d, embed3d):
self.faceCounters = []
verts_loc, faces = self.read(filename, index)
if (not verts_loc):
return
if (len(verts_loc) <= 0):
return
# create new mesh
name = os.path.basename(filename) + "_" + str(index)
mesh = bpy.data.meshes.new(name)
mesh.from_pydata(verts_loc,[],faces)
mesh.update(calc_edges=True)
# create new bmesh
bm = bmesh.new()
bm.from_mesh(mesh)
# enable lookup
bm.verts.ensure_lookup_table()
bm.edges.ensure_lookup_table()
bm.faces.ensure_lookup_table()
if (embed2d):
pv = self.embed(bm)
print(pv)
if (embed3d):
self.liftup(bm, pv)
bm.to_mesh(mesh)
# create new object
obj = bpy.data.objects.new(name, mesh)
# set object location
obj.location = bpy.context.scene.cursor.location
# link the object to collection
bpy.context.scene.collection.objects.link(obj)
def read(self, filename, index):
self.f = open(filename, "rb")
verts = []
faces = []
try:
DEFAULT_HEADER = b">>planar_code<<"
header = self.f.read(len(DEFAULT_HEADER))
if (header == DEFAULT_HEADER):
print(index)
self.skip(index)
# create verts
num_vert = struct.unpack('b', self.f.read(1))
i = 0
while i < num_vert[0]:
# create vertex
verts.append((0, 0, 0))
# read adjacant vertices
adj = []
while True:
tmp = struct.unpack('b', self.f.read(1))
if (tmp[0] <= 0): # 0 means separator
break
adj.append(tmp[0])
# add face counter
lastIndex = len(adj)-1
for j in range(lastIndex):
self.addIfAbsent(collections.Counter([i, adj[j]-1, adj[j+1]-1]))
self.addIfAbsent(collections.Counter([i, adj[0]-1, adj[lastIndex]-1]))
i += 1
for counter in self.faceCounters:
faces.append(tuple(counter))
except:
print(f"Error in reading {filename}")
self.f.close()
return
self.f.close()
del self.f
return verts, faces
def skip(self, index):
# skip to target index
for i in range(index):
num_vert = struct.unpack('b', self.f.read(1))
n = num_vert[0]
while n > 0:
d = struct.unpack('b', self.f.read(1))
if (d[0] == 0):
n -= 1
def addIfAbsent(self, fc):
for counter in self.faceCounters:
if (counter == fc):
break
else:
self.faceCounters.append(fc)
def embed(self, bm):
# randomly pick up a face
outerFace = bm.faces[random.randint(0, len(bm.faces)-1)]
# embed an outer face to form a regular polygon inscribed into a circle
n = len(outerFace.verts)
inv_sqrt = 1.0 / np.sqrt(n)
angle = 360.0 / n
for i, v in enumerate(outerFace.verts):
rad = (i * angle / 180.0) * np.pi
x = inv_sqrt * np.cos(rad)
y = inv_sqrt * np.sin(rad)
v.co.x = x
v.co.y = y
rests = []
for v in bm.verts:
if (not v in outerFace.verts):
rests.append(v)
# variables for the force F_uv on a Edge(u,v)
fuv = np.zeros((len(bm.edges), 3))
# force F_v on a Vertex(v)
fv = np.zeros((len(bm.verts), 3))
# Constant value
n_pi = np.sqrt(len(bm.verts) / np.pi)
# final double A = 2.5;
avg_area = np.pi / len(bm.verts)
loop = 0
# iterations
while (loop < 500):
# Set F_v to zero
fv[:] = 0
# Calculate F_uv for Edges
for j, e in enumerate(bm.edges):
v = e.verts[0]
u = e.verts[1]
C = n_pi
x = C * np.power(v.co.x - u.co.x, 3)
y = C * np.power(v.co.y - u.co.y, 3)
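# Illustrative sketch: embed() accumulates, for every edge (u, v), a cubic
# attractive force C * (v - u)**3 and applies the summed force to each
# interior vertex. A minimal numpy version of one such iteration on plain
# coordinate arrays (all names here are hypothetical):
import numpy as np

pos = np.random.rand(5, 2)                   # 2-D vertex positions
edges = np.array([[0, 1], [1, 2], [2, 3], [3, 4], [4, 0]])
n_pi = np.sqrt(len(pos) / np.pi)             # same constant as in embed()
fv = np.zeros_like(pos)                      # per-vertex force accumulator
for u, v in edges:
    f = n_pi * np.power(pos[v] - pos[u], 3)  # cubic spring along the edge
    fv[u] += f                               # pulls u toward v
    fv[v] -= f                               # and v toward u
pos += 0.01 * fv                             # small damped update step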
import numpy as np
def clean_matrix(A):
"""
Removes all the vertices of graph which do not sustain any cycle
by iteratively removing isolated vertices, sinks and sources until
the matrix is invariant under such removal.
Parameters
----------
A : numpy.ndarray
Adjacency matrix
Returns
-------
numpy.ndarray
The shape of this array is different from the input array
"""
oldshape = (0, 0)
while oldshape != A.shape:
oldshape = A.shape
x = np.any(A, axis=0)
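# Illustrative sketch: clean_matrix() is truncated above. Going by its
# docstring (iteratively drop isolated vertices, sinks and sources until the
# shape stabilizes), a complete loop body might look like this -- the exact
# body is an assumption, not taken from the source:
import numpy as np

def clean_matrix_sketch(A):
    oldshape = (0, 0)
    while oldshape != A.shape:
        oldshape = A.shape
        has_in = np.any(A, axis=0)   # vertices with an incoming edge
        has_out = np.any(A, axis=1)  # vertices with an outgoing edge
        keep = has_in & has_out      # sinks, sources, isolated vertices go
        A = A[keep][:, keep]
    return A

A = np.array([[0, 1, 0],
              [0, 0, 0],  # vertex 1 is a sink; removing it cascades
              [0, 0, 0]])
assert clean_matrix_sketch(A).shape == (0, 0)  # no cycle survives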
import pickle as pkl
import numpy as np
import scipy.sparse as sp
import torch
import networkx as nx
from sklearn.metrics import roc_auc_score, average_precision_score, accuracy_score, f1_score, log_loss
import matplotlib.pyplot as plt
## Miscellaneous useful functions ##
def load_graph_to_numpy(path_to_edgelist):
pass
def load_graph(path_to_edgelist, subgraph=None, weighted=False, seed=0):
"""
:param subgraph: None or int - number of nodes to sample
"""
# nx_graph = nx.from_edgelist([l.split() for l in open(path_to_edgelist)])
if weighted:
nx_graph = nx.read_weighted_edgelist(path_to_edgelist)
else:
nx_graph = nx.read_edgelist(path_to_edgelist)
print('The graph has {} nodes and {} edges'.format(nx_graph.number_of_nodes(),
nx_graph.number_of_edges()))
if subgraph is None:
return nx_graph
if seed:
np.random.seed(seed)
print('Sampling {}-node subgraph from original graph'.format(subgraph))
return nx_graph.subgraph(np.random.choice(nx_graph.nodes(),
size=subgraph, replace=False))
def get_dual(graph, sparse=True):
# graph is a networkx Graph
L = nx.line_graph(graph)
nodelist = sorted(L.nodes())
# may wrap sp.csr around numpy
if sparse:
return nx.to_scipy_sparse_matrix(L, nodelist), nodelist
return nx.to_numpy_matrix(L, nodelist), nodelist
# def get_dual(adj):
# # adj is a networkx Graph
# adj = nx.from_numpy_array(adj)
# L = nx.line_graph(adj)
# nodelist = sorted(L.nodes())
# return nx.to_numpy_matrix(L, nodelist), {i: n for i, n in enumerate(nodelist)}
def get_features(adj, sparse=True):
if sparse:
return sp.eye(adj.shape[0])
return np.identity(adj.shape[0])
############### VGAE-specific ################
########################################################################################
# ------------------------------------
# Some functions borrowed from:
# https://github.com/tkipf/pygcn and
# https://github.com/tkipf/gae
# ------------------------------------
class dotdict(dict):
"""dot.notation access to dictionary attributes"""
__getattr__ = dict.__getitem__
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
def eval_gae_lp(edges_pos, edges_neg, emb, adj_orig, threshold=0.5, verbose=False):
"""
Evaluate VGAE learned embeddings on Link Prediction task.
"""
def sigmoid(x):
return 1 / (1 + np.exp(-x))
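# Illustrative sketch: np.exp(-x) in the sigmoid above overflows for large
# negative logits. A numerically stable variant splits on the sign of x;
# this helper is hypothetical, not part of the source:
import numpy as np

def stable_sigmoid(x):
    x = np.asarray(x, dtype=float)
    out = np.empty_like(x)
    pos = x >= 0
    out[pos] = 1.0 / (1.0 + np.exp(-x[pos]))
    expx = np.exp(x[~pos])  # safe: x < 0 here, so exp(x) <= 1
    out[~pos] = expx / (1.0 + expx)
    return out

assert np.allclose(stable_sigmoid(np.array([-1000.0, 0.0, 1000.0])),
                   [0.0, 0.5, 1.0])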
from . import Image
import matplotlib.pyplot as plt
import numpy as np
import re
from astropy.time import Time
from astropy import units as u
from astropy.coordinates import SkyCoord
from .fluxes import ApertureFluxes
from . import viz
from astropy.io import fits
from .telescope import Telescope
from . import utils
from astroquery.mast import Catalogs
from astropy.wcs import WCS, utils as wcsutils
import pandas as pd
from scipy.stats import binned_statistic
from .blocks.psf import Gaussian2D, Moffat2D, cutouts
from .console_utils import INFO_LABEL
from astropy.stats import sigma_clipped_stats
from astropy.io.fits.verify import VerifyWarning
from datetime import datetime
import warnings
from .blocks.registration import distances
import requests
import shutil
from pathlib import Path
from . import twirl
import io
from .utils import fast_binning, z_scale
from .console_utils import info
warnings.simplefilter('ignore', category=VerifyWarning)
class Observation(ApertureFluxes):
"""
Class to load and analyze photometry products
Parameters
----------
photfile : str
path of the `.phot` file to load
"""
def __init__(self, photfile, ignore_time=False):
super().__init__(photfile)
utils.remove_sip(self.xarray.attrs)
self.phot = photfile
self.telescope = Telescope.from_name(self.telescope)
self.gaia_data = None
self.tic_data = None
self.wcs = WCS(utils.remove_arrays(self.xarray.attrs))
self._meridian_flip = None
has_bjd = hasattr(self.xarray, "bjd_tdb")
if has_bjd:
has_bjd = ~np.all(self.xarray.bjd_tdb.isnull().values)
if not has_bjd:
try:
self.compute_bjd()
if not ignore_time:
print(f"{INFO_LABEL} Time converted to BJD TDB")
except:
if not ignore_time:
print(f"{INFO_LABEL} Could not convert time to BJD TDB")
def _check_stack(self):
assert 'stack' in self.xarray, "No stack found"
# Loaders and savers (files and data)
# ------------------------------------
def __copy__(self):
copied = Observation(self.xarray.copy(), ignore_time=True)
copied.phot = self.phot
copied.telescope = self.telescope
copied.gaia_data = self.gaia_data
copied.tic_data = self.tic_data
copied.wcs = self.wcs
return copied
def copy(self):
return self.__copy__()
def to_csv(self, destination, sep=" "):
"""Export a typical csv of the observation's data
Parameters
----------
destination : str
Path of the csv file to save
sep : str, optional
separation string within csv, by default " "
"""
df = pd.DataFrame(
{
"BJD-TDB" if self.time_format == "bjd_tdb" else "JD-UTC": self.time,
"DIFF_FLUX": self.diff_flux,
"ERROR": self.diff_error,
"dx_MOVE": self.dx,
"dy_MOVE": self.dy,
"FWHM": self.fwhm,
"FWHMx": self.fwhm,
"FWHMy": self.fwhm,
"SKYLEVEL": self.sky,
"AIRMASS": self.airmass,
"EXPOSURE": self.exptime,
}
)
df.to_csv(destination, sep=sep, index=False)
def save(self, destination=None):
"""Save current observation
Parameters
----------
destination : str, optional
path to phot file, by default None
"""
self.xarray.to_netcdf(self.phot if destination is None else destination)
info(f"saved {self.phot}")
def export_stack(self, destination, **kwargs):
"""Export stack to FITS file
Parameters
----------
destination : str
path of FITS to export
"""
header = {name: value for name, value in self.xarray.attrs.items() if name.isupper()}
data = self.stack
hdul = fits.HDUList([fits.PrimaryHDU(data=data, header=fits.Header(header))])
hdul.writeto(destination, **kwargs)
def import_stack(self, fitsfile):
"""Import FITS as stack to current obs (including WCS) - do not forget to save to keep it
Parameters
----------
fitsfile : str
path of FITS stack to import
"""
data = fits.getdata(fitsfile)
header = fits.getheader(fitsfile)
self.wcs = WCS(header)
self.xarray.attrs.update(utils.header_to_cdf4_dict(header))
self.xarray["stack"] = (('w', 'h'), data)
# Convenience
# -----------
@property
def skycoord(self):
"""astropy SkyCoord object for the target
"""
return SkyCoord(self.RA, self.DEC, frame='icrs', unit=(self.telescope.ra_unit, self.telescope.dec_unit))
@property
def simbad_url(self):
"""
[notebook feature] clickable simbad query url for specified target
"""
from IPython.core.display import display, HTML
display(HTML('<a href="{}">{}</a>'.format(self.simbad, self.simbad)))
@property
def simbad(self):
"""
simbad query url for specified target
"""
return f"http://simbad.u-strasbg.fr/simbad/sim-coo?Coord={self.RA}+{self.DEC}&CooFrame=FK5&CooEpoch=2000&CooEqui=" \
"2000&CooDefinedFrames=none&Radius=2&Radius.unit=arcmin&submit=submit+query&CoordList="
@property
def denominator(self):
"""A conveniant name for the observation: {telescope}_{date}_{name}_{filter}
Returns
-------
[type]
[description]
"""
return f"{self.telescope.name}_{self.date}_{self.name}_{self.filter}"
@property
def meridian_flip(self):
"""Meridian flip time. Supposing EAST and WEST encode orientation
"""
if self._meridian_flip is not None:
return self._meridian_flip
else:
has_flip = hasattr(self.xarray, "flip")
if has_flip:
try:
np.all(np.isnan(self.flip))
return None
except TypeError:
pass
if has_flip:
if "WEST" in self.flip:
flip = (self.flip.copy() == "WEST").astype(int)
diffs = np.abs(np.diff(flip))
if np.any(diffs):
self._meridian_flip = self.time[np.argmax(diffs).flatten()]
else:
self._meridian_flip = None
return self._meridian_flip
else:
return None
else:
return None
# TESS specific methods
# --------------------
@property
def tic_id(self):
"""TIC id from digits found in target name
"""
try:
nb = re.findall(r'\d*\.?\d+', self.name)
df = pd.read_csv("https://exofop.ipac.caltech.edu/tess/download_toi?toi=%s&output=csv" % nb[0])
tic = df["TIC ID"][0]
return f"{tic}"
except KeyError:
print('TIC ID not found')
return None
@property
def gaia_from_toi(self):
"""Gaia id from TOI id if TOI is in target name
"""
if self.tic_id is not None:
tic_id = ("TIC " + self.tic_id)
catalog_data = Catalogs.query_object(tic_id, radius=.001, catalog="TIC")
return f"{catalog_data['GAIA'][0]}"
else:
return None
@property
def tfop_prefix(self):
return f"TIC{self.tic_id}_{self.date}_{self.telescope.name}_{self.filter}"
# Methods
# -------
def compute_bjd(self, version="prose"):
"""Compute BJD_tdb based on current time
Once this is done self.time is BJD tdb and time format can be checked in self.time_format. Note that half the
exposure time is added to the JD times before conversion. The precision of the returned time is not
guaranteed, especially with "prose" method (~30ms). "eastman" option accuracy is 20ms. See
http://astroutils.astronomy.ohio-state.edu/time/utc2bjd.html for more details.
Parameters
----------
version : str, optional
- "prose": uses an astropy method
- "eastman": uses the web applet http://astroutils.astronomy.ohio-state.edu (Eastman et al. 2010) [requires
an internet connection]
by default "prose"
"""
assert self.telescope is not None
assert self.skycoord is not None
exposure_days = self.xarray.exposure.values/60/60/24
# For backward compatibility
# --------------------------
if "time_format" not in self.xarray.attrs:
self.xarray.attrs["time_format"] = "jd_utc"
self.xarray["jd_utc"] = ("time", self.time)
if "jd_utc" not in self:
self.xarray["jd_utc"] = ("time", self.jd)
self.xarray.drop("jd")
# -------------------------
if version == "prose":
time = Time(self.jd_utc + exposure_days/2, format="jd", scale="utc", location=self.telescope.earth_location).tdb
light_travel_tdb = time.light_travel_time(self.skycoord, location=self.telescope.earth_location)
bjd_time = (time + light_travel_tdb).value
elif version == "eastman":
bjd_time = utils.jd_to_bjd(self.jd_utc + exposure_days/2, self.skycoord.ra.deg, self.skycoord.dec.deg)
self.xarray = self.xarray.assign_coords(time=bjd_time)
self.xarray["bjd_tdb"] = ("time", bjd_time)
self.xarray.attrs["time_format"] = "bjd_tdb"
# Catalog queries
# ---------------
def query_gaia(self, limit=-1, cone_radius=None):
"""Query gaia catalog for stars in the field
"""
from astroquery.gaia import Gaia
Gaia.ROW_LIMIT = limit
header = self.xarray.attrs
shape = self.stack.shape
if cone_radius is None:
cone_radius = np.sqrt(2) * np.max(shape) * self.telescope.pixel_scale / 120
coord = self.skycoord
radius = u.Quantity(cone_radius, u.arcminute)
gaia_query = Gaia.cone_search_async(coord, radius, verbose=False, )
self.gaia_data = gaia_query.get_results()
self.gaia_data.sort("phot_g_mean_flux", reverse=True)
delta_years = (utils.datetime_to_years(datetime.strptime(self.date, "%Y%m%d")) - \
self.gaia_data["ref_epoch"].data.data) * u.year
dra = delta_years * self.gaia_data["pmra"].to(u.deg / u.year)
ddec = delta_years * self.gaia_data["pmdec"].to(u.deg / u.year)
skycoords = SkyCoord(
ra=self.gaia_data['ra'].quantity + dra,
dec=self.gaia_data['dec'].quantity + ddec,
pm_ra_cosdec=self.gaia_data['pmra'],
pm_dec=self.gaia_data['pmdec'],
radial_velocity=self.gaia_data['radial_velocity'],
obstime=Time(2015.0, format='decimalyear'))
gaias = np.array(wcsutils.skycoord_to_pixel(skycoords, self.wcs)).T
gaias[np.any(np.isnan(gaias), 1), :] = [0, 0]
self.gaia_data["x"], self.gaia_data["y"] = gaias.T
inside = np.all((np.array([0, 0]) < gaias) & (gaias < np.array(self.stack.shape)), 1)
self.gaia_data = self.gaia_data[np.argwhere(inside).squeeze()]
w, h = self.stack.shape
if np.abs(np.mean(self.gaia_data["x"])) > w or np.abs(np.mean(self.gaia_data["y"])) > h:
warnings.warn("Catalog stars seem out of the field. Check that your stack is solved and that telescope "
"'ra_unit' and 'dec_unit' are well set")
def query_tic(self,cone_radius=None):
"""Query TIC catalog (through MAST) for stars in the field
"""
from astroquery.mast import Catalogs
header = self.xarray.attrs
shape = self.stack.shape
if cone_radius is None:
cone_radius = np.sqrt(2) * np.max(shape) * self.telescope.pixel_scale / 120
coord = self.skycoord
radius = u.Quantity(cone_radius, u.arcminute)
self.tic_data = Catalogs.query_region(coord, radius, "TIC", verbose=False)
self.tic_data.sort("Jmag")
skycoords = SkyCoord(
ra=self.tic_data['ra'],
dec=self.tic_data['dec'], unit="deg")
self.tic_data["x"], self.tic_data["y"] = np.array(wcsutils.skycoord_to_pixel(skycoords, self.wcs))
w, h = self.stack.shape
if np.abs(np.mean(self.tic_data["x"])) > w or np.abs(np.mean(self.tic_data["y"])) > h:
warnings.warn("Catalog stars seem out of the field. Check that your stack is solved and that telescope "
"'ra_unit' and 'dec_unit' are well set")
@property
def gaia_target(self):
return None
@gaia_target.setter
def gaia_target(self, gaia_id):
"""Set target with a gaia id
Parameters
----------
gaia_id : int
gaia id
"""
if self.gaia_data is None:
self.query_gaia()
_ = self.gaia_data.to_pandas()[["source_id", "x", "y"]].to_numpy()
ids = _[:, 0]
positions = _[:, 1:3]
gaia_i = np.argmin(np.abs(gaia_id - ids))
self.target = np.argmin(np.power(positions[gaia_i, :] - self.stars[:, ::-1], 2).sum(1))
# Plot
# ----
def show(self, size=10, flip=False, zoom=False, contrast=0.05, wcs=False, cmap="Greys_r", sigclip=None,vmin=None,vmax=None):
"""Show stack image
Parameters
----------
size : int, optional
size of the square figure, by default 10
flip : bool, optional
, by default False
zoom : bool, optional
whether to include a zoom inlay in the image, by default False
contrast : float, optional
contrast for the Zscale of image, by default 0.05
wcs : bool, optional
whether to show grid ans axes to world coordinate
"""
if self.target == -1:
zoom = False
self._check_stack()
fig = plt.figure(figsize=(size, size))
fig.patch.set_facecolor('white')
image = self.stack.copy()
if flip:
image = image[::-1, ::-1]
if sigclip is not None:
mean, median, std = sigma_clipped_stats(image)
image[image - median < 2 * std] = median
if wcs:
ax = plt.subplot(projection=self.wcs, label='overlays')
else:
ax = fig.add_subplot(111)
if not all([vmin, vmax]):
_ = ax.imshow(utils.z_scale(image,c=contrast), cmap=cmap, origin="lower")
else:
_ = ax.imshow(image, cmap=cmap, origin="lower",vmin=vmin,vmax=vmax)
if wcs:
ax.coords.grid(True, color='white', ls='solid', alpha=0.3)
ax.coords[0].set_axislabel('Galactic Longitude')
ax.coords[1].set_axislabel('Galactic Latitude')
overlay = ax.get_coords_overlay('fk5')
overlay.grid(color='white', ls='--', alpha=0.3)
overlay[0].set_axislabel('Right Ascension (J2000)')
overlay[1].set_axislabel('Declination (J2000)')
def _check_show(self, **kwargs):
axes = plt.gcf().axes
if len(axes) == 0:
self.show(**kwargs)
def show_stars(self, size=10, view=None, n=None, flip=False,
comp_color="yellow", color=[0.51, 0.86, 1.], stars=None, legend=True, **kwargs):
"""Show detected stars over stack image
Parameters
----------
size : int, optional
size of the square figure, by default 10
flip : bool, optional
whether to flip image, by default False
view : str, optional
"all" to see all stars OR "reference" to have target and comparison stars hilighted, by default None
n : int, optional
max number of stars to show, by default None,
Raises
------
AssertionError
[description]
"""
self._check_show(flip=flip, size=size, **kwargs)
if stars is None:
stars = self.stars
if n is not None:
if view == "reference":
raise AssertionError("'n_stars' kwargs is incompatible with 'reference' view that will display all stars")
else:
n = len(stars)
stars = stars[0:n]
if view is None:
view = "reference" if 'comps' in self else "all"
image_size = np.array(np.shape(self.stack))[::-1]
if flip:
stars = np.array(image_size) - stars
if view == "all":
viz.plot_marks(*stars.T, np.arange(len(stars)), color=color)
if "stars" in self.xarray:
others = np.arange(n, len(self.stars))
others = np.setdiff1d(others, self.target)
viz.plot_marks(*self.stars[others].T, alpha=0.4, color=color)
elif view == "reference":
x = self.xarray.isel(apertures=self.aperture)
assert 'comps' in self, "No differential photometry"
comps = x.comps.values
others = np.setdiff1d(np.arange(len(stars)), x.comps.values)
others = np.setdiff1d(others, self.target)
_ = viz.plot_marks(*stars[self.target], self.target, color=color)
_ = viz.plot_marks(*stars[comps].T, comps, color=comp_color)
_ = viz.plot_marks(*stars[others].T, alpha=0.4, color=color)
if legend:
colors = [comp_color, color]
texts = ["Comparison stars", "Target"]
viz.circles_legend(colors, texts)
def show_gaia(self, color="yellow", alpha=1, n=None, idxs=True, limit=-1, fontsize=8, align=False):
"""Overlay Gaia objects on stack image
Parameters
----------
color : str, optional
color of marks and font, by default "yellow"
alpha : int, optional
opacity of marks and font, by default 1
n : int, optional
max number of stars to show; by default None, i.e. all stars
idxs : bool, optional
whether to show Gaia ids, by default True
"""
self._check_show()
if self.gaia_data is None:
self.query_gaia(limit=limit)
gaias = np.vstack([self.gaia_data["x"].data, self.gaia_data["y"].data]).T
defined = ~np.any(np.isnan(gaias), 1)
gaias = gaias[defined]
labels = self.gaia_data["source_id"].data.astype(str)[defined]
if align:
X = twirl.find_transform(gaias[0:30], self.stars, n=15)
gaias = twirl.affine_transform(X)(gaias)
labels = [f"{_id[0:len(_id) // 2]}\n{_id[len(_id) // 2::]}" for _id in labels]
_ = viz.plot_marks(*gaias.T, labels if idxs else None, color=color, alpha=alpha, n=n, position="top",
fontsize=fontsize)
def show_tic(self, color="white", alpha=1, n=None, idxs=True, align=True):
"""Overlay TIC objects on stack image
Parameters
----------
color : str, optional
color of marks and font, by default "white"
alpha : int, optional
opacity of marks and font, by default 1
n : int, optional
max number of stars to show; by default None, i.e. all stars
idxs : bool, optional
whether to show TIC ids, by default True
"""
self._check_show()
if self.tic_data is None:
self.query_tic()
x = self.tic_data["x"].data
y = self.tic_data["y"].data
tics = np.vstack([x, y]).T
ID = self.tic_data["ID"].data
if align:
X = twirl.find_transform(tics[0:30], self.stars, n=15)
tics = twirl.affine_transform(X)(tics)
_ = viz.plot_marks(*tics.T, ID if idxs else None, color=color, alpha=alpha, n=n, position="top", fontsize=9, offset=10)
def show_cutout(self, star=None, size=200, marks=True,**kwargs):
"""
Show a zoomed cutout around a detected star or coordinates
Parameters
----------
star : [type], optional
detected star id or (x, y) coordinate, by default None
size : int, optional
side size of square cutout in pixel, by default 200
"""
if star is None:
x, y = self.stars[self.target]
elif isinstance(star, int):
x, y = self.stars[star]
elif isinstance(star, (tuple, list, np.ndarray)):
x, y = star
else:
raise ValueError("star type not understood")
self.show(**kwargs)
plt.xlim(np.array([-size / 2, size / 2]) + x)
plt.ylim(np.array([-size / 2, size / 2]) + y)
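# Illustrative sketch: the meridian_flip property above boils down to
# encoding the pier side as 0/1, differencing, and taking the first index
# where it changes. A standalone version of that detection (hypothetical data):
import numpy as np

flip = np.array(["EAST", "EAST", "EAST", "WEST", "WEST"])
time = np.linspace(0.0, 1.0, len(flip))
encoded = (flip == "WEST").astype(int)
diffs = np.abs(np.diff(encoded))
flip_time = time[np.argmax(diffs)] if np.any(diffs) else None
assert flip_time == time[2]  # last sample before the EAST -> WEST change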
import copy
from abc import abstractmethod
import numpy as np
from pymoo.model.callback import Callback
from pymoo.model.evaluator import Evaluator
from pymoo.model.individual import Individual
from pymoo.model.population import Population
from pymoo.model.result import Result
from pymoo.util.function_loader import FunctionLoader
from pymoo.util.nds.non_dominated_sorting import NonDominatedSorting
class Algorithm:
"""
This class represents the abstract class for any algorithm to be implemented. Most importantly it
provides the solve method that is used to optimize a given problem.
The solve method provides a wrapper function which validates the input.
Parameters
----------
problem: class
Problem to be solved by the algorithm
termination: class
Object that tells the algorithm when to terminate.
seed: int
Random seed to be used. Same seed is supposed to return the same result. If set to None, a random seed
is chosen randomly and stored in the result object to ensure reproducibility.
verbose : bool
If true, information about the algorithm execution is displayed
callback : func
A callback function can be passed that is executed every generation. The parameters for the function
are the algorithm itself, the number of evaluations so far and the current population.
def callback(algorithm):
pass
save_history : bool
If true, a current snapshot of each generation is saved.
pf : np.array
The Pareto-front for the given problem. If provided, performance metrics are printed during execution.
return_least_infeasible : bool
Whether the algorithm should return the least infeasible solution, if no solution was found.
evaluator : class
The evaluator which can be used to make modifications before calling the evaluate function of a problem.
"""
def __init__(self,
callback=None,
display=None,
termination=None,
return_least_infeasible=False,
**kwargs):
# !
# Here all algorithm parameters that are needed regardless of the problem are defined;
# problem dependent initialization happens in initialize(problem, **kwargs)
# !
super().__init__()
# prints the compile warning if enabled
FunctionLoader.get_instance()
# function used to display attributes
# other attributes of the algorithm
self.callback = callback
self.display = None
self.return_least_infeasible = return_least_infeasible
# !
# Attributes to be set later on for each problem run
# !
# the optimization problem as an instance
self.problem = None
# the termination criterion of the algorithm
self.termination = termination
# an algorithm can define a default termination criterion, which can be overwritten
self.default_termination = None
# the random seed that was used
self.seed = None
# the pareto-front of the problem - if it exist or passed
self.pf = None
# the function evaluator object (can be used to inject code)
self.evaluator = None
# the current number of generation or iteration
self.n_gen = None
# whether the history should be saved or not
self.save_history = None
# the history object which contains the list
self.history = None
# the current solutions stored - here considered as population
self.pop = None
# the optimum found by the algorithm
self.opt = None
# whether the algorithm should print output in this run or not
self.verbose = None
# set the display variable supplied to the algorithm
self.display = display
# =========================================================================================================
# PUBLIC
# =========================================================================================================
def initialize(self,
problem,
termination=None,
seed=None,
pf=True,
evaluator=None,
verbose=False,
save_history=False,
**kwargs):
# if this run should be verbose or not
self.verbose = verbose
# set the problem that is optimized for the current run
self.problem = problem
# the termination criterion to be used to stop the algorithm
if self.termination is None:
self.termination = termination
# if nothing given fall back to default
if self.termination is None:
self.termination = self.default_termination
# set the random seed in the algorithm object
self.seed = seed
if self.seed is None:
self.seed = np.random.randint(0, 10000000)
np.random.seed(self.seed)
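# Illustrative sketch: the seeding pattern above makes even seed=None runs
# reproducible after the fact -- draw a concrete seed, store it in the result,
# then seed the global RNG with it:
import numpy as np

seed = np.random.randint(0, 10000000)  # chosen at random, but recorded
np.random.seed(seed)
first_draw = np.random.rand(3)
np.random.seed(seed)                   # replaying the stored seed ...
assert np.allclose(first_draw, np.random.rand(3))  # ... reproduces the run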
import utils as uts
from thirdparty import log_mvnpdf, log_mvnpdf_diag
import numpy as np
from scipy.special import logsumexp  # moved here from the removed scipy.misc
import online
from gaussEM import GaussEM
EPS = np.finfo(float).eps
class Incremental(online.OnlineEM):
def __init__(self, param):
super().__init__(param)
self.skip = int(param['skip'])
def prepare(self, dataset):
super().prepare(dataset)
class IncrementalGauss(Incremental, GaussEM):
def __init__(self, param):
super().__init__(param)
self.cov = param['cov']
self.k = param['k']
self.select = param['select']
self.C = float(param['smoothing'])
self.histAcc = 0.0
self.func = {
'inf': (self.__e_inf, self.__m_inf),
'one': (self.__e_inf, self.__m_one),
'k': (self.__e_inf, self.__m_k),
}
self.mvnpdf = {'full': log_mvnpdf, 'diag': log_mvnpdf_diag}
def __e_inf(self, X):
lg = self.mvnpdf[self.cov](np.array([X]), self.means, self.COV[self.cov])
logResps = lg[0] + np.log(self.weights)
self.histAcc += logsumexp(logResps)
self.hist.append(-self.histAcc/self.N)
maxLg = np.max(logResps)
logResps -= maxLg
self.resps = np.exp(logResps)
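# Illustrative sketch: __e_inf above computes E-step responsibilities in log
# space, shifting by the max before exponentiating for stability; the
# truncated remainder presumably normalizes them. A standalone stable version:
import numpy as np
from scipy.special import logsumexp

log_weights = np.log(np.array([0.3, 0.7]))   # mixture weights
log_pdf = np.array([-10.0, -12.0])           # log N(x | mu_k, Sigma_k)
log_resps = log_pdf + log_weights
resps = np.exp(log_resps - logsumexp(log_resps))  # stable softmax
assert np.isclose(resps.sum(), 1.0)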
"""
Implements pipelines to track a sequence: Obtain costs, solve the instance (global or instance wise)
"""
import torch
import numpy as np
from scipy.sparse import csc_matrix
from tqdm import tqdm
import math
import os
from src.TrackingModel import TrackingModel
from src.datasets import Data, SplittedDataloader
from src.utilities.conversions import to_numpy
''' Cost update functions '''
def temporal_decay(delta_time):
""" creates a temporal decay factor based on the temporal distance """
return 1 / (10 * delta_time.clamp(0, 2) + 0.1)
def induce_soft_constraints(data, result):
"""
Induces soft constraints by adding a high cost value to high-confidence edges.
"""
if "high_confident" in data["edges"].keys():
high_confidence_cost = -1000
result['edge_costs'] = result['edge_costs'] + data["edges"]["high_confident"] * high_confidence_cost
return result
''' The tracker class to solve instances '''
class Tracker:
node_cost_keys = ['out_costs', 'in_costs', 'node_costs']
dataloader_cfg = dict(shuffle=False, num_workers=0, pin_memory=False, batch_size=1)
@staticmethod
def track(model: TrackingModel, dataset: Data):
if not model.solver.solve_instance_wise:
return Tracker.track_global(model, dataset)
else:
return Tracker.track_instance_wise(model, dataset)
@staticmethod
def track_global(model: TrackingModel, dataset: Data):
"""
This function infers and associates the data set with a given model
:param model: The model to evaluate
:param dataset: The dataset of class data. BE SURE THAT ONLY ONE SEQUENCE IS LOADED!
:return Dictionariers with numpy arrays
"""
model.eval()
seq = dataset.sequences_for_inference[0]
''' Create global graph for the sequence'''
full_graph_data = dataset.return_batch_with_full_graph(seq)
number_of_nodes = full_graph_data["nodes"]["frame"].shape[0]
node_row = full_graph_data["nodes"]["row"].cpu().numpy().astype(int)
node_row_to_index_mapping = np.ones(np.max(node_row) + 1)
node_row_to_index_mapping[node_row] = np.arange(node_row.shape[0])
node_row_to_index_mapping = node_row_to_index_mapping.astype(int)
''' Create edge cost and node cost container '''
node_costs = dict()
for key in Tracker.node_cost_keys:
node_costs[key] = torch.zeros_like(full_graph_data["nodes"]["id"][None, :])
edge_cost_matrix = csc_matrix((number_of_nodes, number_of_nodes), dtype=np.float32)
edge_calculations = csc_matrix((number_of_nodes, number_of_nodes), dtype=np.int16)
node_calculations = np.zeros(number_of_nodes, dtype=np.int16)
''' Iterate over dataset and fill cost container with cost values '''
dataset_cfg = dict(sequences=[seq], is_inference=True)
dataloader = SplittedDataloader(dataset, dataset_cfg, Tracker.dataloader_cfg, total_parts=25)
with torch.no_grad():
progress_bar = tqdm(iter(dataloader), desc="Track sequence with global graph")
for data in progress_bar:
if data["edges"]["sink"].numel() == 0:
continue
result = model.calculate_costs(data['nodes'], data['edges'])
result = induce_soft_constraints(data, result)
_rows, _sources, _sinks = \
data["nodes"]["row"].cpu().numpy().astype(int)[0], \
data["edges"]["source"].cpu().numpy().astype(int)[0], \
data["edges"]["sink"].cpu().numpy().astype(int)[0]
# Map detection_rows, sinks and sources to indices of the global graph
detection_indices = node_row_to_index_mapping[_rows]
_sources, _sinks = detection_indices[_sources], detection_indices[_sinks]
node_calculations[detection_indices] += 1
for key in Tracker.node_cost_keys:
node_costs[key][0, detection_indices] += result[key][0]
edge_calculations[_sources, _sinks] += 1
edge_cost_matrix[_sources, _sinks] += result["edge_costs"][0].numpy().astype(np.float32)
''' Convert aggregated edge costs to solver format '''
edge_counter = edge_calculations[
full_graph_data["edges"]["source"].numpy().astype(int),
full_graph_data["edges"]["sink"].numpy().astype(int)]
global_edge_costs = edge_cost_matrix[
full_graph_data["edges"]["source"].numpy().astype(int),
full_graph_data["edges"]["sink"].numpy().astype(int)]
global_edge_costs = global_edge_costs / np.maximum(1, edge_counter)
node_calculations = torch.from_numpy(node_calculations[None, :])
for key in Tracker.node_cost_keys:
node_costs[key] /= node_calculations.clamp(1, 10000)
costs = dict(
node_frames=full_graph_data["nodes"]["frame"][None, :], node_costs=node_costs['node_costs'],
edge_sources=full_graph_data["edges"]["source"][None, :], out_costs=node_costs['out_costs'],
edge_sinks=full_graph_data["edges"]["sink"][None, :], edge_costs=torch.from_numpy(global_edge_costs),
in_costs=node_costs['in_costs'],
)
''' Weight costs with the time '''
delta_time = \
(costs['node_frames'][0][costs['edge_sinks']] - costs['node_frames'][0][costs['edge_sources']]).float() / \
seq["fps"]
weight = temporal_decay(delta_time)
costs['edge_costs'] = costs['edge_costs'][0] * weight
''' Solve global instance and return full graph data '''
with torch.no_grad():
result = model.run_solver(costs=costs)
full_graph_data["prediction"] = result
full_graph_data["edges"]["costs"] = costs['edge_costs']
full_graph_data = to_numpy(full_graph_data)
return full_graph_data
@staticmethod
def track_instance_wise(model: TrackingModel, dataset: Data):
""" Tracks a sequence splitted into instances """
solver = model.solver.instance_solver
''' Create dataset specific values '''
seq = dataset.sequences_for_inference[0]
dataset_cfg = dict(sequences=[seq], is_inference=True)
full_graph_data = dataset.return_batch_with_full_graph(seq, return_edges=False)
number_of_nodes = full_graph_data["nodes"]["frame"].shape[0]
fps = seq["fps"]
dataset_name = os.getenv("DATASET", "MOT17")
batchsize = 3 * 50 if dataset_name == "MOT20" else 3 * 60
node_row = full_graph_data["nodes"]["row"].cpu().numpy().astype(int)
node_row_to_index_mapping = np.ones(np.max(node_row) + 1)
node_row_to_index_mapping[node_row] = np.arange(node_row.shape[0])
node_row_to_index_mapping = node_row_to_index_mapping.astype(int)
''' Update solver parameter for "irregular" videos with different framerate than 30 '''
if dataset_name == "MOT17" and fps != 30:
new_len = str(int(math.floor(2 * fps)))
params = {"MAX_TIMEGAP_BASE": new_len, "MAX_TIMEGAP_LIFTED": new_len, "MAX_TIMEGAP_COMPLETE": new_len}
model.solver.batched_solver.update_params_map(params)
def init_tracker_container():
""" Create data containers required for a tracking run """
node_costs = dict()
for key in Tracker.node_cost_keys:
node_costs[key] = torch.zeros_like(full_graph_data["nodes"]["id"][None, :])
dataloader = SplittedDataloader(dataset, dataset_cfg, Tracker.dataloader_cfg, total_parts=50)
return dataloader, node_costs
def prepare_local_instance(
edge_calculations, edge_cost_matrix, node_calculations, node_costs,
first_frame, last_frame
):
""" Converts the sparse global graph to a local instance """
source, sink = edge_calculations.nonzero()
frames = full_graph_data["nodes"]["frame"].numpy()
if last_frame is not None:
valid = (frames[source] <= last_frame) * (frames[sink] <= last_frame)
source, sink = source[valid], sink[valid]
if first_frame is not None:
valid = (frames[source] >= first_frame) * (frames[sink] >= first_frame)
source, sink = source[valid], sink[valid]
edge_counter = edge_calculations[source, sink]
global_edge_costs = edge_cost_matrix[source, sink]
global_edge_costs = global_edge_costs / edge_counter
node_calculations = torch.from_numpy(node_calculations[None, :])
for key in node_costs:
node_costs[key] = node_costs[key] / node_calculations.float().clamp(1, 10000)
# Convert to cost tensor
costs = dict(
node_frames=full_graph_data["nodes"]["frame"][None, :], edge_sources=torch.from_numpy(source)[None, :],
edge_sinks=torch.from_numpy(sink)[None, :], edge_costs=torch.from_numpy(global_edge_costs),
in_costs=node_costs['in_costs'], out_costs=node_costs['out_costs'], node_costs=node_costs['node_costs']
)
return costs
def delete_old_nodes_and_edges(
edge_calculations, edge_cost_matrix, node_calculations, node_costs, min_frame
):
""" Removes entries from the sparse matrix for frames smaller than the current minimal frame"""
frames_to_be_removed = np.where(full_graph_data["nodes"]["frame"] < min_frame)[0]
edge_calculations[edge_calculations[frames_to_be_removed, :].nonzero()] = 0
edge_calculations[edge_calculations[:, frames_to_be_removed].nonzero()] = 0
edge_cost_matrix[edge_cost_matrix[frames_to_be_removed, :].nonzero()] = 0
edge_cost_matrix[edge_cost_matrix[:, frames_to_be_removed].nonzero()] = 0
edge_cost_matrix.eliminate_zeros()
edge_calculations.eliminate_zeros()
node_calculations[frames_to_be_removed] = 0
for key in node_costs.keys():
node_costs[key][0, frames_to_be_removed] = 0
return edge_calculations, edge_cost_matrix, node_calculations, node_costs
def iterate_through_dataset(node_costs):
""" Iterates over the sequence and solves batches"""
''' Create empty data container to accumulate costs '''
edge_cost_matrix, edge_calculations, node_calculations = \
csc_matrix((number_of_nodes, number_of_nodes), dtype=np.float32), \
csc_matrix((number_of_nodes, number_of_nodes), dtype=np.int16), \
np.zeros(number_of_nodes, dtype=np.int16)
data_stack = list()
''' Iterate over sequence and calculate all edges '''
progress_bar = tqdm(iter(dataloader), desc="Track sequence batchwise graph")
with torch.no_grad():
for datas in progress_bar:
datas = [datas] if type(datas) != list else datas
for data in datas:
if data["edges"]["sink"].numel() == 0:
continue
l_bound, u_bound = solver.time_bounds[0], solver.time_bounds[1]
''' Do inference for current batch'''
result = model.calculate_costs(data['nodes'], data['edges'])
result = induce_soft_constraints(data, result)
min_frame, max_frame = data["nodes"]["frame"].min().item(), data["nodes"]["frame"].max().item()
if max_frame < l_bound:
continue
''' Add calculated node and edge costs to accumulator '''
_rows, _sources, _sinks = \
data["nodes"]["row"].cpu().numpy().astype(int)[0], \
data["edges"]["source"].cpu().numpy().astype(int)[0], \
data["edges"]["sink"].cpu().numpy().astype(int)[0]
# Map detection_rows, sinks and sources to indices of the global graph
detection_indices = node_row_to_index_mapping[_rows]
_sources, _sinks = detection_indices[_sources], detection_indices[_sinks]
node_calculations[detection_indices] += 1
# Weight costs with time
delta_time = data["edges"]["delta_t"]
delta_time = delta_time.float()
weight = temporal_decay(delta_time)
result['edge_costs'][0] = result['edge_costs'][0] * weight
for key in Tracker.node_cost_keys:
node_costs[key][0, detection_indices] += result[key][0]
# Aggregate some data, because updating the sparse matrix is slow
_ = result["edge_costs"][0].numpy().astype(np.float32)
data_stack.append([_sources, _sinks, _])
''' If all frames for the current batch are processed: Merge data and solve graph '''
solve = min_frame >= solver.time_bounds[1]
if solve:
''' Update sparse matrix with collected data '''
_sources = np.concatenate([_[0] for _ in data_stack])
_sinks = np.concatenate([_[1] for _ in data_stack])
_data = np.concatenate([_[2] for _ in data_stack])
edge_cost_matrix[_sources, _sinks] += _data
edge_calculations[_sources, _sinks] += 1
data_stack = list()
''' Solve graph '''
costs = prepare_local_instance(
edge_calculations, edge_cost_matrix, node_calculations, node_costs, l_bound, u_bound)
solver.process_next_batch(costs)
updated_sparse = delete_old_nodes_and_edges(
edge_calculations, edge_cost_matrix, node_calculations, node_costs, min_frame=l_bound)
edge_calculations, edge_cost_matrix, node_calculations, node_costs = updated_sparse
''' Solve the last batch if not already done '''
if len(data_stack) > 0:
_sources, _sinks, _data = \
np.concatenate([_[0] for _ in data_stack]), np.concatenate([_[1] for _ in data_stack]), \
np.concatenate([_[2] for _ in data_stack])
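# Illustrative sketch: the stacking above exists because item assignment into
# a csc_matrix is slow, so (source, sink, cost) triplets are buffered and
# applied in one vectorized update. A minimal standalone version (scipy will
# emit a SparseEfficiencyWarning here, just as the code above would):
import numpy as np
from scipy.sparse import csc_matrix

acc = csc_matrix((5, 5), dtype=np.float32)
stack = [(np.array([0, 1]), np.array([2, 3]), np.array([0.5, -1.0])),
         (np.array([2]), np.array([4]), np.array([0.25]))]
sources = np.concatenate([s for s, _, _ in stack])
sinks = np.concatenate([t for _, t, _ in stack])
costs = np.concatenate([c for _, _, c in stack])
acc[sources, sinks] += costs  # one sparse update instead of one per batch
assert np.isclose(acc[0, 2], 0.5) and np.isclose(acc[2, 4], 0.25)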
from classes.config import Config
c = Config()
from classes.display import Display
d = Display(c.settings, True)  # Emulate
d.begin()
from classes.display import Pot
p = Pot(d.LCD)
p.begin()
import numpy
import time
def play():
for i in numpy.arange(0, 1, 0.01):
# Core functionality for database implementation
import os
import json
import numpy as np
import pandas as pd
from copy import deepcopy
from astropy import units as u
from astropy.table import QTable
from astropy.units import Quantity
from astropy import uncertainty as unc
def get_values_from_distribution(distribution, unit=None):
"""Assuming a normal distribution, return value+error; includes unit if provided"""
if isinstance(distribution, (list, np.ndarray)):
distribution = unc.Distribution(distribution)
val = distribution.pdf_mean
err = distribution.pdf_std
if isinstance(val, u.quantity.Quantity):
unit = val.unit.to_string()
val = val.value
err = err.value
out_dict = {'value': val, 'error': err}
if unit is not None:
out_dict['unit'] = unit
return out_dict
class Database(object):
def __init__(self, directory='data', conn_string='', mongo_db_name='', collection_name='',
references_file='references.json', references_collection='references'):
"""
Database connection object which will prepare or load a database.
It also includes a collection of references.
Parameters
----------
directory : str
Directory of JSON files to load into the database
conn_string : str
MongoDB connection string
mongo_db_name : str
Name of the MongoDB database to use
collection_name : str
Name of the MongoDB collection holding the data
references_file : str
JSON file of references to load
references_collection : str
Name of the MongoDB collection holding the references
"""
# Load or establish connection
self.use_mongodb = False
if conn_string and mongo_db_name and collection_name:
# Connect to mongoDB
try:
import pymongo
self.use_mongodb = True
client = pymongo.MongoClient(conn_string)
database = client[mongo_db_name] # database
self.references = database[references_collection]
self.db = database[collection_name] # collection
except ImportError:
print('ERROR : pymongo package required for using MongoDB')
self.use_mongodb = False
else:
self.db = np.array([])
if not os.path.exists(references_file):
msg = 'ERROR: A json file of references must be provided.'
print(msg)
raise RuntimeError(msg)
with open(references_file, 'r') as f:
self.references = json.load(f)
self.load_all(directory)
def load_all(self, directory):
for filename in os.listdir(directory):
# Skip hidden and non-json files
if filename.startswith('.') or not filename.endswith('.json'):
continue
self.load_file_to_db(os.path.join(directory, filename))
def load_file_to_db(self, filename, id_column='name'):
"""
Load JSON file to database. If the document already exists (as matched by id_column), it gets updated.
Parameters
----------
filename : str or dict-like
Name of JSON file to add. If a dict-like, it is treated as ready to load.
id_column : str
Name of field to use for matching (Default: 'name')
"""
if isinstance(filename, str):
with open(filename, 'r') as f:
doc = json.load(f)
else:
doc = filename
if self.use_mongodb:
self.load_to_mongodb(doc, id_column=id_column)
else:
doc = self._recursive_json_fix(doc)
# Check if already present and if so update, otherwise add as new
name = doc.get(id_column, '')
orig_doc = self.query_db({id_column: name})
if len(orig_doc) > 0 and orig_doc[0][id_column] == name:
ind = np.where(self.db == orig_doc[0])
self.db[ind] = doc
else:
self.db = np.append(self.db, doc)
def load_to_mongodb(self, doc, id_column='name'):
# Load JSON file to MongoDB
# Use name as the unique ID (may want to change later)
id_value = doc[id_column]
# Revert arrays to lists to make correct JSON documents
doc = self._recursive_json_reverse_fix(doc)
# This uses replace_one to replace any existing document that matches the filter.
# If none is matched, upsert=True creates a new document.
result = self.db.replace_one(filter={id_column: id_value}, replacement=doc, upsert=True)
def update_references_mongodb(self, references_file, id_column='key'):
"""
Method to load references from a provided file to the MongoDB database
Parameters
----------
references_file : str
Name of references JSON file to load
id_column : str
Name of ID column to use to match against existing documents (default: key)
"""
with open(references_file, 'r') as f:
references = json.load(f)
for doc in references:
id_value = doc[id_column]
result = self.references.replace_one(filter={id_column: id_value}, replacement=doc, upsert=True)
def _recursive_json_fix(self, doc):
"""
Recursively fix a JSON document to convert lists to numpy arrays.
This is needed for queries against the MongoDB database.
Parameters
----------
doc : dict
Document result from a query against a MongoDB database
Returns
-------
out_doc : dict
Fixed document
"""
out_doc = {}
if isinstance(doc, list):
# Handle lists by converting to numpy arrays
out_doc = np.array([])
for elem in doc:
if isinstance(elem, dict):
elem = self._recursive_json_fix(elem)
out_doc = np.append(out_doc, elem)
elif isinstance(doc, dict):
# Handle dicts by recursively fixing
for key, val in doc.items():
if isinstance(val, dict):
out_doc[key] = self._recursive_json_fix(val)
elif isinstance(val, list):
new_array = np.array([])
for elem in val:
new_val = self._recursive_json_fix(elem)
new_array = np.append(new_array, new_val)
out_doc[key] = new_array
else:
out_doc[key] = val
else:
out_doc = doc
return out_doc
def _recursive_json_reverse_fix(self, doc):
"""
Undo the work from _recursive_json_fix; that is turn arrays to list to make correct JSON documents.
This is needed since MongoDB does not understand numpy arrays.
Parameters
----------
doc : dict
Document result to convert
Returns
-------
out_doc : dict
Fixed document
"""
out_doc = {}
# Remove _id if present (used in MongoDB)
if self.use_mongodb and isinstance(doc, dict) and '_id' in doc.keys():
del doc['_id']
if isinstance(doc, list):
# Handle np.arrays by converting to list
out_doc = []
for elem in doc:
if isinstance(elem, dict):
elem = self._recursive_json_reverse_fix(elem)
out_doc.append(elem)
elif isinstance(doc, dict):
for key, val in doc.items():
if isinstance(val, dict):
out_doc[key] = self._recursive_json_reverse_fix(val)
elif isinstance(val, type(np.array([]))):
new_array = []
for elem in val:
new_val = self._recursive_json_reverse_fix(elem)
new_array.append(new_val)
out_doc[key] = new_array
else:
out_doc[key] = val
else:
out_doc = doc
return out_doc
def save_from_db(self, doc, verbose=False, out_dir='', save=True, name=''):
"""
Save a JSON representation of the document. Useful for exporting database contents.
Parameters
----------
doc :
Document result from a query
verbose : bool
Flag to indicate whether the JSON representation should be printed in the terminal (Default: False)
out_dir : str
Directory to save JSON file (Default: '')
save : bool
Flag to indicate if the JSON representation should be saved (Default: True)
name : str
Name of output JSON file. If none is provided, the 'name' field is used to name it. (Default: '')
"""
# Save a JSON representation
out_doc = self._recursive_json_reverse_fix(doc)
out_json = json.dumps(out_doc, indent=4, sort_keys=False)
if verbose:
print(out_json)
if save:
if not name:
name = doc['name']
name = name.strip().replace(' ', '_') + '.json'
filename = os.path.join(out_dir, name)
print(filename)
with open(filename, 'w') as f:
f.write(out_json)
return
def save_all(self, out_dir=''):
# Save entire database to disk
doc_list = self.query_db({})
for doc in doc_list:
self.save_from_db(doc, out_dir=out_dir, save=True)
def add_data(self, filename, force=False, id_column='name', auto_save=False, save_dir='data', update_value=False,
validate=True):
"""
Add JSON data to database. May need to use save_all() afterwards to explicitly save changes to disk.
Parameters
----------
filename : str or dict-like
File name of JSON data to load. Alternatively, it can be the dict-like data you want to load.
force : bool
Flag to ignore validation (Default: False)
id_column : str
Field to use when matching names (Default: 'name')
auto_save : bool
Flag to trigger automatically saving (Default: False)
save_dir : str
Directory to use if auto-saving (Default: 'data')
update_value : bool
Flag to indicate whether or not to update values from duplicated references (Default: False)
validate : bool
Flag to run JSON validation (Default: True)
"""
if isinstance(filename, str):
with open(filename, 'r') as f:
new_data = json.load(f)
else:
new_data = filename
# Run validation
if validate:
from .validator import Validator
v = Validator(database=self, db_object=new_data, is_data=True)
if not v.run():
print('{} does not pass JSON validation'.format(filename))
return
name = new_data.get(id_column)
if name is None:
raise RuntimeError('JSON data is missing name information for field: {}'.format(id_column))
# Get existing data that will be updated
old_doc = self.query_db({id_column: name})[0]
if len(old_doc) == 0:
print('{} does not exist in the database! Use load_file_to_db() to load new objects.'.format(name))
return
# Loop through the new data, adding it all to old_doc
for k, v in new_data.items():
if k == id_column:
continue
old_values = old_doc.get(k)
if old_values is None:
old_doc[k] = np.array(v)
else:
ref_list = [x['reference'] for x in old_doc[k]]
# Loop over all new entries to be inserted, checking references at each stage
for i in range(len(v)):
ref = v[i]['reference']
if ref in ref_list:
ind = np.where(np.array(ref_list) == ref)[0]
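# Illustrative sketch: add_data() is truncated above. From its docstring, a
# duplicated reference is either skipped or, with update_value=True,
# overwritten in place. A standalone sketch of that merge rule -- the exact
# behavior of the missing lines is an assumption:
import numpy as np

def merge_entries(old, new, update_value=False):
    for entry in new:
        refs = np.array([e['reference'] for e in old])
        idx = np.where(refs == entry['reference'])[0]
        if idx.size == 0:
            old = np.append(old, entry)  # new reference: append
        elif update_value:
            old[idx[0]] = entry          # duplicate reference: overwrite
    return old

old = np.array([{'value': 1, 'reference': 'Ref1'}])
new = [{'value': 2, 'reference': 'Ref1'}, {'value': 3, 'reference': 'Ref2'}]
merged = merge_entries(old, new, update_value=True)
assert merged[0]['value'] == 2 and len(merged) == 2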
from keras.models import load_model
import glob
import os
from skimage.io import imread, imsave
from skimage.transform import resize
import cv2
import matplotlib.pyplot as plt
from create_individual_lettuce_train_data import construct_ground_truth, fix_noise
from skimage.color import rgb2grey, grey2rgb
from skimage.draw import circle, line, set_color
from skimage.util.shape import view_as_windows
import numpy as np
# write function to load the images.
def load_field_data():
dataset_name = '20160823_Gs_NDVI_1000ft_2-148_1/'
#dataset_name = '20160816_Gs_Wk33_NDVI_1000ft_Shippea_Hill_211-362'
image_path = '../AirSurf/Jennifer Manual Counts/ground_truth/Processed for Batch Analysis/' + dataset_name
ground_truth_path = '../AirSurf/Jennifer Manual Counts/ground_truth/' + dataset_name
names = []
train_X = []
position_Y = []
files = glob.glob(ground_truth_path + "*.txt")
for ind, textfile in enumerate(files):
image_Y = ground_truth_path
image = image_path
for txt in os.path.splitext(os.path.basename(textfile))[:-1]:
image += txt
image_Y += txt
image += '.txt_sub_img.tif'
if not os.path.isfile(image):
continue
img = fix_noise(cv2.cvtColor(cv2.imread(image), cv2.COLOR_BGR2RGB))
img = rgb2grey(img)
name = "./CONVERTED/"+os.path.basename(textfile)+".tif"
img_y = imread(image_Y + ".tif")
img = resize(img, (img_y.shape[0], img_y.shape[1], 1))
positions = construct_ground_truth(img_y)
names.append(name)
train_X.append(img)
position_Y.append(positions)
return names, np.array(train_X), np.array(position_Y)
# Given the image and the model, slide along the image, extracting plots and counting the lettuces.
def sliding_window_count(img, model, stride=10):
    img = img.reshape(img.shape[:2])  # drop the channel axis: (H, W, 1) -> (H, W)
    img = np.pad(img, stride+1, mode='constant')
    todraw = grey2rgb(img.copy())
plt.imshow(todraw)
plt.show()
img = img.reshape((img.shape[0], img.shape[1], 1))
print(img.shape)
w, h = img.shape[:2]
l = 20
#count the number of predicted ones.
lettuce_count = 0
kernel = 9
for x in range(kernel, w-l, stride):
for y in range(kernel, h-l, stride):
regions = []
inds = []
            for x1 in range(x-kernel, x+kernel):
                for y1 in range(y-kernel, y+kernel):
                    regions.append(img[x1:x1 + l, y1:y1 + l])
                    inds.append((x1, y1))
inds = np.array(inds)
pred = model.predict(np.array(regions), verbose=0)
#count lettuce predictions in this kernel region.
args = np.argmax(pred, axis=1)
#count the number of 1's, in the arg list.
count = np.count_nonzero(args)
#75% of preds are for a lettuce.
if count >= float(inds.shape[0]) * 0.75:
#find the index of the best pred
                best_arg = np.argmax(pred[:, 1])  # region with the highest lettuce probability
x_1, y_1 = inds[best_arg]
todraw[circle(x_1,y_1,5,shape=todraw.shape)] = (1,0,0)
lettuce_count += 1
return lettuce_count, todraw
# Vectorised variant: extract all windows at once and return bounding boxes with probabilities.
def sliding_window_count_vectorised(img, model, length=20, stride=3, probability_threshold = 0.95):
#img = img.reshape(img.shape[:2])
#img = np.pad(img, stride, mode='constant')
img = img.reshape((img.shape[0], img.shape[1], 1))
#count the number of predicted ones.
lettuce_count = 0
boxes = []
probs = []
if min(img.shape[:2]) < length:
return np.array(boxes), np.array(probs)
im4D = view_as_windows(img, (length,length,1), step=(stride,stride,1))
im3d = im4D.reshape(-1,length,length,1)
#from a given index, we should be able to convert it back into a 2d co-ord.
preds = model.predict(im3d, verbose=0)
xs = np.arange(0, img.shape[0]-length+1, step=stride)
ys = np.arange(0, img.shape[1]-length+1, step=stride)
#unravel the predictions, and construct the bounding boxes from the indexes.
for index, pred in enumerate(preds):
if np.argmax(pred) == 1:
probability = np.max(pred)
if probability < probability_threshold:
continue
probs.append(probability)
#deconstruct index into x,y.
x,y = np.unravel_index(index, im4D.shape[:2])
#need to then map back to the stride params from original image.
x = xs[x]
y = ys[y]
boxes.append([x,y,x+length,y+length])
return np.array(boxes), np.array(probs)
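# Usage sketch (hypothetical file names): run the vectorised detector on one tile.
# Assumes a trained 20x20 patch classifier saved next to this script.
def example_detect_tile(image_path='tile.tif', model_path='lettuce_cnn.h5'):
    model = load_model(model_path)
    img = rgb2grey(fix_noise(cv2.cvtColor(cv2.imread(image_path), cv2.COLOR_BGR2RGB)))
    boxes, probs = sliding_window_count_vectorised(img, model, length=20, stride=3)
    print('%d candidate lettuces above the probability threshold' % len(boxes))
    return boxes, probs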
# Simple stride-based variant: score one window at a time instead of batching them.
def sliding_window_count_simple(img, model, stride=5):
img = img.reshape(img.shape[:2])
img = np.pad(img, stride, mode='constant')
img = img.reshape((img.shape[0], img.shape[1], 1))
w, h = img.shape[:2]
l = 20
#count the number of predicted ones.
lettuce_count = 0
boxes = []
probs = []
for x in range(stride, w-l, stride):
for y in range(stride, h-l, stride):
pred = model.predict(np.array([img[x:x+l,y:y+l]]), verbose=0)
            if np.argmax(pred) == 1:
                # Completion assumed to mirror the vectorised variant above:
                # record the box, its confidence, and bump the count.
                boxes.append([x, y, x + l, y + l])
                probs.append(np.max(pred))
                lettuce_count += 1
    return np.array(boxes), np.array(probs)
""" Here the plots and visualizations
"""
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
def plt_arima_forecast(y, forecasts, length_for_training=None,
conf_int=False,
title='Country name here',
y_label='Deaths',
x=None,
save_here='arima_case.png',
show_plot = False):
"""
:param y: real vualues
:param forecasts: predicted values
:param length_for_training: like 90% lenght of y
:param save_here: str where to save.
:return:
"""
    if not isinstance(length_for_training, (int, float)):
        # Default: everything before the forecast window counts as training.
        length_for_training = len(y) - len(forecasts)
        print("WARNING: please use an int or float for the training length. Setting this to:", length_for_training)
plt.clf()
if x is None:
x = np.arange(y.shape[0])
plt.plot(x, y, 'b*--', label='Real')
plt.plot(x[length_for_training:], forecasts, 'go--', label='Forecast')
plt.xlabel('Date')
plt.title(title)
plt.ylabel(y_label)
if conf_int is not False:
plt.fill_between(x[length_for_training:],
conf_int[:, 0], conf_int[:, 1],
alpha=0.1, color='b')
plt.legend(loc='upper left')
plt.xticks(rotation=90)
plt.tight_layout()
plt.savefig(save_here)
if show_plot:
plt.show()
else:
plt.clf()
return plt
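# Usage sketch (synthetic data; the file name is illustrative): in-sample fit check.
def example_arima_plot():
    y = np.linspace(100, 200, 50)
    split = 40                                    # first 80% of the series for training
    forecasts = y[split:] + np.random.randn(10)   # stand-in for ARIMA predictions
    plt_arima_forecast(y, forecasts, length_for_training=split,
                       title='Synthetic example', save_here='example.png')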
def plt_arima_forecast_outsample(y, forecasts, lenght_for_forecast=None, conf_int=False,
title='Country name here',
y_label='Deaths',
x=None,
save_here='arima_case.png',
show_plot = False):
"""
:param y: real vualues
:param forecasts: predicted values
:param lenght_for_forecast: like 10% length of y
:param save_here: str where to save.
:return:
"""
    if not isinstance(lenght_for_forecast, (int, float)):
        lenght_for_forecast = len(forecasts)
        print("WARNING: please use an int or float for the forecast length. Setting this to:", lenght_for_forecast)
    plt.clf()
    if x is None:
        x = np.arange(y.shape[0] + lenght_for_forecast)
    # Out-of-sample: forecasts are plotted on the ticks after the observed series.
    # (Reconstructed body, assumed to mirror plt_arima_forecast above.)
    plt.plot(x[:y.shape[0]], y, 'b*--', label='Real')
    plt.plot(x[y.shape[0]:], forecasts, 'go--', label='Forecast')
    if conf_int is not False:
        plt.fill_between(x[y.shape[0]:],
                         conf_int[:, 0], conf_int[:, 1],
                         alpha=0.1, color='b')
    plt.xlabel('Date')
    plt.title(title)
    plt.ylabel(y_label)
    plt.legend(loc='upper left')
    plt.xticks(rotation=90)
    plt.tight_layout()
    plt.savefig(save_here)
    if show_plot:
        plt.show()
    else:
        plt.clf()
    return plt
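# Usage sketch (synthetic data) exercising the reconstructed out-of-sample plot above.
def example_arima_outsample_plot():
    y = np.linspace(100, 200, 50)
    forecasts = 200 + np.cumsum(np.random.randn(10))  # stand-in for ARIMA output
    plt_arima_forecast_outsample(y, forecasts, lenght_for_forecast=10,
                                 title='Synthetic out-of-sample example',
                                 save_here='example_outsample.png')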
""" Unit tests for the SqliteCaseReader. """
import errno
import os
import unittest
from shutil import rmtree
from tempfile import mkdtemp, mkstemp
from collections import OrderedDict
import numpy as np
from io import StringIO
import openmdao.api as om
from openmdao.recorders.sqlite_recorder import format_version
from openmdao.recorders.sqlite_reader import SqliteCaseReader
from openmdao.recorders.case import PromAbsDict
from openmdao.core.tests.test_units import SpeedComp
from openmdao.test_suite.components.expl_comp_array import TestExplCompArray
from openmdao.test_suite.components.implicit_newton_linesearch import ImplCompTwoStates
from openmdao.test_suite.components.paraboloid import Paraboloid
from openmdao.test_suite.components.paraboloid_problem import ParaboloidProblem
from openmdao.test_suite.components.sellar import SellarDerivativesGrouped, \
SellarDis1withDerivatives, SellarDis2withDerivatives, SellarProblem
from openmdao.utils.assert_utils import assert_near_equal, assert_warning
from openmdao.utils.general_utils import set_pyoptsparse_opt, determine_adder_scaler, printoptions
from openmdao.utils.general_utils import remove_whitespace
from openmdao.utils.testing_utils import use_tempdirs
from openmdao.core.tests.test_discrete import ModCompEx, ModCompIm
# check that pyoptsparse is installed
OPT, OPTIMIZER = set_pyoptsparse_opt('SLSQP')
if OPTIMIZER:
from openmdao.drivers.pyoptsparse_driver import pyOptSparseDriver
def count_keys(d):
"""
Count the number of keys in the nested dictionary.
Parameters
----------
d : nested OrderedDict
The dictionary of cases to be counted.
"""
count = 0
for k in d:
count += 1
if isinstance(d[k], OrderedDict):
count += count_keys(d[k])
return count
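# Example: count_keys counts keys at every nesting level, e.g.
#   d = OrderedDict(a=OrderedDict(b=OrderedDict()), c=OrderedDict())
#   count_keys(d) -> 3   (keys 'a', 'b' and 'c')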
class SellarDerivativesGroupedPreAutoIVC(om.Group):
"""
This version is needed for testing backwards compatibility for load_case on pre-3.2
models.
"""
def initialize(self):
self.options.declare('nonlinear_solver', default=om.NonlinearBlockGS,
desc='Nonlinear solver (class or instance) for Sellar MDA')
self.options.declare('nl_atol', default=None,
desc='User-specified atol for nonlinear solver.')
self.options.declare('nl_maxiter', default=None,
desc='Iteration limit for nonlinear solver.')
self.options.declare('linear_solver', default=om.ScipyKrylov,
desc='Linear solver (class or instance)')
self.options.declare('ln_atol', default=None,
desc='User-specified atol for linear solver.')
self.options.declare('ln_maxiter', default=None,
desc='Iteration limit for linear solver.')
def setup(self):
self.add_subsystem('px', om.IndepVarComp('x', 1.0), promotes=['x'])
self.add_subsystem('pz', om.IndepVarComp('z', np.array([5.0, 2.0])), promotes=['z'])
self.mda = mda = self.add_subsystem('mda', om.Group(), promotes=['x', 'z', 'y1', 'y2'])
mda.add_subsystem('d1', SellarDis1withDerivatives(), promotes=['x', 'z', 'y1', 'y2'])
mda.add_subsystem('d2', SellarDis2withDerivatives(), promotes=['z', 'y1', 'y2'])
self.add_subsystem('obj_cmp', om.ExecComp('obj = x**2 + z[1] + y1 + exp(-y2)',
z=np.array([0.0, 0.0]), x=0.0, y1=0.0, y2=0.0),
promotes=['obj', 'x', 'z', 'y1', 'y2'])
self.add_subsystem('con_cmp1', om.ExecComp('con1 = 3.16 - y1'), promotes=['con1', 'y1'])
self.add_subsystem('con_cmp2', om.ExecComp('con2 = y2 - 24.0'), promotes=['con2', 'y2'])
nl = self.options['nonlinear_solver']
self.nonlinear_solver = nl()
if self.options['nl_atol']:
self.nonlinear_solver.options['atol'] = self.options['nl_atol']
if self.options['nl_maxiter']:
self.nonlinear_solver.options['maxiter'] = self.options['nl_maxiter']
ln = self.options['linear_solver']
self.linear_solver = ln()
if self.options['ln_atol']:
self.linear_solver.options['atol'] = self.options['ln_atol']
if self.options['ln_maxiter']:
self.linear_solver.options['maxiter'] = self.options['ln_maxiter']
def configure(self):
self.mda.linear_solver = om.ScipyKrylov()
self.mda.nonlinear_solver = om.NonlinearBlockGS()
@use_tempdirs
class TestSqliteCaseReader(unittest.TestCase):
def setUp(self):
self.filename = "sqlite_test"
self.recorder = om.SqliteRecorder(self.filename, record_viewer_data=False)
def test_bad_filetype(self):
# Pass a plain text file.
fd, filepath = mkstemp()
with os.fdopen(fd, 'w') as tmp:
tmp.write("Lorem ipsum")
tmp.close()
with self.assertRaises(IOError) as cm:
om.CaseReader(filepath)
msg = 'File does not contain a valid sqlite database'
self.assertTrue(str(cm.exception).startswith(msg))
def test_bad_filename(self):
# Pass a nonexistent file.
with self.assertRaises(IOError) as cm:
om.CaseReader('junk.sql')
self.assertTrue(str(cm.exception).startswith('File does not exist'))
def test_format_version(self):
prob = SellarProblem()
prob.model.add_recorder(self.recorder)
prob.setup()
prob.run_driver()
prob.cleanup()
cr = om.CaseReader(self.filename)
self.assertEqual(cr._format_version, format_version,
msg='format version not read correctly')
def test_reader_instantiates(self):
""" Test that CaseReader returns an SqliteCaseReader. """
prob = SellarProblem()
prob.model.add_recorder(self.recorder)
prob.setup()
prob.run_driver()
prob.cleanup()
cr = om.CaseReader(self.filename)
self.assertTrue(isinstance(cr, SqliteCaseReader),
msg='CaseReader not returning the correct subclass.')
def test_case_attributes(self):
""" Check that a Case object has all the expected attributes. """
prob = SellarProblem()
prob.setup()
prob.driver.add_recorder(self.recorder)
prob.run_driver()
prob.cleanup()
cr = om.CaseReader(self.filename)
case = cr.get_case(0)
self.assertEqual(case.source, 'driver')
self.assertEqual(case.name, 'rank0:Driver|0')
self.assertEqual(case.counter, 1)
self.assertTrue(isinstance(case.timestamp, float))
self.assertEqual(case.success, True)
self.assertEqual(case.msg, '')
self.assertTrue(isinstance(case.outputs, PromAbsDict))
self.assertEqual(case.inputs, None)
self.assertEqual(case.residuals, None)
self.assertEqual(case.derivatives, None)
self.assertEqual(case.parent, None)
self.assertEqual(case.abs_err, None)
self.assertEqual(case.rel_err, None)
def test_invalid_source(self):
""" Tests that the reader returns params correctly. """
prob = SellarProblem(SellarDerivativesGrouped)
driver = prob.driver
driver.recording_options['record_desvars'] = False
driver.recording_options['record_objectives'] = False
driver.recording_options['record_constraints'] = False
driver.recording_options['record_derivatives'] = False
driver.add_recorder(self.recorder)
prob.setup()
prob.run_driver()
prob.cleanup()
cr = om.CaseReader(self.filename)
# check that driver is our only source
self.assertEqual(cr.list_sources(out_stream=None), ['driver'])
# check source vars
source_vars = cr.list_source_vars('driver', out_stream=None)
self.assertEqual(sorted(source_vars['inputs']), [])
self.assertEqual(sorted(source_vars['outputs']), [])
with self.assertRaisesRegex(RuntimeError, "No cases recorded for problem"):
cr.list_source_vars('problem', out_stream=None)
with self.assertRaisesRegex(RuntimeError, "Source not found: root"):
cr.list_source_vars('root', out_stream=None)
with self.assertRaisesRegex(RuntimeError, "Source not found: root.nonlinear_solver"):
cr.list_source_vars('root.nonlinear_solver', out_stream=None)
# check list cases
with self.assertRaisesRegex(RuntimeError, "Source not found: foo"):
cr.list_cases('foo')
with self.assertRaisesRegex(TypeError, "Source parameter must be a string, 999 is type int"):
cr.list_cases(999)
def test_reading_driver_cases(self):
""" Tests that the reader returns params correctly. """
prob = SellarProblem(SellarDerivativesGrouped)
driver = prob.driver = om.ScipyOptimizeDriver(tol=1e-9, disp=False)
driver.recording_options['record_desvars'] = False
driver.recording_options['record_objectives'] = True
driver.recording_options['record_constraints'] = True
driver.recording_options['record_derivatives'] = True
driver.recording_options['record_inputs'] = True
driver.recording_options['includes'] = ['*']
driver.add_recorder(self.recorder)
prob.setup()
prob.run_driver()
prob.cleanup()
cr = om.CaseReader(self.filename)
# check that we only have driver cases
self.assertEqual(cr.list_sources(out_stream=None), ['driver'])
# check source vars
source_vars = cr.list_source_vars('driver', out_stream=None)
self.assertEqual(sorted(source_vars['inputs']), ['x', 'y1', 'y2', 'z'])
self.assertEqual(sorted(source_vars['outputs']), ['con1', 'con2', 'obj', 'x', 'y1', 'y2', 'z'])
# check that we got the correct number of cases
driver_cases = cr.list_cases('driver')
self.assertEqual(driver_cases, [
'rank0:ScipyOptimize_SLSQP|0', 'rank0:ScipyOptimize_SLSQP|1', 'rank0:ScipyOptimize_SLSQP|2',
'rank0:ScipyOptimize_SLSQP|3', 'rank0:ScipyOptimize_SLSQP|4', 'rank0:ScipyOptimize_SLSQP|5',
'rank0:ScipyOptimize_SLSQP|6'
])
# Test to see if the access by case keys works:
seventh_slsqp_iteration_case = cr.get_case('rank0:ScipyOptimize_SLSQP|6')
np.testing.assert_almost_equal(seventh_slsqp_iteration_case.outputs['z'],
[1.97846296, -2.21388305e-13], decimal=2)
deriv_case = cr.get_case('rank0:ScipyOptimize_SLSQP|4')
np.testing.assert_almost_equal(deriv_case.derivatives['obj', 'z'],
[[3.8178954, 1.73971323]], decimal=2)
# While thinking about derivatives, let's get them all.
derivs = deriv_case.derivatives
self.assertEqual(set(derivs.keys()), set([
('obj', 'z'), ('con2', 'z'), ('con1', 'x'),
('obj', 'x'), ('con2', 'x'), ('con1', 'z')
]))
# Test values from the last case
last_case = cr.get_case(driver_cases[-1])
np.testing.assert_almost_equal(last_case.outputs['z'], prob['z'])
np.testing.assert_almost_equal(last_case.outputs['x'], [-0.00309521], decimal=2)
# Test to see if the case keys (iteration coords) come back correctly
for i, iter_coord in enumerate(driver_cases):
self.assertEqual(iter_coord, 'rank0:ScipyOptimize_SLSQP|{}'.format(i))
def test_driver_reading_outputs(self):
prob = ParaboloidProblem()
driver = prob.driver = om.ScipyOptimizeDriver(disp=False, tol=1e-9)
driver.recording_options['record_desvars'] = False
driver.recording_options['record_objectives'] = False
driver.recording_options['record_constraints'] = False
driver.recording_options['record_inputs'] = False
driver.recording_options['record_outputs'] = True
driver.recording_options['record_residuals'] = False
driver.recording_options['includes'] = ['*']
driver.add_recorder(self.recorder)
prob.setup()
prob.set_solver_print(0)
prob.run_driver()
cr = om.CaseReader(self.filename)
# check that we only have driver cases
self.assertEqual(cr.list_sources(out_stream=None), ['driver'])
# check source vars
source_vars = cr.list_source_vars('driver', out_stream=None)
self.assertEqual(sorted(source_vars['inputs']), [])
self.assertEqual(sorted(source_vars['outputs']), ['c', 'f_xy', 'x', 'y'])
# Test values from the last case
driver_cases = cr.list_cases('driver')
last_case = cr.get_case(driver_cases[-1])
np.testing.assert_almost_equal(last_case.outputs['f_xy'], prob['f_xy'])
np.testing.assert_almost_equal(last_case.outputs['x'], prob['x'])
def test_driver_reading_residuals(self):
prob = ParaboloidProblem()
driver = prob.driver = om.ScipyOptimizeDriver(disp=False, tol=1e-9)
driver.recording_options['record_desvars'] = False
driver.recording_options['record_objectives'] = False
driver.recording_options['record_constraints'] = False
driver.recording_options['record_inputs'] = False
driver.recording_options['record_outputs'] = False
driver.recording_options['record_residuals'] = True
driver.recording_options['includes'] = ['*']
driver.add_recorder(self.recorder)
prob.setup()
prob.set_solver_print(0)
prob.run_driver()
cr = om.CaseReader(self.filename)
# check that we only have driver cases
self.assertEqual(cr.list_sources(out_stream=None), ['driver'])
# check source vars
source_vars = cr.list_source_vars('driver', out_stream=None)
self.assertEqual(sorted(source_vars['inputs']), [])
self.assertEqual(sorted(source_vars['residuals']), ['c', 'f_xy', 'x', 'y'])
# Test values from the last case
driver_cases = cr.list_cases('driver')
last_case = cr.get_case(driver_cases[-1])
np.testing.assert_almost_equal(last_case.residuals['f_xy'], 0.0)
np.testing.assert_almost_equal(last_case.residuals['x'], 0.0)
def test_reading_system_cases(self):
prob = SellarProblem()
model = prob.model
model.recording_options['record_inputs'] = True
model.recording_options['record_outputs'] = True
model.recording_options['record_residuals'] = True
model.add_recorder(self.recorder)
prob.setup()
model.nonlinear_solver.options['use_apply_nonlinear'] = True
model.d1.add_recorder(self.recorder) # SellarDis1withDerivatives (an ExplicitComp)
model.obj_cmp.add_recorder(self.recorder) # an ExecComp
prob.run_driver()
prob.cleanup()
cr = om.CaseReader(self.filename)
# check that we only have the three system sources
self.assertEqual(sorted(cr.list_sources(out_stream=None)), ['root', 'root.d1', 'root.obj_cmp'])
# check source vars
source_vars = cr.list_source_vars('root', out_stream=None)
self.assertEqual(sorted(source_vars['inputs']), ['x', 'y1', 'y2', 'z'])
self.assertEqual(sorted(source_vars['outputs']), ['con1', 'con2', 'obj', 'x', 'y1', 'y2', 'z'])
source_vars = cr.list_source_vars('root.d1', out_stream=None)
self.assertEqual(sorted(source_vars['inputs']), ['x', 'y2', 'z'])
self.assertEqual(sorted(source_vars['outputs']), ['y1'])
source_vars = cr.list_source_vars('root.obj_cmp', out_stream=None)
self.assertEqual(sorted(source_vars['inputs']), ['x', 'y1', 'y2', 'z'])
self.assertEqual(sorted(source_vars['outputs']), ['obj'])
# Test to see if we got the correct number of cases
self.assertEqual(len(cr.list_cases('root', recurse=False)), 1)
self.assertEqual(len(cr.list_cases('root.d1', recurse=False)), 7)
self.assertEqual(len(cr.list_cases('root.obj_cmp', recurse=False)), 7)
# Test values from cases
case = cr.get_case('rank0:Driver|0|root._solve_nonlinear|0')
np.testing.assert_almost_equal(case.inputs['d1.y2'], [12.05848815, ])
np.testing.assert_almost_equal(case.outputs['obj'], [28.58830817, ])
np.testing.assert_almost_equal(case.residuals['obj'], [0.0, ],)
# Test to see if the case keys (iteration coords) come back correctly
for i, iter_coord in enumerate(cr.list_cases('root.d1', recurse=False)):
self.assertEqual(iter_coord,
'rank0:Driver|0|root._solve_nonlinear|0|NonlinearBlockGS|{iter}|'
'd1._solve_nonlinear|{iter}'.format(iter=i))
for i, iter_coord in enumerate(cr.list_cases('root.obj_cmp', recurse=False)):
self.assertEqual(iter_coord,
'rank0:Driver|0|root._solve_nonlinear|0|NonlinearBlockGS|{iter}|'
'obj_cmp._solve_nonlinear|{iter}'.format(iter=i))
def test_reading_solver_cases(self):
prob = SellarProblem()
prob.setup()
solver = prob.model.nonlinear_solver
solver.add_recorder(self.recorder)
solver.recording_options['record_abs_error'] = True
solver.recording_options['record_rel_error'] = True
solver.recording_options['record_solver_residuals'] = True
prob.run_driver()
prob.cleanup()
cr = om.CaseReader(self.filename)
# check that we only have the one solver source
self.assertEqual(sorted(cr.list_sources(out_stream=None)), ['root.nonlinear_solver'])
# check source vars
source_vars = cr.list_source_vars('root.nonlinear_solver', out_stream=None)
self.assertEqual(sorted(source_vars['inputs']), ['x', 'y1', 'y2', 'z'])
self.assertEqual(sorted(source_vars['outputs']), ['con1', 'con2', 'obj', 'x', 'y1', 'y2', 'z'])
# Test to see if we got the correct number of cases
solver_cases = cr.list_cases('root.nonlinear_solver')
self.assertEqual(len(solver_cases), 7)
# Test values from cases
last_case = cr.get_case(solver_cases[-1])
np.testing.assert_almost_equal(last_case.abs_err, [0.0, ])
np.testing.assert_almost_equal(last_case.rel_err, [0.0, ])
np.testing.assert_almost_equal(last_case.outputs['x'], [1.0, ])
np.testing.assert_almost_equal(last_case.residuals['con2'], [0.0, ])
# check that the case keys (iteration coords) come back correctly
for i, iter_coord in enumerate(solver_cases):
self.assertEqual(iter_coord,
'rank0:Driver|0|root._solve_nonlinear|0|NonlinearBlockGS|%d' % i)
def test_reading_solver_metadata(self):
prob = SellarProblem(linear_solver=om.LinearBlockGS())
prob.setup()
prob.model.nonlinear_solver.add_recorder(self.recorder)
d1 = prob.model.d1 # SellarDis1withDerivatives (an ExplicitComponent)
d1.nonlinear_solver = om.NonlinearBlockGS(maxiter=5)
d1.nonlinear_solver.add_recorder(self.recorder)
prob.run_driver()
prob.cleanup()
metadata = om.CaseReader(self.filename).solver_metadata
self.assertEqual(
sorted(metadata.keys()),
['d1.NonlinearBlockGS', 'root.NonlinearBlockGS']
)
self.assertEqual(metadata['d1.NonlinearBlockGS']['solver_options']['maxiter'], 5)
self.assertEqual(metadata['root.NonlinearBlockGS']['solver_options']['maxiter'], 10)
def test_reading_driver_recording_with_system_vars(self):
prob = SellarProblem(SellarDerivativesGrouped)
driver = prob.driver = om.ScipyOptimizeDriver(tol=1e-9, disp=False)
driver.recording_options['record_desvars'] = True
driver.recording_options['record_objectives'] = True
driver.recording_options['record_constraints'] = True
driver.recording_options['includes'] = ['y2']
driver.add_recorder(self.recorder)
prob.setup()
prob.run_driver()
prob.cleanup()
cr = om.CaseReader(self.filename)
# Test values from the last case
driver_cases = cr.list_cases('driver')
last_case = cr.get_case(driver_cases[-1])
np.testing.assert_almost_equal(last_case.outputs['z'], prob['z'])
np.testing.assert_almost_equal(last_case.outputs['x'], prob['x'])
np.testing.assert_almost_equal(last_case.outputs['y2'], prob['mda.d2.y2'])
@unittest.skipIf(OPT is None, "pyoptsparse is not installed")
@unittest.skipIf(OPTIMIZER is None, "pyoptsparse is not providing SNOPT or SLSQP")
def test_get_child_cases(self):
prob = SellarProblem(SellarDerivativesGrouped, nonlinear_solver=om.NonlinearRunOnce)
driver = prob.driver = pyOptSparseDriver(optimizer='SLSQP', print_results=False)
driver.recording_options['record_desvars'] = True
driver.recording_options['record_objectives'] = True
driver.recording_options['record_constraints'] = True
driver.add_recorder(self.recorder)
prob.setup()
model = prob.model
model.add_recorder(self.recorder)
model.nonlinear_solver.add_recorder(self.recorder)
prob.run_driver()
prob.cleanup()
cr = om.CaseReader(self.filename)
# check driver cases
expected_coords = [
'rank0:pyOptSparse_SLSQP|0',
'rank0:pyOptSparse_SLSQP|1',
'rank0:pyOptSparse_SLSQP|2',
'rank0:pyOptSparse_SLSQP|3',
'rank0:pyOptSparse_SLSQP|4',
'rank0:pyOptSparse_SLSQP|5',
'rank0:pyOptSparse_SLSQP|6'
]
last_counter = 0
for i, c in enumerate(cr.get_cases(flat=False)):
self.assertEqual(c.name, expected_coords[i])
self.assertTrue(c.counter > last_counter)
last_counter = c.counter
self.assertEqual(i+1, len(expected_coords))
# check driver cases with recursion, flat
expected_coords = [
'rank0:pyOptSparse_SLSQP|0|root._solve_nonlinear|0|NLRunOnce|0',
'rank0:pyOptSparse_SLSQP|0|root._solve_nonlinear|0',
'rank0:pyOptSparse_SLSQP|0',
'rank0:pyOptSparse_SLSQP|1|root._solve_nonlinear|1|NLRunOnce|0',
'rank0:pyOptSparse_SLSQP|1|root._solve_nonlinear|1',
'rank0:pyOptSparse_SLSQP|1',
'rank0:pyOptSparse_SLSQP|2|root._solve_nonlinear|2|NLRunOnce|0',
'rank0:pyOptSparse_SLSQP|2|root._solve_nonlinear|2',
'rank0:pyOptSparse_SLSQP|2',
'rank0:pyOptSparse_SLSQP|3|root._solve_nonlinear|3|NLRunOnce|0',
'rank0:pyOptSparse_SLSQP|3|root._solve_nonlinear|3',
'rank0:pyOptSparse_SLSQP|3',
'rank0:pyOptSparse_SLSQP|4|root._solve_nonlinear|4|NLRunOnce|0',
'rank0:pyOptSparse_SLSQP|4|root._solve_nonlinear|4',
'rank0:pyOptSparse_SLSQP|4',
'rank0:pyOptSparse_SLSQP|5|root._solve_nonlinear|5|NLRunOnce|0',
'rank0:pyOptSparse_SLSQP|5|root._solve_nonlinear|5',
'rank0:pyOptSparse_SLSQP|5',
'rank0:pyOptSparse_SLSQP|6|root._solve_nonlinear|6|NLRunOnce|0',
'rank0:pyOptSparse_SLSQP|6|root._solve_nonlinear|6',
'rank0:pyOptSparse_SLSQP|6',
]
last_counter = 0
for i, c in enumerate(cr.get_cases(recurse=True, flat=True)):
self.assertEqual(c.name, expected_coords[i])
if len(c.name.split('|')) > 2:
self.assertEqual(c.parent, expected_coords[i+1])
else:
self.assertEqual(c.parent, None)
self.assertTrue(c.counter > last_counter)
last_counter = c.counter
self.assertEqual(i+1, len(expected_coords))
# check child cases with recursion, flat
expected_coords = [
'rank0:pyOptSparse_SLSQP|0|root._solve_nonlinear|0|NLRunOnce|0',
'rank0:pyOptSparse_SLSQP|0|root._solve_nonlinear|0',
'rank0:pyOptSparse_SLSQP|0',
]
last_counter = 0
for i, c in enumerate(cr.get_cases('rank0:pyOptSparse_SLSQP|0', recurse=True, flat=True)):
self.assertEqual(c.name, expected_coords[i])
self.assertTrue(c.counter > last_counter)
last_counter = c.counter
self.assertEqual(i+1, len(expected_coords))
# check child cases with recursion, nested
expected_coords = {
'rank0:pyOptSparse_SLSQP|0': {
'rank0:pyOptSparse_SLSQP|0|root._solve_nonlinear|0': {
'rank0:pyOptSparse_SLSQP|0|root._solve_nonlinear|0|NLRunOnce|0': {}
},
}
}
cases = cr.get_cases('rank0:pyOptSparse_SLSQP|0', recurse=True, flat=False)
count = 0
for case in cases:
count += 1
coord = case.name
self.assertTrue(coord in list(expected_coords.keys()))
for child_case in cases[case]:
count += 1
child_coord = child_case.name
self.assertTrue(child_coord in expected_coords[coord].keys())
for grandchild_case in cases[case][child_case]:
count += 1
grandchild_coord = grandchild_case.name
self.assertTrue(grandchild_coord in expected_coords[coord][child_coord].keys())
self.assertEqual(count, 3)
def test_get_child_cases_system(self):
prob = SellarProblem(SellarDerivativesGrouped, nonlinear_solver=om.NonlinearRunOnce)
prob.driver = om.ScipyOptimizeDriver(tol=1e-9, disp=False)
prob.setup()
model = prob.model
model.add_recorder(self.recorder)
model.nonlinear_solver.add_recorder(self.recorder)
model.mda.nonlinear_solver.add_recorder(self.recorder)
prob.run_driver()
prob.cleanup()
cr = om.CaseReader(self.filename)
parent_coord = 'rank0:ScipyOptimize_SLSQP|2|root._solve_nonlinear|2'
coord = parent_coord + '|NLRunOnce|0'
# user scenario: given a case (with coord), get all cases with same parent
case = cr.get_case(coord)
self.assertEqual(case.parent, parent_coord)
expected_coords = [
parent_coord + '|NLRunOnce|0|mda._solve_nonlinear|2|NonlinearBlockGS|0',
parent_coord + '|NLRunOnce|0|mda._solve_nonlinear|2|NonlinearBlockGS|1',
parent_coord + '|NLRunOnce|0|mda._solve_nonlinear|2|NonlinearBlockGS|2',
parent_coord + '|NLRunOnce|0|mda._solve_nonlinear|2|NonlinearBlockGS|3',
parent_coord + '|NLRunOnce|0|mda._solve_nonlinear|2|NonlinearBlockGS|4',
parent_coord + '|NLRunOnce|0|mda._solve_nonlinear|2|NonlinearBlockGS|5',
parent_coord + '|NLRunOnce|0|mda._solve_nonlinear|2|NonlinearBlockGS|6',
parent_coord + '|NLRunOnce|0|mda._solve_nonlinear|2|NonlinearBlockGS|7',
parent_coord + '|NLRunOnce|0',
parent_coord
]
last_counter = 0
for i, c in enumerate(cr.get_cases(source=case.parent, recurse=True, flat=True)):
self.assertEqual(c.name, expected_coords[i])
self.assertTrue(c.counter > last_counter)
last_counter = c.counter
i += 1
self.assertEqual(i, len(expected_coords))
def test_list_cases_recurse(self):
prob = SellarProblem(SellarDerivativesGrouped, nonlinear_solver=om.NonlinearRunOnce)
prob.driver = om.ScipyOptimizeDriver(optimizer='SLSQP', tol=1e-9, disp=False)
prob.driver.add_recorder(self.recorder)
prob.setup()
model = prob.model
model.add_recorder(self.recorder)
model.mda.add_recorder(self.recorder)
model.nonlinear_solver.add_recorder(self.recorder)
model.mda.nonlinear_solver.add_recorder(self.recorder)
prob.run_driver()
prob.cleanup()
cr = om.CaseReader(self.filename)
# get total iteration count to check against
global_iterations = len(cr._global_iterations)
#
# get a recursive list of all cases (flat)
#
cases = cr.list_cases(recurse=True, flat=True, out_stream=None)
# verify the cases are all there
self.assertEqual(len(cases), global_iterations)
# verify the cases are in proper order
counter = 0
for i, c in enumerate(cr.get_case(case) for case in cases):
counter += 1
self.assertEqual(c.counter, counter)
#
# get a recursive dict of all cases (nested)
#
cases = cr.list_cases(recurse=True, flat=False)
num_cases = count_keys(cases)
self.assertEqual(num_cases, global_iterations)
#
# get a recursive list of child cases
#
parent_coord = 'rank0:ScipyOptimize_SLSQP|0|root._solve_nonlinear|0'
expected_coords = [
parent_coord + '|NLRunOnce|0|mda._solve_nonlinear|0|NonlinearBlockGS|0',
parent_coord + '|NLRunOnce|0|mda._solve_nonlinear|0|NonlinearBlockGS|1',
parent_coord + '|NLRunOnce|0|mda._solve_nonlinear|0|NonlinearBlockGS|2',
parent_coord + '|NLRunOnce|0|mda._solve_nonlinear|0|NonlinearBlockGS|3',
parent_coord + '|NLRunOnce|0|mda._solve_nonlinear|0|NonlinearBlockGS|4',
parent_coord + '|NLRunOnce|0|mda._solve_nonlinear|0|NonlinearBlockGS|5',
parent_coord + '|NLRunOnce|0|mda._solve_nonlinear|0|NonlinearBlockGS|6',
parent_coord + '|NLRunOnce|0|mda._solve_nonlinear|0',
parent_coord + '|NLRunOnce|0',
parent_coord
]
cases = cr.list_cases(parent_coord, recurse=True, flat=True)
# verify the cases are all there and are as expected
self.assertEqual(len(cases), len(expected_coords))
for i, c in enumerate(cases):
self.assertEqual(c, expected_coords[i])
#
# get a list of cases for each source
#
sources = cr.list_sources(out_stream=None)
self.assertEqual(sorted(sources), [
'driver', 'root', 'root.mda', 'root.mda.nonlinear_solver', 'root.nonlinear_solver'
])
# verify the coordinates of the returned cases are all there as expected
expected_coord = {
'driver': r'rank0:ScipyOptimize_SLSQP\|\d',
'root': r'rank0:ScipyOptimize_SLSQP\|\d\|root._solve_nonlinear\|\d',
'root.nonlinear_solver': r'rank0:ScipyOptimize_SLSQP\|\d\|root._solve_nonlinear\|\d\|NLRunOnce\|0',
'root.mda': r'rank0:ScipyOptimize_SLSQP\|\d\|root._solve_nonlinear\|\d\|NLRunOnce\|0\|mda._solve_nonlinear\|\d',
'root.mda.nonlinear_solver': r'rank0:ScipyOptimize_SLSQP\|\d\|root._solve_nonlinear\|\d\|NLRunOnce\|0\|mda._solve_nonlinear\|\d\|NonlinearBlockGS\|\d',
}
counter = 0
mda_counter = 0
root_counter = 0
for source in sources:
expected = expected_coord[source]
cases = cr.list_cases(source, recurse=False)
for case in cases:
counter += 1
if source.startswith('root.mda'): # count all cases for/under mda system
mda_counter += 1
if source.startswith('root.'): # count all cases for/under root solver
root_counter += 1
                self.assertRegex(case, expected)
self.assertEqual(counter, global_iterations)
#
# get a recursive list of child cases for the mda system
#
counter = 0
cases = cr.list_cases('root.mda', recurse=True, flat=True)
for case in cases:
self.assertTrue(case.index('|mda._solve_nonlinear|') > 0)
counter += 1
self.assertEqual(counter, mda_counter)
#
# get a recursive list of child cases for the root solver
#
counter = 0
cases = cr.list_cases('root.nonlinear_solver', recurse=True, flat=True)
for case in cases:
self.assertTrue(case.index('|NLRunOnce|') > 0)
counter += 1
self.assertEqual(counter, root_counter)
def test_list_cases_nested_model(self):
prob = SellarProblem(SellarDerivativesGrouped, nonlinear_solver=om.NonlinearRunOnce)
prob.driver = om.ScipyOptimizeDriver(tol=1e-9, disp=False)
prob.setup()
model = prob.model
model.add_recorder(self.recorder)
model.mda.add_recorder(self.recorder)
model.nonlinear_solver.add_recorder(self.recorder)
model.mda.nonlinear_solver.add_recorder(self.recorder)
prob.run_driver()
prob.cleanup()
cr = om.CaseReader(self.filename)
# get total iteration count to check against
global_iterations = len(cr._global_iterations)
#
# get a recursive list of all cases (flat)
#
cases = cr.list_cases(recurse=True, flat=True, out_stream=None)
# verify the cases are all there
self.assertEqual(len(cases), global_iterations)
# verify the cases are in proper order
counter = 0
for i, c in enumerate(cr.get_case(case) for case in cases):
counter += 1
self.assertEqual(c.counter, counter)
#
# get a recursive dict of all cases (nested)
#
cases = cr.list_cases(recurse=True, flat=False)
num_cases = count_keys(cases)
self.assertEqual(num_cases, global_iterations)
def test_list_cases_nested_no_source(self):
prob = SellarProblem(SellarDerivativesGrouped, nonlinear_solver=om.NonlinearRunOnce)
prob.driver = om.ScipyOptimizeDriver(tol=1e-9, disp=False)
prob.setup()
model = prob.model
model.mda.add_recorder(self.recorder)
model.nonlinear_solver.add_recorder(self.recorder)
model.mda.nonlinear_solver.add_recorder(self.recorder)
prob.run_driver()
prob.cleanup()
cr = om.CaseReader(self.filename)
# get total iteration count to check against
global_iterations = len(cr._global_iterations)
#
# get a recursive list of all cases (flat)
#
cases = cr.list_cases(recurse=True, flat=True, out_stream=None)
# verify the cases are all there
self.assertEqual(len(cases), global_iterations)
# verify the cases are in proper order
counter = 0
for i, c in enumerate(cr.get_case(case) for case in cases):
counter += 1
self.assertEqual(c.counter, counter)
#
# try to get a recursive dict of all cases (nested), without driver or model
#
expected_err = ("A nested dictionary of all cases was requested, but "
"neither the driver or the model was recorded. Please "
"specify another source (system or solver) for the cases "
"you want to see.")
with self.assertRaises(RuntimeError) as cm:
cases = cr.list_cases(recurse=True, flat=False)
self.assertEqual(str(cm.exception), expected_err)
def test_get_cases_recurse(self):
prob = SellarProblem(SellarDerivativesGrouped, nonlinear_solver=om.NonlinearRunOnce)
prob.driver = om.ScipyOptimizeDriver(optimizer='SLSQP', tol=1e-9, disp=False)
prob.driver.add_recorder(self.recorder)
prob.setup()
model = prob.model
model.add_recorder(self.recorder)
model.mda.add_recorder(self.recorder)
model.nonlinear_solver.add_recorder(self.recorder)
model.mda.nonlinear_solver.add_recorder(self.recorder)
prob.run_driver()
prob.cleanup()
cr = om.CaseReader(self.filename)
# get total iteration count to check against
global_iterations = len(cr._global_iterations)
#
# get a recursive list of all cases (flat)
#
cases = cr.get_cases(recurse=True, flat=True)
# verify the cases are all there
self.assertEqual(len(cases), global_iterations)
# verify the cases are in proper order
counter = 0
for i, c in enumerate(cases):
counter += 1
self.assertEqual(c.counter, counter)
#
# get a recursive dict of all cases (nested)
#
cases = cr.get_cases(recurse=True, flat=False)
num_cases = count_keys(cases)
self.assertEqual(num_cases, global_iterations)
#
# get a recursive list of child cases
#
parent_coord = 'rank0:ScipyOptimize_SLSQP|0|root._solve_nonlinear|0'
expected_coords = [
parent_coord + '|NLRunOnce|0|mda._solve_nonlinear|0|NonlinearBlockGS|0',
parent_coord + '|NLRunOnce|0|mda._solve_nonlinear|0|NonlinearBlockGS|1',
parent_coord + '|NLRunOnce|0|mda._solve_nonlinear|0|NonlinearBlockGS|2',
parent_coord + '|NLRunOnce|0|mda._solve_nonlinear|0|NonlinearBlockGS|3',
parent_coord + '|NLRunOnce|0|mda._solve_nonlinear|0|NonlinearBlockGS|4',
parent_coord + '|NLRunOnce|0|mda._solve_nonlinear|0|NonlinearBlockGS|5',
parent_coord + '|NLRunOnce|0|mda._solve_nonlinear|0|NonlinearBlockGS|6',
parent_coord + '|NLRunOnce|0|mda._solve_nonlinear|0',
parent_coord + '|NLRunOnce|0',
parent_coord
]
cases = cr.get_cases(parent_coord, recurse=True, flat=True)
# verify the cases are all there and are as expected
self.assertEqual(len(cases), len(expected_coords))
for i, c in enumerate(cases):
self.assertEqual(c.name, expected_coords[i])
#
# get a list of cases for each source
#
sources = cr.list_sources(out_stream=None)
self.assertEqual(sorted(sources), [
'driver', 'root', 'root.mda', 'root.mda.nonlinear_solver', 'root.nonlinear_solver'
])
# verify the coordinates of the returned cases are as expected and that the cases are all there
expected_coord = {
'driver': r'rank0:ScipyOptimize_SLSQP\|\d',
'root': r'rank0:ScipyOptimize_SLSQP\|\d\|root._solve_nonlinear\|\d',
'root.nonlinear_solver': r'rank0:ScipyOptimize_SLSQP\|\d\|root._solve_nonlinear\|\d\|NLRunOnce\|0',
'root.mda': r'rank0:ScipyOptimize_SLSQP\|\d\|root._solve_nonlinear\|\d\|NLRunOnce\|0\|mda._solve_nonlinear\|\d',
'root.mda.nonlinear_solver': r'rank0:ScipyOptimize_SLSQP\|\d\|root._solve_nonlinear\|\d\|NLRunOnce\|0\|mda._solve_nonlinear\|\d\|NonlinearBlockGS\|\d',
}
counter = 0
mda_counter = 0
root_counter = 0
for source in sources:
expected = expected_coord[source]
cases = cr.get_cases(source, recurse=False)
for case in cases:
counter += 1
if source.startswith('root.mda'): # count all cases for/under mda system
mda_counter += 1
if source.startswith('root.'): # count all cases for/under root solver
root_counter += 1
                self.assertRegex(case.name, expected)
self.assertEqual(counter, global_iterations)
#
# get a recursive list of child cases for the mda system
#
counter = 0
cases = cr.get_cases('root.mda', recurse=True, flat=True)
for case in cases:
counter += 1
self.assertEqual(counter, mda_counter)
#
# get a recursive list of child cases for the root solver
#
counter = 0
cases = cr.get_cases('root.nonlinear_solver', recurse=True, flat=True)
for case in cases:
counter += 1
self.assertEqual(counter, root_counter)
def test_list_outputs(self):
prob = SellarProblem()
prob.model.add_recorder(self.recorder)
prob.model.recording_options['record_residuals'] = True
prob.setup()
d1 = prob.model.d1 # SellarDis1withDerivatives (an ExplicitComp)
d1.nonlinear_solver = om.NonlinearBlockGS(maxiter=5)
d1.add_recorder(self.recorder)
prob.run_driver()
prob.cleanup()
cr = om.CaseReader(self.filename)
# check the system case for 'd1' (there should be only one output, 'd1.y1')
system_cases = cr.list_cases('root.d1')
case = cr.get_case(system_cases[-1])
outputs = case.list_outputs(explicit=True, implicit=True, values=True,
residuals=True, residuals_tol=None,
units=True, shape=True, bounds=True, desc=True,
scaling=True, hierarchical=True, print_arrays=True,
out_stream=None)
expected_outputs = {
'd1.y1': {
'lower': 0.1,
'upper': 1000.,
'ref': 1.0,
'resids': [1.318e-10],
'shape': (1,),
'values': [25.5883024],
'desc': ''
}
}
self.assertEqual(len(outputs), 1)
[name, vals] = outputs[0]
self.assertEqual(name, 'd1.y1')
expected = expected_outputs[name]
self.assertEqual(vals['lower'], expected['lower'])
self.assertEqual(vals['ref'], expected['ref'])
self.assertEqual(vals['shape'], expected['shape'])
self.assertEqual(vals['desc'], expected['desc'])
np.testing.assert_almost_equal(vals['resids'], expected['resids'])
np.testing.assert_almost_equal(vals['value'], expected['values'])
# check implicit outputs, there should not be any
impl_outputs_case = case.list_outputs(explicit=False, implicit=True,
out_stream=None)
self.assertEqual(len(impl_outputs_case), 0)
# check that output from the Case method matches output from the System method
# the system for the case should be properly identified as 'd1'
stream = StringIO()
d1.list_outputs(prom_name=True, desc=True, out_stream=stream)
expected = stream.getvalue().split('\n')
stream = StringIO()
case.list_outputs(prom_name=True, desc=True, out_stream=stream)
text = stream.getvalue().split('\n')
for i, line in enumerate(expected):
self.assertEqual(text[i], line)
def test_list_inputs(self):
prob = SellarProblem()
prob.model.add_recorder(self.recorder)
prob.model.recording_options['record_residuals'] = True
prob.setup()
d1 = prob.model.d1 # SellarDis1withDerivatives (an ExplicitComp)
d1.nonlinear_solver = om.NonlinearBlockGS(maxiter=5)
d1.add_recorder(self.recorder)
prob.run_driver()
prob.cleanup()
cr = om.CaseReader(self.filename)
expected_inputs_case = {
'd1.z': {'value': [5., 2.], 'desc': ''},
'd1.x': {'value': [1.], 'desc': ''},
'd1.y2': {'value': [12.0584882], 'desc': ''}
}
system_cases = cr.list_cases('root.d1')
case = cr.get_case(system_cases[-1])
inputs = case.list_inputs(values=True, desc=True, out_stream=None)
for name, meta in inputs:
expected = expected_inputs_case[name]
np.testing.assert_almost_equal(meta['value'], expected['value'])
self.assertEqual(meta['desc'], expected['desc'])
# check that output from the Case method matches output from the System method
# the system for the case should be properly identified as 'd1'
stream = StringIO()
d1.list_inputs(prom_name=True, desc=True, out_stream=stream)
expected = stream.getvalue().split('\n')
stream = StringIO()
case.list_inputs(prom_name=True, desc=True, out_stream=stream)
text = stream.getvalue().split('\n')
for i, line in enumerate(expected):
self.assertEqual(text[i], line)
def test_list_inputs_outputs_solver_case(self):
prob = SellarProblem(SellarDerivativesGrouped)
prob.setup()
mda = prob.model.mda
mda.nonlinear_solver = om.NonlinearBlockGS(maxiter=5)
mda.nonlinear_solver.add_recorder(self.recorder)
prob.set_solver_print(-1)
prob.run_driver()
prob.cleanup()
cr = om.CaseReader(self.filename)
case = cr.get_case(-1)
# check that output from the Case methods match output from the System methods
# the system for the solver case should be properly identified as 'mda'
stream = StringIO()
mda.list_inputs(prom_name=True, out_stream=stream)
expected = stream.getvalue().split('\n')
stream = StringIO()
case.list_inputs(prom_name=True, out_stream=stream)
text = stream.getvalue().split('\n')
for i, line in enumerate(expected):
self.assertEqual(text[i], line)
stream = StringIO()
mda.list_outputs(prom_name=True, out_stream=stream)
expected = stream.getvalue().split('\n')
stream = StringIO()
case.list_outputs(prom_name=True, out_stream=stream)
text = stream.getvalue().split('\n')
for i, line in enumerate(expected):
self.assertEqual(text[i], line)
def test_list_input_and_outputs_with_tags(self):
from openmdao.core.tests.test_expl_comp import RectangleCompWithTags
prob = om.Problem(RectangleCompWithTags())
recorder = om.SqliteRecorder("cases.sql")
prob.model.add_recorder(recorder)
prob.setup(check=False)
prob.run_model()
prob.cleanup()
cr = om.CaseReader("cases.sql")
cases = cr.get_cases()
case = cases[0]
# Inputs no tags
inputs = case.list_inputs(out_stream=None)
self.assertEqual(sorted([inp[0] for inp in inputs]), ['length', 'width'])
# Inputs with tag that matches
inputs = case.list_inputs(out_stream=None, tags="tag2")
self.assertEqual([inp[0] for inp in inputs], ['width',])
# Inputs with tag that does not match
inputs = case.list_inputs(out_stream=None, tags="tag3")
self.assertEqual([inp[0] for inp in inputs], [])
# Inputs with multiple tags
inputs = case.list_inputs(out_stream=None, tags=["tag2", "tag3"])
self.assertEqual([inp[0] for inp in inputs], ['width',])
# Outputs no tags
outputs = case.list_outputs(out_stream=None)
self.assertEqual(sorted([outp[0] for outp in outputs]), ['area',])
# Outputs with tag that does match
outputs = case.list_outputs(out_stream=None, tags="tag1")
self.assertEqual(sorted([outp[0] for outp in outputs]), ['area',])
# Outputs with tag that do not match any vars
outputs = case.list_outputs(out_stream=None, tags="tag3")
self.assertEqual(sorted([outp[0] for outp in outputs]), [])
# Outputs with multiple tags
outputs = case.list_outputs(out_stream=None, tags=["tag1", "tag3"])
self.assertEqual(sorted([outp[0] for outp in outputs]), ['area',])
def test_list_inputs_with_includes_excludes(self):
prob = SellarProblem()
prob.model.add_recorder(self.recorder)
prob.setup()
d1 = prob.model.d1 # SellarDis1withDerivatives (an ExplicitComp)
d1.nonlinear_solver = om.NonlinearBlockGS(maxiter=5)
d1.add_recorder(self.recorder)
prob.run_driver()
prob.cleanup()
cr = om.CaseReader(self.filename)
system_cases = cr.list_cases('root.d1')
case = cr.get_case(system_cases[-1])
        # inputs with no includes or excludes. Should get d1.z, d1.x, and d1.y2
        inputs = case.list_inputs(out_stream=None)
        self.assertEqual(len(inputs), 3)
        # inputs with includes
        inputs = case.list_inputs(includes=['*z'], out_stream=None)
        self.assertEqual(len(inputs), 1)
        # inputs with excludes
        inputs = case.list_inputs(excludes=['*z'], out_stream=None)
        self.assertEqual(len(inputs), 2)
        # inputs with includes and excludes
        inputs = case.list_inputs(includes=['*z'], excludes=['d1*'], out_stream=None)
        self.assertEqual(len(inputs), 0)
        # outputs with no includes or excludes. Should get d1.y1
        outputs = case.list_outputs(out_stream=None)
        self.assertEqual(len(outputs), 1)
        # outputs with includes
        outputs = case.list_outputs(includes=['*z'], out_stream=None)
        self.assertEqual(len(outputs), 0)
        # outputs with excludes
        outputs = case.list_outputs(excludes=['*z'], out_stream=None)
        self.assertEqual(len(outputs), 1)
        # outputs with includes and excludes
        outputs = case.list_outputs(includes=['d1*'], excludes=['*z'], out_stream=None)
        self.assertEqual(len(outputs), 1)
def test_list_discrete(self):
model = om.Group()
model.add_subsystem('expl', ModCompEx(3),
promotes_inputs=['x'])
model.add_subsystem('impl', ModCompIm(3),
promotes_inputs=['x'])
model.add_recorder(self.recorder)
prob = om.Problem(model)
prob.setup()
prob.set_val('x', 11)
prob.run_model()
prob.cleanup()
cr = om.CaseReader(self.filename)
case = cr.get_case(0)
#
# list inputs, not hierarchical
#
stream = StringIO()
model.list_inputs(hierarchical=False, out_stream=stream)
expected = stream.getvalue().split('\n')
stream = StringIO()
case.list_inputs(hierarchical=False, out_stream=stream)
text = stream.getvalue().split('\n')
for i, line in enumerate(expected):
if line and not line.startswith('-'):
self.assertEqual(text[i], line)
#
# list inputs, hierarchical
#
stream = StringIO()
model.list_inputs(hierarchical=True, out_stream=stream)
expected = stream.getvalue().split('\n')
stream = StringIO()
case.list_inputs(hierarchical=True, out_stream=stream)
text = stream.getvalue().split('\n')
for i, line in enumerate(expected):
if line and not line.startswith('-'):
self.assertEqual(text[i], line)
#
# list outputs, not hierarchical, with residuals
#
expected = [
"2 Explicit Output(s) in 'model'",
"-------------------------------",
"",
"varname value resids ",
"------- ----- ------------",
"expl.b [20.] [0.] ",
"expl.y 2 Not Recorded",
]
stream = StringIO()
case.list_outputs(residuals=True, hierarchical=False, out_stream=stream)
text = stream.getvalue().split('\n')
for i, line in enumerate(expected):
if line and not line.startswith('-'):
self.assertEqual(remove_whitespace(text[i]), remove_whitespace(line))
#
# list outputs, hierarchical
#
stream = StringIO()
model.list_outputs(hierarchical=True, out_stream=stream)
expected = stream.getvalue().split('\n')
stream = StringIO()
case.list_outputs(hierarchical=True, out_stream=stream)
text = stream.getvalue().split('\n')
for i, line in enumerate(expected):
if line and not line.startswith('-'):
self.assertEqual(text[i], line)
def test_list_discrete_filtered(self):
model = om.Group()
indep = model.add_subsystem('indep', om.IndepVarComp())
indep.add_discrete_output('x', 11)
sub = model.add_subsystem('sub', om.Group())
sub.add_subsystem('expl', ModCompEx(3))
sub.add_subsystem('impl', ModCompIm(3))
model.connect('indep.x', 'sub.expl.x')
model.connect('indep.x', 'sub.impl.x')
sub.add_recorder(self.recorder)
# exclude one discrete input (abs_name) and one discrete output (prom_name)
sub.recording_options['excludes'] = ['sub.impl.x', 'expl.y']
prob = om.Problem(model)
prob.setup()
prob.run_model()
prob.cleanup()
cr = om.CaseReader(self.filename)
case = cr.get_case(0)
#
# list inputs
#
expected = [
"2 Input(s) in 'sub'",
"-------------------",
"",
"varname value",
"---------- -----",
"sub.expl.a [10.]",
"sub.expl.x 11 ",
# sub.impl.x is not recorded (excluded)
]
stream = StringIO()
case.list_inputs(hierarchical=False, out_stream=stream)
text = stream.getvalue().split('\n')
for i, line in enumerate(expected):
if line and not line.startswith('-'):
self.assertEqual(remove_whitespace(text[i]), remove_whitespace(line))
#
# list outputs
#
expected = [
"1 Explicit Output(s) in 'sub'",
"-----------------------------",
"",
"varname value",
"-------- -----",
"model",
" sub",
" expl",
" b [20.]",
# y is not recorded (excluded)
"",
"",
"1 Implicit Output(s) in 'sub'",
"-----------------------------",
"",
"varname value",
"------- -----",
"model",
" sub",
" impl",
" y 2 ",
]
stream = StringIO()
case.list_outputs(out_stream=stream)
text = stream.getvalue().split('\n')
for i, line in enumerate(expected):
if line and not line.startswith('-'):
self.assertEqual(remove_whitespace(text[i]), remove_whitespace(line))
def test_list_discrete_promoted(self):
model = om.Group()
indep = om.IndepVarComp()
indep.add_discrete_output('x', 11)
model.add_subsystem('indep', indep, promotes_outputs=['x'])
model.add_subsystem('expl', ModCompEx(3), promotes_inputs=['x'])
model.add_subsystem('impl', ModCompIm(3), promotes_inputs=['x'])
model.add_recorder(self.recorder)
prob = om.Problem(model)
prob.setup()
prob.run_model()
prob.cleanup()
cr = om.CaseReader(self.filename)
case = cr.get_case(0)
#
# list inputs
#
stream = StringIO()
model.list_inputs(hierarchical=False, prom_name=True, out_stream=stream)
expected = stream.getvalue().split('\n')
stream = StringIO()
case.list_inputs(hierarchical=False, prom_name=True, out_stream=stream)
text = stream.getvalue().split('\n')
for i, line in enumerate(expected):
if line and not line.startswith('-'):
self.assertEqual(text[i], line)
#
# list outputs
#
stream = StringIO()
model.list_outputs(prom_name=True, out_stream=stream)
expected = stream.getvalue().split('\n')
stream = StringIO()
case.list_outputs(prom_name=True, out_stream=stream)
text = stream.getvalue().split('\n')
for i, line in enumerate(expected):
if line and not line.startswith('-'):
self.assertEqual(text[i], line)
def test_getitem(self):
prob = SellarProblem()
prob.setup()
prob.add_recorder(self.recorder)
prob.driver.add_recorder(self.recorder)
prob.model.d1.add_recorder(self.recorder)
prob.run_driver()
prob.record('final')
prob.cleanup()
# expected input and output values after run_once
expected = {
# promoted names
"x": 1.,
"y1": 25.58830237,
"y2": 12.05848815,
"z": [5., 2.],
"obj": 28.58830817,
"con1": -22.42830237,
"con2": -11.94151185,
# unpromoted output names
"_auto_ivc.v1": 1.,
"_auto_ivc.v0": [5., 2.],
"obj_cmp.obj": 28.58830817,
"con_cmp1.con1": -22.42830237,
"con_cmp2.con2": -11.94151185,
# unpromoted system names
"d1.x": 1.,
"d1.y1": 25.58830237,
"d1.y2": 12.05848815,
"d1.z": [5., 2.],
}
cr = om.CaseReader(self.filename)
# driver will record design vars, objectives and constraints
cases = cr.list_cases('driver', recurse=False)
case = cr.get_case(cases[0])
for name in expected:
if name[0] in ['y', 'd']:
# driver does not record coupling vars y1 & y2
# or the lower level inputs and outputs of d1
msg = "'Variable name \"%s\" not found.'" % name
with self.assertRaises(KeyError) as cm:
case[name]
self.assertEqual(str(cm.exception), msg)
else:
np.testing.assert_almost_equal(case[name], expected[name])
# problem will record all inputs and outputs at the problem level
case = cr.get_case('final')
for name in expected:
if name in ['d1.x', 'd1.y2', 'd1.z']:
# problem does not record lower level inputs
msg = "'Variable name \"%s\" not found.'" % name
with self.assertRaises(KeyError) as cm:
case[name]
self.assertEqual(str(cm.exception), msg)
else:
np.testing.assert_almost_equal(case[name], expected[name])
# system will record inputs and outputs at the system level
cases = cr.list_cases('root.d1')
case = cr.get_case(cases[-1])
for name in expected:
if name[0] in ['p', 'o', 'c']:
# system d1 does not record params, obj and cons
msg = "'Variable name \"%s\" not found.'" % name
with self.assertRaises(KeyError) as cm:
case[name]
self.assertEqual(str(cm.exception), msg)
else:
np.testing.assert_almost_equal(case[name], expected[name])
def test_get_val_exhaustive(self):
import openmdao.api as om
model = om.Group()
model.add_subsystem('comp', om.ExecComp('y=x-25.',
x={'value': 77.0, 'units': 'degF'},
y={'value': 0.0, 'units': 'degC'}))
model.add_subsystem('prom', om.ExecComp('yy=xx-25.',
xx={'value': 77.0, 'units': 'degF'},
yy={'value': 0.0, 'units': 'degC'}),
promotes=['xx', 'yy'])
model.add_subsystem('acomp', om.ExecComp('y=x-25.',
x={'value': np.array([77.0, 95.0]), 'units': 'degF'},
y={'value': np.array([0., 0.]), 'units': 'degC'}))
model.add_subsystem('aprom', om.ExecComp('ayy=axx-25.',
axx={'value': np.array([77.0, 95.0]), 'units': 'degF'},
ayy={'value': np.array([0., 0.]), 'units': 'degC'}),
promotes=['axx', 'ayy'])
model.add_recorder(self.recorder)
prob = om.Problem(model)
prob.setup()
prob.run_model()
prob.cleanup()
cr = om.CaseReader(self.filename)
case = cr.get_case(0)
assert_near_equal(case.get_val('comp.x'), 77.0, 1e-6)
assert_near_equal(case.get_val('comp.x', 'degC'), 25.0, 1e-6)
assert_near_equal(case.get_val('comp.y'), 52., 1e-6)
assert_near_equal(case.get_val('comp.y', 'degF'), 125.6, 1e-6)
assert_near_equal(case.get_val('xx'), 77.0, 1e-6)
assert_near_equal(case.get_val('xx', 'degC'), 25.0, 1e-6)
assert_near_equal(case.get_val('yy'), 52., 1e-6)
assert_near_equal(case.get_val('yy', 'degF'), 125.6, 1e-6)
assert_near_equal(case.get_val('acomp.x', indices=0), 77.0, 1e-6)
assert_near_equal(case.get_val('acomp.x', indices=[1]), 95.0, 1e-6)
assert_near_equal(case.get_val('acomp.x', 'degC', indices=[0]), 25.0, 1e-6)
assert_near_equal(case.get_val('acomp.x', 'degC', indices=1), 35.0, 1e-6)
assert_near_equal(case.get_val('acomp.y', indices=0), 52., 1e-6)
assert_near_equal(case.get_val('acomp.y', 'degF', indices=0), 125.6, 1e-6)
assert_near_equal(case.get_val('axx', indices=0), 77.0, 1e-6)
assert_near_equal(case.get_val('axx', indices=1), 95.0, 1e-6)
assert_near_equal(case.get_val('axx', 'degC', indices=0), 25.0, 1e-6)
assert_near_equal(case.get_val('axx', 'degC', indices=np.array([1])), 35.0, 1e-6)
assert_near_equal(case.get_val('ayy', indices=0), 52., 1e-6)
assert_near_equal(case.get_val('ayy', 'degF', indices=0), 125.6, 1e-6)
def test_get_ambiguous_input(self):
model = om.Group()
model.add_recorder(self.recorder)
G1 = model.add_subsystem("G1", om.Group(), promotes=['x'])
G1.add_subsystem("C0", om.IndepVarComp('x', 1.0, units='m'), promotes=['x'])
G2 = model.add_subsystem("G2", om.Group(), promotes=['a'])
G2.add_subsystem("C1", om.ExecComp('y=m*2.0', m={'units': 'm'}), promotes=[('m', 'a')])
G2.add_subsystem("C2", om.ExecComp('y=f*2.0', f={'units': 'ft'}), promotes=[('f', 'a')])
model.connect('x', 'a')
prob = om.Problem(model)
prob.setup()
prob.run_model()
prob.cleanup()
cr = om.CaseReader(self.filename)
case = cr.get_case(0)
assert_near_equal(case.get_val('x'), 1., 1e-6)
assert_near_equal(case.get_val('x', units='ft'), 3.280839895, 1e-6)
assert_near_equal(case.get_val('G1.C0.x'), 1., 1e-6)
assert_near_equal(case.get_val('G1.C0.x', units='ft'), 3.280839895, 1e-6)
assert_near_equal(case.get_val('G2.C1.m'), 1., 1e-6)
assert_near_equal(case.get_val('G2.C2.f'), 3.280839895, 1e-6)
# 'a' is ambiguous.. which input do you want when accessing 'a'?
msg = "The promoted name 'a' is invalid because it refers to multiple inputs:" + \
" ['G2.C1.m', 'G2.C2.f']. Access the value using an absolute path name " + \
"or the connected output variable instead."
with self.assertRaises(RuntimeError) as cm:
case['a']
        self.assertEqual(str(cm.exception), msg)
with self.assertRaises(RuntimeError) as cm:
case.get_val('a')
        self.assertEqual(str(cm.exception), msg)
with self.assertRaises(RuntimeError) as cm:
case.get_val('a', units='m')
        self.assertEqual(str(cm.exception), msg)
with self.assertRaises(RuntimeError) as cm:
case.get_val('a', units='ft')
        self.assertEqual(str(cm.exception), msg)
# 'a' is ambiguous.. which input's units do you want when accessing 'a'?
# (test the underlying function, currently only called from inside get_val)
msg = "Can't get units for the promoted name 'a' because it refers to " + \
"multiple inputs: ['G2.C1.m', 'G2.C2.f']. Access the units using " + \
"an absolute path name."
with self.assertRaises(RuntimeError) as cm:
case._get_units('a')
        self.assertEqual(str(cm.exception), msg)
def test_get_vars(self):
prob = SellarProblem()
prob.setup()
prob.model.add_recorder(self.recorder)
prob.model.recording_options['record_residuals'] = True
prob.driver.add_recorder(self.recorder)
prob.run_driver()
prob.cleanup()
cr = om.CaseReader(self.filename)
driver_cases = cr.list_cases('driver')
driver_case = cr.get_case(driver_cases[0])
desvars = driver_case.get_design_vars()
objectives = driver_case.get_objectives()
constraints = driver_case.get_constraints()
responses = driver_case.get_responses()
expected_desvars = {"x": 1., "z": [5., 2.]}
expected_objectives = {"obj": 28.58830817, }
expected_constraints = {"con1": -22.42830237, "con2": -11.94151185}
expected_responses = expected_objectives.copy()
expected_responses.update(expected_constraints)
for expected_set, actual_set in ((expected_desvars, desvars),
(expected_objectives, objectives),
(expected_constraints, constraints),
(expected_responses, responses)):
self.assertEqual(len(expected_set), len(actual_set))
for k in expected_set:
np.testing.assert_almost_equal(expected_set[k], actual_set[k])
def test_simple_load_system_cases(self):
prob = SellarProblem()
model = prob.model
model.recording_options['record_inputs'] = True
model.recording_options['record_outputs'] = True
model.recording_options['record_residuals'] = True
model.add_recorder(self.recorder)
prob.setup()
prob.run_driver()
prob.cleanup()
cr = om.CaseReader(self.filename)
system_cases = cr.list_cases('root')
case = cr.get_case(system_cases[0])
# Add one to all the inputs and outputs just to change the model
# so we can see if loading the case values really changes the model
for name in model._inputs:
model._inputs[name] += 1.0
for name in model._outputs:
model._outputs[name] += 1.0
# Now load in the case we recorded
prob.load_case(case)
_assert_model_matches_case(case, model)
def test_load_bad_system_case(self):
prob = SellarProblem(SellarDerivativesGrouped)
prob.model.add_recorder(self.recorder)
driver = prob.driver = om.ScipyOptimizeDriver()
driver.options['optimizer'] = 'SLSQP'
driver.options['tol'] = 1e-9
driver.options['disp'] = False
driver.recording_options['record_desvars'] = True
driver.recording_options['record_objectives'] = True
driver.recording_options['record_constraints'] = True
prob.setup()
prob.run_driver()
prob.cleanup()
cr = om.CaseReader(self.filename)
system_cases = cr.list_cases('root')
case = cr.get_case(system_cases[0])
# try to load it into a completely different model
prob = SellarProblem()
prob.setup()
error_msg = "Input variable, '[^']+', recorded in the case is not found in the model"
with self.assertRaisesRegex(KeyError, error_msg):
prob.load_case(case)
def test_subsystem_load_system_cases(self):
prob = SellarProblem()
prob.setup()
model = prob.model
model.recording_options['record_inputs'] = True
model.recording_options['record_outputs'] = True
model.recording_options['record_residuals'] = True
# Only record a subsystem
model.d2.add_recorder(self.recorder)
prob.run_driver()
prob.cleanup()
cr = om.CaseReader(self.filename)
system_cases = cr.list_cases('root.d2')
case = cr.get_case(system_cases[0])
# Add one to all the inputs just to change the model
# so we can see if loading the case values really changes the model
for name in prob.model._inputs:
model._inputs[name] += 1.0
for name in prob.model._outputs:
model._outputs[name] += 1.0
# Now load in the case we recorded
prob.load_case(case)
_assert_model_matches_case(case, model.d2)
def test_load_system_cases_with_units(self):
comp = om.IndepVarComp()
comp.add_output('distance', val=1., units='m')
comp.add_output('time', val=1., units='s')
prob = om.Problem()
model = prob.model
model.add_subsystem('c1', comp)
model.add_subsystem('c2', SpeedComp())
model.add_subsystem('c3', om.ExecComp('f=speed', speed={'units': 'm/s'}, f={'units': 'm/s'}))
model.connect('c1.distance', 'c2.distance')
model.connect('c1.time', 'c2.time')
model.connect('c2.speed', 'c3.speed')
model.add_recorder(self.recorder)
prob.setup()
prob.run_model()
cr = om.CaseReader(self.filename)
system_cases = cr.list_cases('root')
case = cr.get_case(system_cases[0])
# Add one to all the inputs just to change the model
# so we can see if loading the case values really changes the model
for name in model._inputs:
model._inputs[name] += 1.0
for name in model._outputs:
model._outputs[name] += 1.0
# Now load in the case we recorded
prob.load_case(case)
_assert_model_matches_case(case, model)
# make sure it still runs with loaded values
prob.run_model()
# make sure the loaded unit strings are compatible with `convert_units`
from openmdao.utils.units import convert_units
outputs = case.list_outputs(explicit=True, implicit=True, values=True,
units=True, shape=True, out_stream=None)
meta = {}
for name, vals in outputs:
meta[name] = vals
from_units = meta['c2.speed']['units']
to_units = meta['c3.f']['units']
self.assertEqual(from_units, 'km/h')
self.assertEqual(to_units, 'm/s')
self.assertEqual(convert_units(10., from_units, to_units), 10000./3600.)
def test_optimization_load_system_cases(self):
prob = SellarProblem(SellarDerivativesGrouped)
prob.model.add_recorder(self.recorder)
driver = prob.driver = om.ScipyOptimizeDriver()
driver.options['optimizer'] = 'SLSQP'
driver.options['tol'] = 1e-9
driver.options['disp'] = False
driver.recording_options['record_desvars'] = True
driver.recording_options['record_objectives'] = True
driver.recording_options['record_constraints'] = True
prob.setup()
prob.run_driver()
prob.cleanup()
inputs_before = prob.model.list_inputs(values=True, units=True, out_stream=None)
outputs_before = prob.model.list_outputs(values=True, units=True, out_stream=None)
cr = om.CaseReader(self.filename)
# get third case
system_cases = cr.list_cases('root')
third_case = cr.get_case(system_cases[2])
iter_count_before = driver.iter_count
# run the model again with a fresh model
prob = SellarProblem(SellarDerivativesGrouped)
driver = prob.driver = om.ScipyOptimizeDriver()
driver.options['optimizer'] = 'SLSQP'
driver.options['tol'] = 1e-9
driver.options['disp'] = False
prob.setup()
prob.load_case(third_case)
prob.run_driver()
prob.cleanup()
inputs_after = prob.model.list_inputs(values=True, units=True, out_stream=None)
outputs_after = prob.model.list_outputs(values=True, units=True, out_stream=None)
iter_count_after = driver.iter_count
for before, after in zip(inputs_before, inputs_after):
np.testing.assert_almost_equal(before[1]['value'], after[1]['value'])
for before, after in zip(outputs_before, outputs_after):
np.testing.assert_almost_equal(before[1]['value'], after[1]['value'])
# Should take one less iteration since we gave it a head start in the second run
self.assertEqual(iter_count_before, iter_count_after + 1)
def test_load_solver_cases(self):
prob = SellarProblem()
prob.setup()
model = prob.model
model.nonlinear_solver.add_recorder(self.recorder)
fail = prob.run_driver()
prob.cleanup()
self.assertFalse(fail, 'Problem failed to converge')
cr = om.CaseReader(self.filename)
solver_cases = cr.list_cases('root.nonlinear_solver')
case = cr.get_case(solver_cases[0])
# Add one to all the inputs just to change the model
# so we can see if loading the case values really changes the model
for name in prob.model._inputs:
model._inputs[name] += 1.0
for name in prob.model._outputs:
model._outputs[name] += 1.0
# Now load in the case we recorded
prob.load_case(case)
_assert_model_matches_case(case, model)
def test_load_driver_cases(self):
prob = om.Problem()
model = prob.model
model.add_subsystem('p1', om.IndepVarComp('x', 50.0), promotes=['*'])
model.add_subsystem('p2', om.IndepVarComp('y', 50.0), promotes=['*'])
model.add_subsystem('comp', Paraboloid(), promotes=['*'])
model.add_subsystem('con', om.ExecComp('c = x - y'), promotes=['*'])
model.add_design_var('x', lower=-50.0, upper=50.0)
model.add_design_var('y', lower=-50.0, upper=50.0)
model.add_objective('f_xy')
model.add_constraint('c', lower=15.0)
prob.driver.add_recorder(self.recorder)
prob.driver.recording_options['includes'] = ['*']
prob.set_solver_print(0)
prob.setup()
fail = prob.run_driver()
prob.cleanup()
self.assertFalse(fail, 'Problem failed to converge')
cr = om.CaseReader(self.filename)
driver_cases = cr.list_cases('driver')
case = cr.get_case(driver_cases[0])
# Add one to all the inputs just to change the model
# so we can see if loading the case values really changes the model
for name in prob.model._inputs:
prob.model._inputs[name] += 1.0
for name in prob.model._outputs:
prob.model._outputs[name] += 1.0
# Now load in the case we recorded
prob.load_case(case)
_assert_model_matches_case(case, model)
def test_system_options_pickle_fail(self):
# simple paraboloid model
model = om.Group()
ivc = om.IndepVarComp()
ivc.add_output('x', 3.0)
model.add_subsystem('subs', ivc)
subs = model.subs
# declare two options
subs.options.declare('options value 1', 1)
# Given object which can't be pickled
subs.options.declare('options value to fail', (i for i in []))
subs.add_recorder(self.recorder)
prob = om.Problem(model)
prob.setup()
msg = ("Trying to record option 'options value to fail' which cannot be pickled on system "
"IndepVarComp (subs). Set 'recordable' to False. Skipping recording options for "
"this system.")
with assert_warning(UserWarning, msg):
prob.run_model()
prob.cleanup()
cr = om.CaseReader(self.filename)
subs_options = cr.system_options['subs']['component_options']
# no options should have been recorded for 'subs'
self.assertEqual(len(subs_options._dict), 0)
def test_pre_load(self):
prob = SellarProblem()
prob.setup()
recorder = om.SqliteRecorder(self.filename)
prob.add_recorder(recorder)
prob.driver.add_recorder(recorder)
prob.model.add_recorder(recorder)
prob.model.nonlinear_solver.add_recorder(recorder)
prob.run_driver()
prob.record('c_1')
prob.record('c_2')
prob.cleanup()
# without pre_load, we should get format_version and metadata but no cases
cr = om.CaseReader(self.filename, pre_load=False)
num_driver_cases = len(cr.list_cases('driver', recurse=False))
num_system_cases = len(cr.list_cases('root', recurse=False))
num_solver_cases = len(cr.list_cases('root.nonlinear_solver', recurse=False))
num_problem_cases = len(cr.list_cases('problem'))
self.assertEqual(num_driver_cases, 1)
self.assertEqual(num_system_cases, 1)
self.assertEqual(num_solver_cases, 7)
self.assertEqual(num_problem_cases, 2)
self.assertEqual(cr._format_version, format_version)
self.assertEqual(set(cr.system_options.keys()),
set(['root'] + [sys.name for sys in prob.model._subsystems_allprocs]))
self.assertEqual(set(cr.problem_metadata.keys()), {
'tree', 'sys_pathnames_list', 'connections_list', 'variables', 'abs2prom',
'driver', 'design_vars', 'responses', 'declare_partials_list'
})
self.assertEqual(len(cr._driver_cases._cases), 0)
self.assertEqual(len(cr._system_cases._cases), 0)
self.assertEqual(len(cr._solver_cases._cases), 0)
self.assertEqual(len(cr._problem_cases._cases), 0)
# with pre_load, we should get format_version, metadata and all cases
cr = om.CaseReader(self.filename, pre_load=True)
num_driver_cases = len(cr.list_cases('driver', recurse=False))
num_system_cases = len(cr.list_cases('root', recurse=False))
num_solver_cases = len(cr.list_cases('root.nonlinear_solver', recurse=False))
num_problem_cases = len(cr.list_cases('problem'))
self.assertEqual(num_driver_cases, 1)
self.assertEqual(num_system_cases, 1)
self.assertEqual(num_solver_cases, 7)
self.assertEqual(num_problem_cases, 2)
self.assertEqual(cr._format_version, format_version)
self.assertEqual(set(cr.system_options.keys()),
set(['root'] + [sys.name for sys in prob.model._subsystems_allprocs]))
self.assertEqual(set(cr.problem_metadata.keys()), {
'tree', 'sys_pathnames_list', 'connections_list', 'variables', 'abs2prom',
'driver', 'design_vars', 'responses', 'declare_partials_list'
})
self.assertEqual(len(cr._driver_cases._cases), num_driver_cases)
self.assertEqual(len(cr._system_cases._cases), num_system_cases)
self.assertEqual(len(cr._solver_cases._cases), num_solver_cases)
self.assertEqual(len(cr._problem_cases._cases), num_problem_cases)
for case_type in (cr._driver_cases, cr._solver_cases,
cr._system_cases, cr._problem_cases):
for key in case_type.list_cases():
self.assertTrue(key in case_type._cases)
self.assertEqual(key, case_type._cases[key].name)
def test_caching_cases(self):
prob = SellarProblem()
prob.setup()
prob.add_recorder(self.recorder)
prob.driver.add_recorder(self.recorder)
prob.model.add_recorder(self.recorder)
prob.model.nonlinear_solver.add_recorder(self.recorder)
prob.run_driver()
prob.record('c_1')
prob.record('c_2')
prob.cleanup()
cr = om.CaseReader(self.filename, pre_load=False)
self.assertEqual(len(cr._driver_cases._cases), 0)
self.assertEqual(len(cr._system_cases._cases), 0)
self.assertEqual(len(cr._solver_cases._cases), 0)
self.assertEqual(len(cr._problem_cases._cases), 0)
# get cases without caching them
for case_type in (cr._driver_cases, cr._solver_cases,
cr._system_cases, cr._problem_cases):
for key in case_type.list_cases():
case_type.get_case(key)
self.assertEqual(len(cr._driver_cases._cases), 0)
self.assertEqual(len(cr._system_cases._cases), 0)
self.assertEqual(len(cr._solver_cases._cases), 0)
self.assertEqual(len(cr._problem_cases._cases), 0)
# get cases and cache them
for case_type in (cr._driver_cases, cr._solver_cases,
cr._system_cases, cr._problem_cases):
for key in case_type.list_cases():
case_type.get_case(key, cache=True)
# assert that we have now stored each of the cases
self.assertEqual(len(cr._driver_cases._cases), 1)
self.assertEqual(len(cr._system_cases._cases), 1)
self.assertEqual(len(cr._solver_cases._cases), 7)
self.assertEqual(len(cr._problem_cases._cases), 2)
for case_type in (cr._driver_cases, cr._solver_cases,
cr._system_cases, cr._problem_cases):
for key in case_type.list_cases():
self.assertTrue(key in case_type._cases)
self.assertEqual(key, case_type._cases[key].name)
def test_reading_driver_cases_with_indices(self):
# note: size must be an even number
SIZE = 10
prob = om.Problem()
driver = prob.driver = om.ScipyOptimizeDriver(disp=False)
prob.driver.add_recorder(self.recorder)
driver.recording_options['includes'] = ['*']
model = prob.model
indeps = model.add_subsystem('indeps', om.IndepVarComp(), promotes_outputs=['*'])
# the following were randomly generated using np.random.random(10)*2-1 to
# disperse them within a unit circle centered at the origin.
# Also converted this array to a >1-D array to test that capability of case recording.
x_vals = np.array([
0.55994437, -0.95923447, 0.21798656, -0.02158783, 0.62183717,
0.04007379, 0.46044942, -0.10129622, 0.27720413, -0.37107886
]).reshape((-1, 1))
indeps.add_output('x', x_vals)
indeps.add_output('y', np.array([
0.52577864, 0.30894559, 0.8420792, 0.35039912, -0.67290778,
-0.86236787, -0.97500023, 0.47739414, 0.51174103, 0.10052582
]))
indeps.add_output('r', .7)
model.add_subsystem('circle', om.ExecComp('area = pi * r**2'))
model.add_subsystem('r_con', om.ExecComp('g = x**2 + y**2 - r**2',
g=np.ones(SIZE), x=np.ones(SIZE), y=np.ones(SIZE)))
thetas = np.linspace(0, np.pi/4, SIZE)
model.add_subsystem('theta_con', om.ExecComp('g=arctan(y/x) - theta',
g=np.ones(SIZE), x=np.ones(SIZE),
y=np.ones(SIZE), theta=thetas))
model.add_subsystem('delta_theta_con', om.ExecComp('g = arctan(y/x)[::2]-arctan(y/x)[1::2]',
g=np.ones(SIZE//2), x=np.ones(SIZE),
y=np.ones(SIZE)))
model.add_subsystem('l_conx', om.ExecComp('g=x-1', g=np.ones(SIZE), x=np.ones(SIZE)))
model.connect('r', ('circle.r', 'r_con.r'))
model.connect('x', ['r_con.x', 'theta_con.x', 'delta_theta_con.x'])
model.connect('x', 'l_conx.x')
model.connect('y', ['r_con.y', 'theta_con.y', 'delta_theta_con.y'])
model.add_design_var('x', indices=[0, 3])
model.add_design_var('y')
model.add_design_var('r', lower=.5, upper=10)
# nonlinear constraints
model.add_constraint('r_con.g', equals=0)
IND = np.arange(SIZE, dtype=int)
EVEN_IND = IND[0::2] # all even indices
model.add_constraint('theta_con.g', lower=-1e-5, upper=1e-5, indices=EVEN_IND)
model.add_constraint('delta_theta_con.g', lower=-1e-5, upper=1e-5)
# this constrains x[0] to be 1 (see definition of l_conx)
model.add_constraint('l_conx.g', equals=0, linear=False, indices=[0, ])
# linear constraint
model.add_constraint('y', equals=0, indices=[0], linear=True)
model.add_objective('circle.area', ref=-1)
prob.setup(mode='fwd')
prob.run_driver()
prob.cleanup()
# get the case we recorded
cr = om.CaseReader(self.filename)
case = cr.get_case(0)
# check 'use_indices' option, default is to use indices
dvs = case.get_design_vars()
assert_near_equal(dvs['x'], x_vals[[0, 3]], 1e-12)
dvs = case.get_design_vars(use_indices=False)
assert_near_equal(dvs['x'], x_vals, 1e-12)
cons = case.get_constraints()
self.assertEqual(len(cons['theta_con.g']), len(EVEN_IND))
cons = case.get_constraints(use_indices=False)
self.assertEqual(len(cons['theta_con.g']), SIZE)
# add one to all the inputs just to change the model, so we
# can see if loading the case values really changes the model
for name in prob.model._inputs:
model._inputs[name] += 1.0
for name in prob.model._outputs:
model._outputs[name] += 1.0
# load in the case we recorded and check that the model then matches
prob.load_case(case)
_assert_model_matches_case(case, model)
def test_multidimensional_arrays(self):
prob = om.Problem()
model = prob.model
comp = TestExplCompArray(thickness=1.) # has 2D arrays as inputs and outputs
model.add_subsystem('comp', comp, promotes=['*'])
# just to add a connection; otherwise an exception is thrown in recording viewer data
# (this appears to be a bug).
model.add_subsystem('double_area',
om.ExecComp('double_area = 2 * areas',
areas=np.zeros((2, 2)),
double_area=np.zeros((2, 2))),
promotes=['*'])
prob.driver.add_recorder(self.recorder)
prob.driver.recording_options['includes'] = ['*']
prob.setup()
prob.run_driver()
prob.cleanup()
# Add one to all the inputs just to change the model
# so we can see if loading the case values really changes the model
for name in prob.model._inputs:
model._inputs[name] += 1.0
for name in prob.model._outputs:
model._outputs[name] += 1.0
# Now load in the case we recorded
cr = om.CaseReader(self.filename)
driver_cases = cr.list_cases('driver')
case = cr.get_case(driver_cases[0])
prob.load_case(case)
_assert_model_matches_case(case, model)
def test_simple_paraboloid_scaled_desvars(self):
prob = om.Problem()
model = prob.model
model.add_subsystem('p1', om.IndepVarComp('x', 50.0), promotes=['*'])
model.add_subsystem('p2', om.IndepVarComp('y', 50.0), promotes=['*'])
model.add_subsystem('comp', Paraboloid(), promotes=['*'])
model.add_subsystem('con', om.ExecComp('c = x - y'), promotes=['*'])
prob.set_solver_print(level=0)
prob.driver = om.ScipyOptimizeDriver()
prob.driver.options['optimizer'] = 'SLSQP'
prob.driver.options['tol'] = 1e-9
prob.driver.options['disp'] = False
prob.driver.recording_options['record_desvars'] = True
prob.driver.recording_options['record_objectives'] = True
prob.driver.recording_options['record_constraints'] = True
recorder = om.SqliteRecorder("cases.sql")
prob.driver.add_recorder(recorder)
ref = 5.0
ref0 = -5.0
model.add_design_var('x', lower=-50.0, upper=50.0, ref=ref, ref0=ref0)
model.add_design_var('y', lower=-50.0, upper=50.0, ref=ref, ref0=ref0)
model.add_objective('f_xy')
model.add_constraint('c', lower=10.0, upper=11.0)
prob.setup(check=False, mode='fwd')
prob.run_driver()
prob.cleanup()
cr = om.CaseReader("cases.sql")
# Test values from the last case
driver_cases = cr.list_cases('driver')
last_case = cr.get_case(driver_cases[-1])
dvs = last_case.get_design_vars(scaled=False)
unscaled_x = dvs['x'][0]
unscaled_y = dvs['y'][0]
dvs = last_case.get_design_vars(scaled=True)
scaled_x = dvs['x'][0]
scaled_y = dvs['y'][0]
adder, scaler = determine_adder_scaler(ref0, ref, None, None)
self.assertAlmostEqual((unscaled_x + adder) * scaler, scaled_x, places=12)
self.assertAlmostEqual((unscaled_y + adder) * scaler, scaled_y, places=12)
def test_reading_all_case_types(self):
prob = SellarProblem(SellarDerivativesGrouped, nonlinear_solver=om.NonlinearRunOnce)
prob.setup(mode='rev')
driver = prob.driver = om.ScipyOptimizeDriver(disp=False, tol=1e-9)
#
# Add recorders
#
# driver
driver.recording_options['record_desvars'] = True
driver.recording_options['record_objectives'] = True
driver.recording_options['record_constraints'] = True
driver.add_recorder(self.recorder)
# root solver
nl = prob.model.nonlinear_solver
nl.recording_options['record_abs_error'] = True
nl.recording_options['record_rel_error'] = True
nl.recording_options['record_solver_residuals'] = True
nl.add_recorder(self.recorder)
# system
pz = prob.model.obj_cmp
pz.recording_options['record_inputs'] = True
pz.recording_options['record_outputs'] = True
pz.recording_options['record_residuals'] = True
pz.add_recorder(self.recorder)
# mda solver
nl = prob.model.mda.nonlinear_solver = om.NonlinearBlockGS()
nl.recording_options['record_abs_error'] = True
nl.recording_options['record_rel_error'] = True
nl.recording_options['record_solver_residuals'] = True
nl.add_recorder(self.recorder)
# problem
prob.recording_options['includes'] = []
prob.recording_options['record_objectives'] = True
prob.recording_options['record_constraints'] = True
prob.recording_options['record_desvars'] = True
prob.add_recorder(self.recorder)
fail = prob.run_driver()
prob.record('final')
prob.cleanup()
self.assertFalse(fail, 'Problem optimization failed.')
cr = om.CaseReader(self.filename)
#
# check sources
#
self.assertEqual(sorted(cr.list_sources(out_stream=None)), [
'driver', 'problem', 'root.mda.nonlinear_solver', 'root.nonlinear_solver', 'root.obj_cmp'
])
#
# check system cases
#
system_cases = cr.list_cases('root.obj_cmp', recurse=False)
expected_cases = [
'rank0:ScipyOptimize_SLSQP|0|root._solve_nonlinear|0|NLRunOnce|0|obj_cmp._solve_nonlinear|0',
'rank0:ScipyOptimize_SLSQP|1|root._solve_nonlinear|1|NLRunOnce|0|obj_cmp._solve_nonlinear|1',
'rank0:ScipyOptimize_SLSQP|2|root._solve_nonlinear|2|NLRunOnce|0|obj_cmp._solve_nonlinear|2',
'rank0:ScipyOptimize_SLSQP|3|root._solve_nonlinear|3|NLRunOnce|0|obj_cmp._solve_nonlinear|3',
'rank0:ScipyOptimize_SLSQP|4|root._solve_nonlinear|4|NLRunOnce|0|obj_cmp._solve_nonlinear|4',
'rank0:ScipyOptimize_SLSQP|5|root._solve_nonlinear|5|NLRunOnce|0|obj_cmp._solve_nonlinear|5',
'rank0:ScipyOptimize_SLSQP|6|root._solve_nonlinear|6|NLRunOnce|0|obj_cmp._solve_nonlinear|6'
]
self.assertEqual(len(system_cases), len(expected_cases))
for i, coord in enumerate(system_cases):
self.assertEqual(coord, expected_cases[i])
# check inputs, outputs and residuals for last case
case = cr.get_case(system_cases[-1])
self.assertEqual(list(case.inputs.keys()), ['x', 'y1', 'y2', 'z'])
self.assertEqual(case.inputs['y1'], prob['y1'])
self.assertEqual(case.inputs['y2'], prob['y2'])
self.assertEqual(list(case.outputs.keys()), ['obj'])
self.assertEqual(case.outputs['obj'], prob['obj'])
self.assertEqual(list(case.residuals.keys()), ['obj'])
self.assertEqual(case.residuals['obj'][0], 0.)
#
# check solver cases
#
root_solver_cases = cr.list_cases('root.nonlinear_solver', recurse=False)
expected_cases = [
'rank0:ScipyOptimize_SLSQP|0|root._solve_nonlinear|0|NLRunOnce|0',
'rank0:ScipyOptimize_SLSQP|1|root._solve_nonlinear|1|NLRunOnce|0',
'rank0:ScipyOptimize_SLSQP|2|root._solve_nonlinear|2|NLRunOnce|0',
'rank0:ScipyOptimize_SLSQP|3|root._solve_nonlinear|3|NLRunOnce|0',
'rank0:ScipyOptimize_SLSQP|4|root._solve_nonlinear|4|NLRunOnce|0',
'rank0:ScipyOptimize_SLSQP|5|root._solve_nonlinear|5|NLRunOnce|0',
'rank0:ScipyOptimize_SLSQP|6|root._solve_nonlinear|6|NLRunOnce|0'
]
self.assertEqual(len(root_solver_cases), len(expected_cases))
for i, coord in enumerate(root_solver_cases):
self.assertEqual(coord, expected_cases[i])
case = cr.get_case(root_solver_cases[-1])
expected_inputs = ['x', 'y1', 'y2', 'z']
expected_outputs = ['con1', 'con2', 'obj', 'x', 'y1', 'y2', 'z']
# input values must be accessed using absolute path names
expected_inputs_abs = [
'mda.d1.x', 'obj_cmp.x',
'mda.d2.y1', 'obj_cmp.y1', 'con_cmp1.y1',
'mda.d1.y2', 'obj_cmp.y2', 'con_cmp2.y2',
'mda.d1.z', 'mda.d2.z', 'obj_cmp.z'
]
self.assertEqual(sorted(case.inputs.keys()), expected_inputs)
self.assertEqual(sorted(case.outputs.keys()), expected_outputs)
self.assertEqual(sorted(case.residuals.keys()), expected_outputs)
for key in expected_inputs_abs:
np.testing.assert_almost_equal(case.inputs[key], prob[key])
for key in expected_outputs:
np.testing.assert_almost_equal(case.outputs[key], prob[key])
np.testing.assert_almost_equal(case.abs_err, 0, decimal=6)
np.testing.assert_almost_equal(case.rel_err, 0, decimal=6)
import numpy as np
import gudhi as gd
from numpy.lib.stride_tricks import as_strided
import tensorflow as tf
from tensorflow.python.framework import ops
import timeit
def compute_dgm(f, card, hom_dim):
"""
Computes the persistence diagram of an image.
:param f: image
:param card: maximum number of bars kept
:param hom_dim: dimension of homology
:return: persistence diagram, critical pixels
"""
dgm = np.zeros([card, 2], dtype=np.float32)
cof = np.zeros([card, 2], dtype=np.int32)
cc = gd.CubicalComplex(dimensions=f.shape, top_dimensional_cells=f.ravel())
cc.compute_persistence()
# Return zero arrays if no finite bars
num_bars = len(cc.persistence_intervals_in_dimension(hom_dim))
if ((hom_dim == 0) and (num_bars == 1)) or ((hom_dim > 0) and (num_bars == 0)):
return dgm, cof
# These are all the critical pixels
all_cof = cc.cofaces_of_persistence_pairs()[0][hom_dim]
# Generate the persistence diagram
birth_times, death_times = f.flat[all_cof[:, 0]], f.flat[all_cof[:, 1]]
# Return at most `card` bars
min_card = min(len(birth_times), card)
dgm[:min_card, 0], dgm[:min_card, 1] = birth_times[:min_card], death_times[:min_card]
cof[:min_card, :] = all_cof[:min_card, :]
return dgm, cof
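# Hedged usage sketch (not from the original source): compute a 0-dimensional
# persistence diagram for a small random image. Assumes gudhi is installed and
# relies on the module-level numpy/gudhi imports above.
def _demo_compute_dgm():
    img = np.random.rand(16, 16).astype(np.float32)
    dgm, cof = compute_dgm(img, card=32, hom_dim=0)
    lifetimes = dgm[:, 1] - dgm[:, 0]  # rows beyond the real bars are zero padding
    return dgm, cof, lifetimes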
def compute_dgm_grad(grad_dgm, cof, f):
"""
Uses grad_dgm to compute birth/death critical pixels
:param grad_dgm: gradient wrt dgm
:param cof: critical pixels
:param f: input image
:return: gradient of births/deaths wrt f
"""
grad_f_births = np.zeros(f.shape, dtype=np.float32)
grad_f_deaths = np.zeros(f.shape, dtype=np.float32)
# Identify which rows correspond to a persistence dot.
is_nonzero = cof.any(axis=1)
if not np.any(is_nonzero):
return grad_f_births, grad_f_deaths
# Filter by relevant rows
cof_nonzero = cof[is_nonzero, :]
grad_dgm_nonzero = grad_dgm[is_nonzero, :]
# Add gradient at appropriate places.
np.add.at(grad_f_births.ravel(), cof_nonzero[:, 0].ravel(), grad_dgm_nonzero[:, 0].ravel())
np.add.at(grad_f_deaths.ravel(), cof_nonzero[:, 1].ravel(), grad_dgm_nonzero[:, 1].ravel())
return grad_f_births, grad_f_deaths
def compute_thresh_dgm(f, card, hom_dim, pers_region=None):
"""
Computes thresholded persistent homology of an image.
:param f: input image
:param card: max cardinality of persistence diagram
:param hom_dim: degree of homology
:param pers_region: np.array([birth_low, birth_high, lifetime_low, lifetime_high])
:return: persistence diagram and associated critical pixels
"""
dgm = np.zeros([card, 2], dtype=np.float32)
cof = np.zeros([card, 2], dtype=np.int32)
cc = gd.CubicalComplex(dimensions=f.shape, top_dimensional_cells=f.ravel())
cc.compute_persistence()
# Return zero arrays if no finite bars
num_bars = len(cc.persistence_intervals_in_dimension(hom_dim))
if ((hom_dim == 0) and (num_bars == 1)) or ((hom_dim > 0) and (num_bars == 0)):
return dgm, cof
# These are all the critical pixels
all_cof = cc.cofaces_of_persistence_pairs()[0][hom_dim]
# Generate the persistence diagram
birth_times, death_times = f.flat[all_cof[:, 0]], f.flat[all_cof[:, 1]]
# Threshold by persistence region if one was provided
if pers_region is not None:
lifetimes = death_times - birth_times
rel_ind = (pers_region[0] < birth_times) & (birth_times < pers_region[1]) & \
(pers_region[2] < lifetimes) & (lifetimes < pers_region[3])
birth_times, death_times, all_cof = birth_times[rel_ind], death_times[rel_ind], all_cof[rel_ind, :]
min_card = min(len(birth_times), card)
dgm[:min_card, 0], dgm[:min_card, 1] = birth_times[:min_card], death_times[:min_card]
cof[:min_card, :] = all_cof[:min_card, :]
return dgm, cof
def compute_spawn_sw(grad_dgm, dgm, f, card,
hom_dim, kernel_size, pool_mode, noise, samples, M,
pers_region=None):
bsm = np.zeros(f.shape, dtype='float32')
dsm = np.zeros(f.shape, dtype='float32')
# Find nonzero rows of dgm
dgm_up_nonzero = dgm.any(axis=1)
if not np.any(dgm_up_nonzero):
return bsm, dsm
dgm_up = dgm[dgm_up_nonzero, :]
grad_dgm_up = grad_dgm[dgm_up_nonzero, :]
# Project nonzero rows of dgm to diagonal
dgm_up_proj = np.column_stack(((dgm_up[:, 0] + dgm_up[:, 1]) / 2, (dgm_up[:, 0] + dgm_up[:, 1]) / 2))
# For each random sample, compute fuzzy sliced-Wasserstein pairing
for t in range(samples):
g = f + np.random.uniform(-noise, noise, size=f.shape)
x_down, switch = spool(g, kernel_size, pool_mode)
# Compute persistence diagram and critical pixels.
dgm_down, cof_down = compute_thresh_dgm(x_down, card, hom_dim, pers_region)
bsm_down, dsm_down = np.zeros(x_down.shape), np.zeros(x_down.shape) # Initialize low-res smears.
# Get nonzero rows of dgm_down
dgm_down_nonzero = dgm_down.any(axis=1)
if not np.any(dgm_down_nonzero): # Skip iteration if downsampled image has no persistent homology.
continue
dgm_down = dgm_down[dgm_down_nonzero, :]
cof_down = cof_down[dgm_down_nonzero, :]
# Project nonzero rows of downsampled dgm onto diagonal
dgm_down_proj = np.column_stack(((dgm_down[:, 0] + dgm_down[:, 1]) / 2, (dgm_down[:, 0] + dgm_down[:, 1]) / 2))
theta = -np.pi / 2
for i in range(M):
theta_vec = np.array([np.cos(theta), np.sin(theta)])
# Symmetrize the pair dgm_up and dgm_down
V1 = np.concatenate([np.dot(dgm_up, theta_vec), np.dot(dgm_down_proj, theta_vec)])
V2 = np.concatenate([np.dot(dgm_down, theta_vec), np.dot(dgm_up_proj, theta_vec)])
V1_sort = V1.argsort()
V2_sort = V2.argsort()
for j in range(len(V1)):
dot1 = V1_sort[j]
dot2 = V2_sort[j]
# Check if pair happened between non-diagonal points
if (dot1 < dgm_up.shape[0]) and (dot2 < dgm_down.shape[0]):
bsm_down.ravel()[cof_down[dot2, 0]] += (grad_dgm_up[dot1, 0] / M)
dsm_down.ravel()[cof_down[dot2, 1]] += (grad_dgm_up[dot1, 1] / M)
theta += np.pi / M
bsm += unspool(bsm_down, kernel_size, switch)
dsm += unspool(dsm_down, kernel_size, switch)
bsm, dsm = bsm / samples, dsm / samples
return bsm, dsm
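# Hedged mini-example of one slice of the fuzzy sliced-Wasserstein pairing
# performed above: project two tiny diagrams (each augmented with the other's
# diagonal projection) onto a direction and pair dots by sorted order.
# Diagram values are made up for illustration.
def _demo_sw_pairing():
    dgm_a = np.array([[0.0, 1.0], [0.2, 0.9]])
    dgm_b = np.array([[0.1, 1.1]])
    proj = lambda d: np.column_stack(((d[:, 0] + d[:, 1]) / 2, (d[:, 0] + d[:, 1]) / 2))
    theta_vec = np.array([np.cos(-np.pi / 2), np.sin(-np.pi / 2)])
    V1 = np.concatenate([dgm_a.dot(theta_vec), proj(dgm_b).dot(theta_vec)])
    V2 = np.concatenate([dgm_b.dot(theta_vec), proj(dgm_a).dot(theta_vec)])
    return list(zip(V1.argsort(), V2.argsort()))  # matched (dot1, dot2) indices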
def robustness_test(f, eps, n, pers_region, p, hom_dim):
num_eps = len(eps)
pers_avgs = np.zeros(num_eps)
pers_mins = np.zeros(num_eps)
pers_maxs = np.zeros(num_eps)
for t in range(num_eps):
S = np.zeros(n)
for i in range(n):
g = f + np.random.uniform(low=-eps[t], high=eps[t], size=np.shape(f))
g = np.clip(g, 0, 255)
dgm = compute_dgm(g, 10000, hom_dim)[0]
lifetimes = dgm[:, 1] - dgm[:, 0]
idx = (pers_region[0] < dgm[:, 0]) & (dgm[:, 0] < pers_region[1]) & \
(pers_region[2] < lifetimes) & (lifetimes < pers_region[3])
S[i] = np.linalg.norm(lifetimes[idx], p)
pers_avgs[t] = np.average(S)
pers_mins[t] = np.min(S)
pers_maxs[t] = np.max(S)
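    # Hedged continuation (assumption; the excerpt ends above):
    return pers_avgs, pers_mins, pers_maxs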
# -*- coding: utf-8 -*-
# Copyright 1996-2015 PSERC. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
# Copyright (c) 2016-2022 by University of Kassel and Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
"""Builds the bus admittance matrix and branch admittance matrices.
"""
from numpy import ones, conj, nonzero, any, exp, pi, hstack, real
from scipy.sparse import csr_matrix
from pandapower.pypower.idx_brch import F_BUS, T_BUS, BR_R, BR_X, BR_B, BR_STATUS, SHIFT, TAP, BR_R_ASYM, BR_X_ASYM
from pandapower.pypower.idx_bus import GS, BS
def makeYbus(baseMVA, bus, branch):
"""Builds the bus admittance matrix and branch admittance matrices.
Returns the full bus admittance matrix (i.e. for all buses) and the
matrices C{Yf} and C{Yt} which, when multiplied by a complex voltage
vector, yield the vector currents injected into each line from the
"from" and "to" buses respectively of each line. Does appropriate
conversions to p.u.
@see: L{makeSbus}
@author: <NAME> (PSERC Cornell)
@author: <NAME>
"""
## constants
nb = bus.shape[0] ## number of buses
nl = branch.shape[0] ## number of lines
## for each branch, compute the elements of the branch admittance matrix where
##
## | If | | Yff Yft | | Vf |
## | | = | | * | |
## | It | | Ytf Ytt | | Vt |
##
Ytt, Yff, Yft, Ytf = branch_vectors(branch, nl)
## compute shunt admittance
## if Psh is the real power consumed by the shunt at V = 1.0 p.u.
## and Qsh is the reactive power injected by the shunt at V = 1.0 p.u.
## then Psh - j Qsh = V * conj(Ysh * V) = conj(Ysh) = Gs - j Bs,
## i.e. Ysh = Psh + j Qsh, so ...
## vector of shunt admittances
Ysh = (bus[:, GS] + 1j * bus[:, BS]) / baseMVA
## build connection matrices
f = real(branch[:, F_BUS]).astype(int) ## list of "from" buses
t = real(branch[:, T_BUS]).astype(int) ## list of "to" buses
## connection matrix for line & from buses
Cf = csr_matrix((ones(nl), (range(nl), f)), (nl, nb))
## connection matrix for line & to buses
Ct = csr_matrix((ones(nl), (range(nl), t)), (nl, nb))
## build Yf and Yt such that Yf * V is the vector of complex branch currents injected
## at each branch's "from" bus, and Yt is the same for the "to" bus end
i = hstack([range(nl), range(nl)]) ## double set of row indices
Yf = csr_matrix((hstack([Yff, Yft]), (i, hstack([f, t]))), (nl, nb))
Yt = csr_matrix((hstack([Ytf, Ytt]), (i, hstack([f, t]))), (nl, nb))
# Yf = spdiags(Yff, 0, nl, nl) * Cf + spdiags(Yft, 0, nl, nl) * Ct
# Yt = spdiags(Ytf, 0, nl, nl) * Cf + spdiags(Ytt, 0, nl, nl) * Ct
## build Ybus
Ybus = Cf.T * Yf + Ct.T * Yt + \
csr_matrix((Ysh, (range(nb), range(nb))), (nb, nb))
Ybus.sort_indices()
Ybus.eliminate_zeros()
return Ybus, Yf, Yt
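# Hedged numeric illustration (not part of the original source) of the 2x2
# branch admittance block documented above, for one line with series impedance
# r + jx, total charging b, tap ratio and phase shift. All values are assumed;
# it reuses the numpy names imported at the top of this module.
def _demo_branch_admittance(r=0.01, x=0.1, b=0.02, tap_ratio=1.0, shift_deg=0.0):
    Ys = 1.0 / (r + 1j * x)                           # series admittance
    t = tap_ratio * exp(1j * pi / 180.0 * shift_deg)  # complex tap
    Ytt = Ys + 1j * b / 2.0
    Yff = (Ys + 1j * b / 2.0) / (t * conj(t))
    Yft = -Ys / conj(t)
    Ytf = -Ys / t
    return Ytt, Yff, Yft, Ytf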
def branch_vectors(branch, nl):
stat = branch[:, BR_STATUS] ## ones at in-service branches
Ysf = stat / (branch[:, BR_R] + 1j * branch[:, BR_X]) ## series admittance
if any(branch[:, BR_R_ASYM]) or any(branch[:, BR_X_ASYM]):
Yst = stat / ((branch[:, BR_R] + branch[:, BR_R_ASYM]) + 1j * (
branch[:, BR_X] + branch[:, BR_X_ASYM])) ## series admittance
else:
Yst = Ysf
Bc = stat * branch[:, BR_B] ## line charging susceptance
tap = ones(nl) ## default tap ratio = 1
    i = nonzero(real(branch[:, TAP]))
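    # Hedged reconstruction from here on (the excerpt ends above): these lines
    # follow the standard PYPOWER formulation and are an assumption, not text
    # recovered from the source.
    tap[i] = real(branch[i, TAP])  ## assign non-zero tap ratios
    tap = tap * exp(1j * pi / 180. * branch[:, SHIFT])  ## add phase shifters
    Ytt = Yst + 1j * Bc / 2.
    Yff = (Ysf + 1j * Bc / 2.) / (tap * conj(tap))
    Yft = -Ysf / conj(tap)
    Ytf = -Yst / tap
    return Ytt, Yff, Yft, Ytf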
from itertools import product
import numpy as np
from numpy.linalg import norm
from numpy.testing import (assert_, assert_allclose,
assert_equal, suppress_warnings)
from pytest import raises as assert_raises
from scipy.sparse import issparse, lil_matrix
from scipy.sparse.linalg import aslinearoperator
from scipy.optimize import least_squares
from scipy.optimize._lsq.least_squares import IMPLEMENTED_LOSSES
from scipy.optimize._lsq.common import EPS, make_strictly_feasible
def fun_trivial(x, a=0):
return (x - a)**2 + 5.0
def jac_trivial(x, a=0.0):
return 2 * (x - a)
def fun_2d_trivial(x):
return np.array([x[0], x[1]])
def jac_2d_trivial(x):
return np.identity(2)
def fun_rosenbrock(x):
return np.array([10 * (x[1] - x[0]**2), (1 - x[0])])
def jac_rosenbrock(x):
return np.array([
[-20 * x[0], 10],
[-1, 0]
])
def jac_rosenbrock_bad_dim(x):
return np.array([
[-20 * x[0], 10],
[-1, 0],
[0.0, 0.0]
])
def fun_rosenbrock_cropped(x):
return fun_rosenbrock(x)[0]
def jac_rosenbrock_cropped(x):
return jac_rosenbrock(x)[0]
# When x is 1-D array, return is 2-D array.
def fun_wrong_dimensions(x):
return np.array([x, x**2, x**3])
def jac_wrong_dimensions(x, a=0.0):
return np.atleast_3d(jac_trivial(x, a=a))
def fun_bvp(x):
n = int(np.sqrt(x.shape[0]))
u = np.zeros((n + 2, n + 2))
x = x.reshape((n, n))
u[1:-1, 1:-1] = x
y = u[:-2, 1:-1] + u[2:, 1:-1] + u[1:-1, :-2] + u[1:-1, 2:] - 4 * x + x**3
return y.ravel()
class BroydenTridiagonal:
def __init__(self, n=100, mode='sparse'):
np.random.seed(0)
self.n = n
self.x0 = -np.ones(n)
self.lb = np.linspace(-2, -1.5, n)
self.ub = np.linspace(-0.8, 0.0, n)
self.lb += 0.1 * np.random.randn(n)
self.ub += 0.1 * np.random.randn(n)
self.x0 += 0.1 * np.random.randn(n)
self.x0 = make_strictly_feasible(self.x0, self.lb, self.ub)
if mode == 'sparse':
self.sparsity = lil_matrix((n, n), dtype=int)
i = np.arange(n)
self.sparsity[i, i] = 1
i = np.arange(1, n)
self.sparsity[i, i - 1] = 1
i = np.arange(n - 1)
self.sparsity[i, i + 1] = 1
self.jac = self._jac
elif mode == 'operator':
self.jac = lambda x: aslinearoperator(self._jac(x))
elif mode == 'dense':
self.sparsity = None
self.jac = lambda x: self._jac(x).toarray()
else:
assert_(False)
def fun(self, x):
f = (3 - x) * x + 1
f[1:] -= x[:-1]
f[:-1] -= 2 * x[1:]
return f
def _jac(self, x):
J = lil_matrix((self.n, self.n))
i = np.arange(self.n)
J[i, i] = 3 - 2 * x
i = np.arange(1, self.n)
J[i, i - 1] = -1
i = np.arange(self.n - 1)
J[i, i + 1] = -2
return J
class ExponentialFittingProblem:
"""Provide data and function for exponential fitting in the form
y = a + exp(b * x) + noise."""
def __init__(self, a, b, noise, n_outliers=1, x_range=(-1, 1),
n_points=11, random_seed=None):
np.random.seed(random_seed)
self.m = n_points
self.n = 2
self.p0 = np.zeros(2)
self.x = np.linspace(x_range[0], x_range[1], n_points)
self.y = a + np.exp(b * self.x)
self.y += noise * np.random.randn(self.m)
outliers = np.random.randint(0, self.m, n_outliers)
self.y[outliers] += 50 * noise * np.random.rand(n_outliers)
self.p_opt = np.array([a, b])
def fun(self, p):
return p[0] + np.exp(p[1] * self.x) - self.y
def jac(self, p):
J = np.empty((self.m, self.n))
J[:, 0] = 1
J[:, 1] = self.x * np.exp(p[1] * self.x)
return J
def cubic_soft_l1(z):
rho = np.empty((3, z.size))
t = 1 + z
rho[0] = 3 * (t**(1/3) - 1)
rho[1] = t ** (-2/3)
rho[2] = -2/3 * t**(-5/3)
return rho
LOSSES = list(IMPLEMENTED_LOSSES.keys()) + [cubic_soft_l1]
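# Hedged usage sketch: scipy's least_squares accepts a callable loss that
# returns rho with shape (3, m) -- loss value, first and second derivative --
# exactly the contract cubic_soft_l1 satisfies above.
def _demo_custom_loss():
    return least_squares(fun_trivial, 2.0, loss=cubic_soft_l1)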
class BaseMixin:
def test_basic(self):
# Test that the basic calling sequence works.
res = least_squares(fun_trivial, 2., method=self.method)
assert_allclose(res.x, 0, atol=1e-4)
assert_allclose(res.fun, fun_trivial(res.x))
def test_args_kwargs(self):
# Test that args and kwargs are passed correctly to the functions.
a = 3.0
for jac in ['2-point', '3-point', 'cs', jac_trivial]:
with suppress_warnings() as sup:
sup.filter(UserWarning,
"jac='(3-point|cs)' works equivalently to '2-point' for method='lm'")
res = least_squares(fun_trivial, 2.0, jac, args=(a,),
method=self.method)
res1 = least_squares(fun_trivial, 2.0, jac, kwargs={'a': a},
method=self.method)
assert_allclose(res.x, a, rtol=1e-4)
assert_allclose(res1.x, a, rtol=1e-4)
assert_raises(TypeError, least_squares, fun_trivial, 2.0,
args=(3, 4,), method=self.method)
assert_raises(TypeError, least_squares, fun_trivial, 2.0,
kwargs={'kaboom': 3}, method=self.method)
def test_jac_options(self):
for jac in ['2-point', '3-point', 'cs', jac_trivial]:
with suppress_warnings() as sup:
sup.filter(UserWarning,
"jac='(3-point|cs)' works equivalently to '2-point' for method='lm'")
res = least_squares(fun_trivial, 2.0, jac, method=self.method)
assert_allclose(res.x, 0, atol=1e-4)
assert_raises(ValueError, least_squares, fun_trivial, 2.0, jac='oops',
method=self.method)
def test_nfev_options(self):
for max_nfev in [None, 20]:
res = least_squares(fun_trivial, 2.0, max_nfev=max_nfev,
method=self.method)
assert_allclose(res.x, 0, atol=1e-4)
def test_x_scale_options(self):
for x_scale in [1.0, np.array([0.5]), 'jac']:
res = least_squares(fun_trivial, 2.0, x_scale=x_scale)
assert_allclose(res.x, 0)
assert_raises(ValueError, least_squares, fun_trivial,
2.0, x_scale='auto', method=self.method)
assert_raises(ValueError, least_squares, fun_trivial,
2.0, x_scale=-1.0, method=self.method)
assert_raises(ValueError, least_squares, fun_trivial,
2.0, x_scale=None, method=self.method)
assert_raises(ValueError, least_squares, fun_trivial,
2.0, x_scale=1.0+2.0j, method=self.method)
def test_diff_step(self):
# res1 and res2 should be equivalent.
# res2 and res3 should be different.
res1 = least_squares(fun_trivial, 2.0, diff_step=1e-1,
method=self.method)
res2 = least_squares(fun_trivial, 2.0, diff_step=-1e-1,
method=self.method)
res3 = least_squares(fun_trivial, 2.0,
diff_step=None, method=self.method)
assert_allclose(res1.x, 0, atol=1e-4)
assert_allclose(res2.x, 0, atol=1e-4)
assert_allclose(res3.x, 0, atol=1e-4)
assert_equal(res1.x, res2.x)
assert_equal(res1.nfev, res2.nfev)
def test_incorrect_options_usage(self):
assert_raises(TypeError, least_squares, fun_trivial, 2.0,
method=self.method, options={'no_such_option': 100})
assert_raises(TypeError, least_squares, fun_trivial, 2.0,
method=self.method, options={'max_nfev': 100})
def test_full_result(self):
# MINPACK doesn't work very well with factor=100 on this problem,
# thus using low 'atol'.
res = least_squares(fun_trivial, 2.0, method=self.method)
assert_allclose(res.x, 0, atol=1e-4)
assert_allclose(res.cost, 12.5)
assert_allclose(res.fun, 5)
assert_allclose(res.jac, 0, atol=1e-4)
assert_allclose(res.grad, 0, atol=1e-2)
assert_allclose(res.optimality, 0, atol=1e-2)
assert_equal(res.active_mask, 0)
if self.method == 'lm':
assert_(res.nfev < 30)
assert_(res.njev is None)
else:
assert_(res.nfev < 10)
assert_(res.njev < 10)
assert_(res.status > 0)
assert_(res.success)
def test_full_result_single_fev(self):
# MINPACK checks the number of nfev after the iteration,
# so it's hard to tell what it is going to compute.
if self.method == 'lm':
return
res = least_squares(fun_trivial, 2.0, method=self.method,
max_nfev=1)
assert_equal(res.x, np.array([2]))
assert_equal(res.cost, 40.5)
assert_equal(res.fun, np.array([9]))
assert_equal(res.jac, np.array([[4]]))
assert_equal(res.grad, np.array([36]))
assert_equal(res.optimality, 36)
assert_equal(res.active_mask, np.array([0]))
assert_equal(res.nfev, 1)
assert_equal(res.njev, 1)
assert_equal(res.status, 0)
assert_equal(res.success, 0)
def test_rosenbrock(self):
x0 = [-2, 1]
x_opt = [1, 1]
for jac, x_scale, tr_solver in product(
['2-point', '3-point', 'cs', jac_rosenbrock],
[1.0, np.array([1.0, 0.2]), 'jac'],
['exact', 'lsmr']):
with suppress_warnings() as sup:
sup.filter(UserWarning,
"jac='(3-point|cs)' works equivalently to '2-point' for method='lm'")
res = least_squares(fun_rosenbrock, x0, jac, x_scale=x_scale,
tr_solver=tr_solver, method=self.method)
assert_allclose(res.x, x_opt)
def test_rosenbrock_cropped(self):
x0 = [-2, 1]
if self.method == 'lm':
assert_raises(ValueError, least_squares, fun_rosenbrock_cropped,
x0, method='lm')
else:
for jac, x_scale, tr_solver in product(
['2-point', '3-point', 'cs', jac_rosenbrock_cropped],
[1.0, np.array([1.0, 0.2]), 'jac'],
['exact', 'lsmr']):
res = least_squares(
fun_rosenbrock_cropped, x0, jac, x_scale=x_scale,
tr_solver=tr_solver, method=self.method)
assert_allclose(res.cost, 0, atol=1e-14)
def test_fun_wrong_dimensions(self):
assert_raises(ValueError, least_squares, fun_wrong_dimensions,
2.0, method=self.method)
def test_jac_wrong_dimensions(self):
assert_raises(ValueError, least_squares, fun_trivial,
2.0, jac_wrong_dimensions, method=self.method)
def test_fun_and_jac_inconsistent_dimensions(self):
x0 = [1, 2]
assert_raises(ValueError, least_squares, fun_rosenbrock, x0,
jac_rosenbrock_bad_dim, method=self.method)
def test_x0_multidimensional(self):
x0 = np.ones(4).reshape(2, 2)
assert_raises(ValueError, least_squares, fun_trivial, x0,
method=self.method)
def test_x0_complex_scalar(self):
x0 = 2.0 + 0.0*1j
assert_raises(ValueError, least_squares, fun_trivial, x0,
method=self.method)
def test_x0_complex_array(self):
x0 = [1.0, 2.0 + 0.0*1j]
assert_raises(ValueError, least_squares, fun_trivial, x0,
method=self.method)
def test_bvp(self):
# This test was introduced with fix #5556. It turned out that
# dogbox solver had a bug with trust-region radius update, which
# could block its progress and create an infinite loop. And this
# discrete boundary value problem is the one which triggers it.
n = 10
x0 = np.ones(n**2)
# coding=utf-8
# Copyright 2021 The Trax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""RL environments created from supervised data-sets."""
import gym
import numpy as np
class SequenceDataEnv(object):
"""RL environment created from a generator of sequential data.
This class allows to create RL environments from supervised sequential data,
such as tokenized natural language processing tasks. The data comes as:
(input1, output1, input2, output2, ...)
where inputs and outputs are all sequences of integers.
For example, with input (2, 3) and output (4, 5), so data = [(2, 3), (4, 5)],
the sequence of (observations, rewards, actions) will look like:
2 = env.reset() # first observation
3, 0.0, _, _ = env.step(ignored_action)
eos, 0.0, _, _ = env.step(ignored_action)
act1, 0.0, _, _ = env.step(act1) # observation = action
act2, 0.0, _, _ = env.step(act2) # observation = action
eos, score, _, _ = env.step(eos)
where score = metric((4, 5), (act1, act2)) is the reward gotten from
comparing the two actions to the actual output from the data.
The environment first presents the input as observations, doing this
sequentially, token-by-token, and ignoring all actions taken by the policy.
Then, the policy is asked to generate the response, again, token-by-token,
until it generates EOS. Generated tokens are repeated as observations.
When EOS is encountered, a metric is computed between the generated
output and the output from data, and this metric is returned as reward.
"""
def __init__(self, data_stream, vocab_size, metric=None,
eos_id=1, max_length=1000):
"""The constructor.
Args:
data_stream: A python generator creating lists or tuples of
sequences (list, tuples or numpy arrays) of integers.
vocab_size: Integer, the size of the vocabulary. All integers in the
data stream must be positive and smaller than this value.
metric: A function taking two lists of integers and returning a float.
If None, we use per-token accuracy as the default metric.
eos_id: Integer, the id of the EOS symbol.
max_length: Integer, maximum length of the policy reply to avoid
infinite episodes if policy never produces EOS.
Returns:
A new environment which presents the data and compares the policy
response with the expected data, returning metric as reward.
"""
self._data = data_stream
self._vocab_size = vocab_size
self._eos = eos_id
self._max_length = max_length
self._metric = _accuracy if metric is None else metric
self.reset()
@property
def _on_input(self):
"""Return True if we're currently processing input, False if output."""
cur_sequence_id, _ = self._cur_position
return cur_sequence_id % 2 == 0
@property
def observation(self):
cur_sequence_id, cur_token_id = self._cur_position
if cur_sequence_id >= len(self._cur_sequence):
obs = self._eos
elif self._on_input:
obs = self._cur_sequence[cur_sequence_id][cur_token_id]
else:
obs = self._response[-1] if self._response else self._eos
return np.array(int(obs), dtype=np.int32)
@property
def action_space(self):
return gym.spaces.Discrete(self._vocab_size)
@property
def observation_space(self):
return gym.spaces.Discrete(self._vocab_size)
def reset(self):
"""Reset this environment."""
self._cur_sequence = next(self._data)
# Position contains 2 indices: which sequence are we in? (input1, output1,
# input2, output2 and so on) and which token in the sequence are we in?
self._cur_position = (0, 0)
self._response = []
return self.observation
def step(self, action):
"""Single step of the environment when policy took `action`."""
cur_sequence_id, cur_token_id = self._cur_position
if cur_sequence_id >= len(self._cur_sequence):
return np.array(self._eos, dtype=np.int32), 0.0, True, None
# Emit the control mask on the output.
control_mask = int(not self._on_input)
if self._on_input:
self._response = []
if cur_token_id + 1 < len(self._cur_sequence[cur_sequence_id]):
self._cur_position = (cur_sequence_id, cur_token_id + 1)
done = False
else:
self._cur_position = (cur_sequence_id + 1, 0)
done = cur_sequence_id + 1 >= len(self._cur_sequence)
reward = 0.0
discount_mask = 0
else:
self._response.append(action)
if action == self._eos or len(self._response) > self._max_length:
self._cur_position = (cur_sequence_id + 1, 0)
reward = self._metric(
self._response[:-1], self._cur_sequence[cur_sequence_id])
done = cur_sequence_id + 1 >= len(self._cur_sequence)
# Emit the discount mask on the last token of each action.
discount_mask = 1
else:
reward = 0.0
done = False
discount_mask = 0
info = {'control_mask': control_mask, 'discount_mask': discount_mask}
return self.observation, reward, done, info
def copy_stream(length, low=2, high=15, n=1):
"""Generate `n` random sequences of length `length` and yield with copies."""
while True:
res = []
for _ in range(n):
seq = np.random.randint(low, high, size=(length,), dtype=np.int32)
res.extend([seq, seq])
yield res
def _accuracy(seq1, seq2):
"""Token-level accuracy."""
seq1, seq2 = np.array(seq1), np.array(seq2)
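    # Hedged continuation (assumption; the excerpt ends above): score the
    # overlapping tokens and normalize by the longer sequence's length.
    min_len = min(len(seq1), len(seq2))
    max_len = max(len(seq1), len(seq2))
    if max_len == 0:
        return 1.0
    correct = np.sum(np.equal(seq1[:min_len], seq2[:min_len]))
    return float(correct) / float(max_len)


# Hedged usage sketch of the environment above on the copy task, driven by a
# trivial echo policy; the episode terminates once the policy emits EOS.
def _demo_sequence_env():
    env = SequenceDataEnv(copy_stream(length=3), vocab_size=16)
    obs, done, reward = env.reset(), False, 0.0
    while not done:
        obs, reward, done, info = env.step(int(obs))  # echo the observation back
    return reward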
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import math
from glmatrix import *
import numpy as np
print("#########################################")
#np.set_printoptions(precision=3)
np.set_printoptions(formatter={'float': '{: 8.3f}'.format})
#np.set_printoptions(suppress=True)
location_v = vec3_create([5.0, 6.0, 7.0])
location_m = gl_mat4_from_translation(location_v)
print("Location Matrix")
print("")
#print(location_m)
transform_array = np.array(location_m, np.float32)
print(transform_array)
print("")
print("#########################################")
deg = -10
rad = (deg * math.pi / 180)
q_rot = gl_quat_from_x_rotation(rad)
rotation_m = mat4_create(None)
gl_mat4_from_quat(q_rot, rotation_m)
print("Rotation Matrix - X")
print("")
transform_array = np.array(q_rot, np.float32)
print(transform_array)
transform_array = np.array(rotation_m, np.float32)
print(transform_array)
print("")
print("#########################################")
deg = -10
rad = (deg * math.pi / 180)
q_rot = gl_quat_from_y_rotation(rad)
rotation_m = mat4_create(None)
gl_mat4_from_quat(q_rot, rotation_m)
print("Rotation Matrix - Y")
print("")
transform_array = np.array(q_rot, np.float32)
print(transform_array)
transform_array = np.array(rotation_m, np.float32)
print(transform_array)
print("")
print("#########################################")
deg = -10
rad = (deg * math.pi / 180)
q_rot = gl_quat_from_z_rotation(rad)
rotation_m = mat4_create(None)
gl_mat4_from_quat(q_rot, rotation_m)
print("Rotation Matrix - Z")
print("")
transform_array = np.array(q_rot, np.float32)
print(transform_array)
transform_array = np.array(rotation_m, np.float32)
print(transform_array)
print("")
print("#########################################")
displacement_v = vec3_create([10.0, 0.0, 0])
displacement_m = gl_mat4_from_translation(displacement_v)
print("Translate Matrix")
print("")
#print(displacement_m)
transform_array = np.array(displacement_m, np.float32)
print(transform_array)
print("")
print("#########################################")
print("Translate and Rotate")
ms = matstack()
print("")
print("")
ms.loadMatrix(location_m)
mvMatrix_tmp = mat4_create(None)
ms.multMatrix(displacement_m)
ms.getMatrix(mvMatrix_tmp)
transform_array = np.array(mvMatrix_tmp, np.float32)
print(transform_array)
print("")
mvMatrix = mat4_create(None)
ms.multMatrix(rotation_m)
ms.getMatrix(mvMatrix)
transform_array = np.array(mvMatrix, np.float32)
print(transform_array)
print("")
print("#########################################")
print("Rotate and Translate")
ms = matstack()
print("")
transform_array = np.array(rotation_m, np.float32)
print(transform_array)
print("")
transform_array = np.array(location_m, np.float32)
print(transform_array)
print("")
ms.loadMatrix(location_m)
mvMatrix_tmp = mat4_create(None)
ms.multMatrix(rotation_m)
ms.getMatrix(mvMatrix_tmp)
transform_array = np.array(mvMatrix_tmp, np.float32)
print(transform_array)
print("")
mvMatrix = mat4_create(None)
ms.multMatrix(displacement_m)
ms.getMatrix(mvMatrix)
transform_array = np.array(mvMatrix, np.float32)
print(transform_array)
print("")
print("#########################################")
print("#########################################")
print("Push / Pop version")
print("")
ms = matstack()
print("Initialise")
ms.loadMatrix(location_m)
mvMatrix = mat4_create(None)
ms.getMatrix(mvMatrix)
transform_array = np.array(mvMatrix, np.float32)
print(transform_array)
print("Push")
mvMatrix = mat4_create(None)
ms.pushMatrix()
ms.getMatrix(mvMatrix)
transform_array = np.array(mvMatrix, np.float32)
print(transform_array)
print("Translate")
mvMatrix = mat4_create(None)
ms.multMatrix(displacement_m)
ms.getMatrix(mvMatrix)
transform_array = np.array(mvMatrix, np.float32)
print(transform_array)
print("Pop")
mvMatrix = mat4_create(None)
ms.popMatrix()
ms.getMatrix(mvMatrix)
transform_array = np.array(mvMatrix, np.float32)
print(transform_array)
print("#########################################")
print("Push")
mvMatrix = mat4_create(None)
ms.pushMatrix()
ms.getMatrix(mvMatrix)
transform_array = np.array(mvMatrix, np.float32)
print(transform_array)
print("Translate")
mvMatrix = mat4_create(None)
ms.multMatrix(displacement_m)
ms.getMatrix(mvMatrix)
transform_array = np.array(mvMatrix, np.float32)
import numpy as np
from scipy.ndimage.interpolation import map_coordinates
import lentil
def circle(shape, radius, shift=(0, 0)):
"""Compute a circle with anti-aliasing.
Parameters
----------
shape : array_like
Size of output in pixels (nrows, ncols)
radius : float
Radius of circle in pixels
shift : (2,) array_like, optional
How far to shift center in float (rows, cols). Default is (0, 0).
Returns
-------
circle : ndarray
"""
rr, cc = lentil.helper.mesh(shape)
r = np.sqrt(np.square(rr - shift[0]) + np.square(cc - shift[1]))
return np.clip(radius + 0.5 - r, 0.0, 1.0)
def circlemask(shape, radius, shift=(0, 0)):
"""Compute a circular mask.
Parameters
----------
shape : array_like
Size of output in pixels (nrows, ncols)
radius : float
Radius of circle in pixels
shift : array_like, optional
How far to shift center in float (rows, cols). Default is (0, 0).
Returns
-------
mask : ndarray
"""
mask = lentil.circle(shape, radius, shift)
mask[mask > 0] = 1
return mask
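# Hedged usage sketch (assumes lentil is installed): an anti-aliased disk and
# its binarized counterpart on a 64x64 grid.
def _demo_circle():
    disk = circle((64, 64), radius=20)      # edge pixels take fractional values
    mask = circlemask((64, 64), radius=20)  # same support, strictly 0/1
    return disk, mask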
def hexagon(shape, radius, rotate=False):
"""Compute a hexagon mask.
Parameters
----------
shape : array_like
Size of output in pixels (nrows, ncols)
radius : int
Radius of outscribing circle (which also equals the side length) in
pixels.
rotate : bool
Rotate mask so that flat sides are aligned with the Y direction instead
of the default orientation which is aligned with the X direction.
Returns
-------
mask : ndarray
"""
inner_radius = radius * np.sqrt(3)/2
side_length = radius/2
rr, cc = lentil.helper.mesh(shape)
    rect = np.where((np.abs(cc) <= side_length) & (np.abs(rr) <= inner_radius), 1.0, 0.0)  # central rectangle of the hexagon
import numpy as np
class GoGame:
def __init__(self, game_size=19, handicap=None, rule='weiqi'):
# assert game_size in [9, 13, 19]
assert rule in ['weiqi', 'wuzi']
self.end_game = False
self.game_size = game_size
# game_board represents the current state of the game: 1 represents a black stone, 2 a white stone
self.game_board = np.zeros(shape=(game_size, game_size), dtype=np.int8)
# boardered is game_board with borders; border cells are 3s. It uses a shifted coordinate system in which (x, y) -> (x + 1, y + 1)
self.boardered = None
# map storing information of whether a stone have qi or not
self.qi_map = np.zeros(shape=(game_size, game_size), dtype=np.int8)
# group_map shows which group each stone belongs to
self.group_map = np.zeros(shape=(game_size, game_size), dtype=np.int8)
self.group_indexes = []
# da_jie is a boolean indicating whether there is a jie (ko)
self.da_jie = False
self.game_history = []
self.player = 1 # black goes first
self.rule = rule
if handicap:
self.handicap = []
self.player = 2 # white goes first when handicap
def add_boarder(self):
# this method adds a border to the game_board matrix, generating the bordered map of all stones
vertical = np.ones(shape=(1, self.game_size), dtype=np.int8).T
        self.boardered = np.column_stack((3 * vertical, self.game_board, 3 * vertical))
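        # Hedged continuation (assumption; the excerpt ends above): pad the rows
        # the same way so the whole frame is 3s. Equivalently, one could write
        # self.boardered = np.pad(self.game_board, 1, constant_values=3).
        horizontal = np.ones(shape=(1, self.game_size + 2), dtype=np.int8)
        self.boardered = np.vstack((3 * horizontal, self.boardered, 3 * horizontal))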
import numpy as np
import sys, os, re, math
from numpy import linalg as LA
gamma = 1.4
gammaMinusOneInv = 1./(gamma-1.)
def sol(xin,yin,zin, t):
return 1. + 0.2*np.sin(np.pi*(xin+yin+zin-t*3.))
def computePressure(rho, u, v, w, E):
return (gamma - 1.) * (E - rho*(u**2+v**2+w**2)*0.5)
def extractN(ns):
reg = re.compile(r''+ns+'.+')
file1 = open('info.dat', 'r')
strings = re.search(reg, file1.read())
file1.close()
assert(strings)
return int(strings.group().split()[1])
if __name__== "__main__":
st = int(sys.argv[1])
nx = extractN('nx')
ny = extractN('ny')
nz = extractN('nz')
numCells = nx*ny*nz
fomTotDofs = numCells*5
D = np.fromfile("solution.bin")
nt = int(np.size(D) / fomTotDofs)  # number of saved time steps
from copy import deepcopy
from skimage import io
import hyperspy.api as hs
import numpy as np
from skimage.feature import match_template
def loadData(folderPath, fileName):
imgs = [];
if fileName.lower().endswith(('.png', '.jpg', '.jpeg','.tif')):
if len(io.imread(folderPath + fileName).shape)>2:
b= np.float64(io.imread(folderPath + fileName))
for j in range(b.shape[0]):
imgs.append(b[j,:,:])
else:
imgs.append(np.float64(io.imread(folderPath + fileName)))
elif fileName.lower().endswith(('.dm3','.emd')):
dm3Data=hs.load(folderPath + fileName)
if fileName.lower().endswith(('.emd')):
data = []
for channel in dm3Data:
data.append(np.float64(channel.data))
data = np.stack(data, axis=2)
imgs.append(data)
else:
imgs.append(np.float64(dm3Data.data))
if len(imgs[0].shape)>2:
imgs = list(imgs[0])
else:
print('ERROR: filetyp not supported! Please contact me.')
return imgs
def generateTemplates(startPosList, imgs, radius):
templates=[]
    # templates are only taken from the first image of the images list (since the images are similar)
for startPos in startPosList:
        templates.append(deepcopy(imgs[0][startPos[0]:startPos[0]+int(2*radius),
                                          startPos[1]:startPos[1]+int(2*radius)]))
return templates
def findDissimilarTemplates(templates, imgs, radius, minTemplateClasses):
minresults=[]
# best result is at idx 0
best=0
while len(templates)<minTemplateClasses:
i=0
for img in imgs:
if not len(minresults)==len(imgs):
minresult=[]
for template in templates:
result = match_template(img, template)
resultshape=result.shape
if len(minresult)==0:
minresult=np.abs(result)
else:
minresult=np.maximum(minresult,np.abs(result))
minresults.append(minresult)
idx = (minresults[i].flatten()).argsort()
            idxd = np.unravel_index(idx, resultshape)
import numpy as np
def sigma_rutgers_mat(H,KB,sc_w,Cs_w,S_RUTGERS,rutgers):
VSTRETCHING = rutgers['Vstreching']
rtheta_s = rutgers['rtheta_s']
rtheta_b = rutgers['rtheta_b']
TCLINE = rutgers['Tcline']
KBm1 = KB-1
hc = TCLINE
z_w=np.ones((S_RUTGERS.shape[1],KB))*0.
z_w[:,0]=-1.
Zt_avg1 = 0.
for k in range(1,KBm1):
cff_w = hc*sc_w[k]
cff1_w = Cs_w[k]
hinv=1./(hc+H)
z_w[:,k]=(cff_w+cff1_w*H)*hinv
S_RUTGERS=np.fliplr(z_w).T
return S_RUTGERS
def sigma_rutgers_vec(h,KB,sc_w,Cs_w,V_RUTGERS,rutgers):
VSTRETCHING = rutgers['Vstreching']
rtheta_s = rutgers['rtheta_s']
rtheta_b = rutgers['rtheta_b']
TCLINE = rutgers['Tcline']
KBm1 = KB-1
hc = TCLINE
# ! ------------------------------------------------------------------
# ! Section 2 : Compute Vertical Height as in ROMS RUTGERS/UCLA
# !-----------------------------------------------------------------------
# ! New formulation: Compute vertical depths (meters, negative) at
# ! RHO- and W-points, and vertical grid thicknesses.
# ! Various stretching functions are possible, as defined above.
# !
# ! z_w(x,y,s,t) = zeta(x,y,t) + [zeta(x,y,t)+ h(x,y)] * Zo_w
# !
# ! Zo_w = [hc * s(k) + C(k) * h(x,y)] / [hc + h(x,y)]
# !
# ! but with zeta = 0
# !-----------------------------------------------------------------------
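    #  Worked example (illustrative numbers): with hc = Tcline = 10 m, h = 100 m,
    #  s(k) = -0.5 and C(k) = -0.3,
    #      Zo_w = (10*(-0.5) + (-0.3)*100) / (10 + 100) = -35/110 ~ -0.318,
    #  i.e. that interface sits about a third of the water depth below the surface.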
z_w = 0.
Zt_avg1 = 0.
z_w=np.ones((KB,1))*0.
z_w[0] = -1.
for k in range(1,KBm1):
cff_w = hc*sc_w[k]
cff1_w = Cs_w[k]
hwater=h
hinv=1./(hc+hwater)
cff2_w=(cff_w+cff1_w*hwater)*hinv
z_w[k]=cff2_w
# ! ------------------------------------------------------------------
# ! Section 3 : WRAPPER : ROMS vert. coord. to SCHISM vert. coord.
# !-----------------------------------------------------------------------
    V_RUTGERS = np.flipud(z_w)
    return V_RUTGERS
# Conversion of BaselineController_Main to python
# Author: <NAME> and <NAME>
#import necessary functions
import numpy as np
import pandas as pd
import random as rand
from scipy.io import loadmat  # used below in place of Matlab's load()/strcat()
from CodeFrom_SWEEFA import *
def BaselineController_Main():
#----------------------------- OS Information ----------------------------%
OS = 2 # 1 - Linux ; 2 - Windows-Laptop ; 3 - Windows-PC
#-------------------------- Simulation Step Sizes ------------------------%
FileRes=10 # in Minutes
Simulation_StepSize = FileRes/60 # in Hours
StepSize = FileRes*60 # in Seconds
SmartCommunity_ControllerType=1 # 1 = Smart Local Controller ; 2 = Dumb Local Controller
#-------------------------- Simulation Parameters ------------------------%
SimulationType = 0 # Important for Single Large House Simulation [1,2,3,4,5,6,7,8] == [N_PV_Bat_EV, N_PV_Bat, N_PV_EV, N_Bat_EV, N_PV, N_Bat, N_EV, N_None]
LoadDataType=2 # 1 - File Generated from Preprocessed Pecan Street Data files ; 2 - .mat File already exists
WeatherDataType=2 # 1 - File Generated from Preprocessed NSRDB File ; 2 - .mat File already exists
Single_House_Plotting_Index=1 # House Index for Single House Plotting
#------------------------- Community Specification -----------------------%
N_PV_Bat=1 # Houses with both PV and Battery
N_PV=1 # Houses with just PV
N_Bat=1 # Houses with just Battery
N_None=1 # Houses with niether PV and Battery
# Computing Total Number of Houses
N_House = N_PV_Bat+N_PV+N_Bat+N_None
N_House_Vector=[N_PV_Bat,N_Bat,N_PV,N_None]
#----------------- Plant Initial Condition Specification -----------------%
# House Temperature Intial Condition
T_AC_Base=24
T_House_Variance=0.5
# Battery Initial Condition
N1=1; # User Input - Battery Max Changing Factor
Battery_Energy_Max = 13.5*N1
#--------------------- Simulation Period Specification -------------------%
# Load Computation Start Date
StartYear=2017 # User Defined
StartMonth=9 # User Defined
StartDay=11 # User Defined
StartTime=0 # User Defined
# Load Computation End Date
EndYear=2017 # User Defined
EndMonth=9 # User Defined
EndDay=18 # User Defined
EndTime=24-(FileRes/60) #24-(FileRes/60)
#----------------------- Folder Paths Specification ----------------------%
ImageFolder_Name='Gainesville_BaseLine_7DayTest_SC_PVBat1_Bat1_PV1_None1_SCL1_'
SimulationData_FileName='FigurePlotterData_Gainesville_BaseLine_7DayTest_SC_PVBat1_Bat1_PV1_None1_SLC1'
SimulationPerformanceData_FileName='PerformanceData_Gainesville_BaseLine_7DayTest_SC_PVBat1_Bat1_PV1_None1_SLC1'
LoadData_FileName='PecanStreet_LoadData_SC_PVBat1_Bat1_PV1_None1'
WeatherData_FileName='Gainesville_Irma'
#-------------------- Weather Data Location and Period -------------------%
# Getting to Weather Data Folder in the Correct OS Folder
if OS == 1: #Linux
WeatherDataFile_Path="/home/ninadgaikwad/Dropbox (UFL)/NinadGaikwad_PhD/Gaikwad_Research/Gaikwad_Research_Work/19_Resiliency/codes/Matlab_Scripts_New/CCTA_2020/Laptop_Final_Improved/DwellTime_CNCL_WithoutL1/Data/Gainesville_2017_To_2017_WeatherData_NSRDB_30minTo10minRes.csv"
LoadDataFolder_Path="/home/ninadgaikwad/Dropbox (UFL)/NinadGaikwad_PhD/Gaikwad_Research/Gaikwad_Research_Work/20_Gaikwad_SmartCommunity/data/PreProcessedFiles/10minute_data_austin_HouseWise/"
elif OS == 2: #Windows-Laptop
WeatherDataFile_Path = r"C:\Users\ninad\Dropbox (UFL)\NinadGaikwad_PhD\Gaikwad_Research\Gaikwad_Research_Work\19_Resiliency\codes\Matlab_Scripts_New\CCTA_2020\Laptop_Final_Improved\DwellTime_CNCL_WithoutL1\Data\Gainesville_2017_To_2017_WeatherData_NSRDB_30minTo10minRes.csv"
LoadDataFolder_Path= 'C:\\Users\\ninad\\Dropbox (UFL)\\NinadGaikwad_PhD\\Gaikwad_Research\\Gaikwad_Research_Work\\20_Gaikwad_SmartCommunity\\data\\PreProcessedFiles\\10minute_data_austin_HouseWise\\'
elif OS == 3: # Windows-PC
WeatherDataFile_Path = "C:\\Users\\Me!\\Dropbox (UFL)\\NinadGaikwad_PhD\\Gaikwad_Research\\Gaikwad_Research_Work\\19_Resiliency\\codes\\Matlab_Scripts_New\\CCTA_2020\\Laptop_Final_Improved\\DwellTime_CNCL_WithoutL1\\Data\\Gainesville_2017_To_2017_WeatherData_NSRDB_30minTo10minRes.csv"
LoadDataFolder_Path = "C:\\Users\\Me!\\Dropbox (UFL)\\NinadGaikwad_PhD\\Gaikwad_Research\\Gaikwad_Research_Work\\20_Gaikwad_SmartCommunity\\data\\PreProcessedFiles\\10minute_data_austin_HouseWise\\"
# Weather Data Extraction
# Creating Simulation_Params Struct (Dict data structure used in python)
Simulation_Params = {}
Simulation_Params['FileRes'] = FileRes
Simulation_Params['Simulation_StepSize'] = Simulation_StepSize
Simulation_Params['StepSize'] = StepSize
Simulation_Params['SmartCommunity_ControllerType'] = SmartCommunity_ControllerType
# Creating HEMSWeatherData_Input Struct (Dict data structure used in python)
#Weather data dictionary
HEMSWeatherData_Input = {'WeatherDataFile_Path':WeatherDataFile_Path,'StartYear':StartYear, 'StartMonth':StartMonth, 'StartDay':StartDay, 'StartTime':StartTime, 'EndYear':EndYear, 'EndMonth':EndMonth, 'EndDay':EndDay, 'EndTime':EndTime}
if WeatherDataType==1: # We do not have Weather Data File
#what is the output of weatherdataextractor, make it a dict?
HEMSWeatherData_Output = WeatherData_Extractor(HEMSWeatherData_Input,Simulation_Params,WeatherData_FileName);
elif (WeatherDataType==2): # We have Weather Data File
        HEMSWeatherData_Output = loadmat(WeatherData_FileName + '.mat')  # Python equivalent of the Matlab load(strcat(...)); assumes the saved variable names match the keys used below
# Load Data Extraction
#Type=1; % Type of Load Data Extraction
if LoadDataType==1: # We do not have Load Data File
        PecanStreet_Data_Output = PecanStreet_Data_Extractor(HEMSWeatherData_Input,Simulation_Params,LoadDataFolder_Path,N_House_Vector,SimulationType,LoadData_FileName)
elif LoadDataType==2: # We already have Load Data File
        PecanStreet_Data_Output = loadmat(LoadData_FileName + '.mat')['PecanStreet_Data_Output']  # assumed key; Python stand-in for the Matlab load(strcat(...))
# Basic Computation
#-------------------- Creating Community_Params Struct -------------------%
Community_Params = {}
Community_Params['N_House']=N_House
Community_Params['N_PV_Bat']=N_PV_Bat
Community_Params['N_Bat']=N_Bat
Community_Params['N_PV']=N_PV
Community_Params['N_None']=N_None
#------------------- From Extracted Weather Data -------------------------%
Ws = HEMSWeatherData_Output['Ws']
T_am = HEMSWeatherData_Output['T_am']
GHI = HEMSWeatherData_Output['GHI']
DNI = HEMSWeatherData_Output['DNI']
DateTimeVector = HEMSWeatherData_Output['DateTimeVector']
DateTime_Matrix = HEMSWeatherData_Output['DateTime_Matrix']
Simulation_Steps_Total = len(DateTimeVector)
#------------------------ From Extracted Load Data -----------------------%
# Getting Renewable Source Data (adding -1 to indexes because matlab is 1 indexed and python is 0 indexed)
SolarGen_Data=PecanStreet_Data_Output[:,4:6,:]
Battery_ChargerDischarge_Data=PecanStreet_Data_Output[:,7-1,:]
EVCharging_Data=PecanStreet_Data_Output[:,7+1-1:9,:]
E_LoadData=PecanStreet_Data_Output[:,:,:]
# Making Negatives (-) = 0 in LoadData
#goes through all values in certain columns and takes out all negative values
#E_LoadData(E_LoadData[:,9+1-1:end,:]<0)=0
for i in range(E_LoadData.shape[0]):
for j in range(E_LoadData.shape[1]):
for k in range(9, E_LoadData.shape[2]):
if E_LoadData[i,j,k] < 0:
E_LoadData[i,j,k] = 0
# Creating 8 Level Priority Load Data
    #rewritten to match the Matlab sum() functionality using np.sum
E_Load_P1 = np.sum(E_LoadData[:,:,9:21]) # Priority Level1 Sum column 9-20
E_Load_P2 = np.sum(E_LoadData[:,:,21:26]) # Priority Level2 Sum column 21-25
    E_Load_P3 = np.sum(E_LoadData[:,:,26:29]) # Priority Level3 Sum column 26-28
import os
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd  # used below for read_csv/isnull
from glssm import *
#################################################
#A constant, dim y = 1, dim x = 1, randomwalk(estPHI=False)
xdict = dict()
ydict = dict()
nsubj = 20
ns = np.zeros(nsubj, dtype=int)  # np.int is a removed numpy alias; plain int is the intended dtype
mu0 = np.array([0.0])
SIGMA0 = np.array([1.0]).reshape(1,1)
R = np.array([2.5]).reshape(1,1)
PHI = np.array([1.0]).reshape(1,1)
Q = np.array([1.5]).reshape(1,1)
for i in range(nsubj):
ns[i] = int(np.random.uniform(200,400,1))
A = np.ones([ns[i],1,1])
s1 = dlm(mu0,SIGMA0,A,R,PHI,Q)
tempx, tempy = s1.simulate(ns[i])
ydict.update({i: tempy})
xdict.update({i: tempx})
bigxmat = xdict[0]
bigns = [ns[0]]
bigymat = ydict[0]
for i in range(1,nsubj):
bigxmat = np.vstack((bigxmat, xdict[i]))
bigymat = np.vstack((bigymat, ydict[i]))
bigns.append(ns[i])
ntimes = np.array(bigns)
#check the first series
tempy = ydict[0]
tempx = xdict[0]
tempy[50:70] = None
A = np.ones([ns[0],1,1])
s1 = dlm(mu0,SIGMA0,A,R,PHI,Q)
xp,xf,pp,pf,K_last = s1.filtering(tempy)
xs,ps,pcov,xp,pp = s1.smoothing(tempy)
#plots
fig, ax = plt.subplots()
plt.plot(tempx, label="x")
plt.plot(tempy, label="y")
plt.plot(xp, label="xp")
plt.plot(xf, label="xf")
plt.plot(xf, label="xs")
legend = ax.legend(loc='upper left')
plt.show()
s2 = dlm(mu0,SIGMA0,A,R,PHI,Q)
s2.EM(tempy, np.array([ns[0]]),estPHI=True,maxit=100, tol=1e-4,verbose=False)
s2.mu0, s2.SIGMA0, s2.R, s2.PHI, s2.Q
#filter and forecast
x_filter, P_filter = s2.onestep_filter(tempy[ns[0]-1,:],xp[ns[0]-1,:],pp[ns[0]-1,:,:],A[ns[0]-1,:,:])
x_forecast, P_forecast = s2.onestep_forecast(x_filter,P_filter)
#EM fitting
bigA = np.ones([bigymat.shape[0], 1, 1])
dlm1 = dlm(mu0,SIGMA0,bigA,R,PHI,Q)
dlm1.EM(bigymat, ntimes, estPHI=True, maxit=100, tol=1e-4,verbose=True)
dlm1.mu0, dlm1.SIGMA0, dlm1.R, dlm1.PHI, dlm1.Q
dlm1 = dlm(mu0,SIGMA0,bigA,R,PHI,Q)
#dlm1._EM(yy,A,mu0,SIGMA0,PHI0,Q0,R0,ntimes,maxit=40, tol=1e-4, estPHI=True)
dlm1.EM(bigymat, ntimes, estPHI=False, maxit=100, tol=1e-4,verbose=True)
dlm1.mu0, dlm1.SIGMA0, dlm1.R, dlm1.PHI, dlm1.Q
####################################################################################
#real data testing
ana1 = pd.read_csv("analysis.csv")
ana1.head()
ana1.shape
#units: ppb both
#reporting prediction errors
ana1.describe()
yy = ana1["sqrty"].values.reshape(8784,1)
zz = ana1["sqrtz"].values.reshape(8784,1)
plt.plot(yy)
plt.plot(zz)
sum(pd.isnull(yy))
indices = [k for k,val in enumerate(yy) if pd.isnull(yy[k])]
train = 6588
trainid = np.arange(train) #%75
testid = np.arange(train,8784)
##############################################
#DLM1: random walk + noise
#y_t = mu_t + v_t
#mu_t = mu_{t-1} + w_t
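#In the dlm() objects used below this corresponds to A_t = 1 and PHI = 1, with
#R = Var(v_t) the observation noise and Q = Var(w_t) the state noise, e.g.
#  dlm(mu0=np.array([5.0]), SIGMA0=np.array([[1.0]]), A=fullA,
#      R=np.array([[2.5]]), PHI=np.array([[1.0]]), Q=np.array([[1.5]]))
#(values are illustrative; fullA is defined just below)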
train = 6588
trainid = np.arange(train) #%75
testid = np.arange(train,8784)
fullA = np.repeat(1.0,8784).reshape(8784,1,1)
A = np.repeat(1.0,train).reshape(train,1,1)
mu0 = np.array([5.0])
# by <NAME>
# This is a direct translation of Yuval Tassa's Matlab code into Python:
# https://benjaminmoll.com/wp-content/uploads/2020/06/LCP.m
# It solves LCP using a Newton type method
# To be consistent across platforms and with Yuval Tassa's code,
# I have tried to make as minimal changes as I could,
# so this code can be followed the same way as the original Matlab code does.
import numpy as np
import scipy.sparse as sparse
from scipy.sparse.linalg import spsolve
def LCP_python(M,q,l=[],u=[],x0=[],display=False):
tol = 1.0e-6;
mu = 1e-3;
mu_step = 5;
mu_min = 1e-5;
max_iter = 25;
b_tol = 1e-6;
n = M.shape[0]
if l == []:
l = np.zeros((n,1))
if u == []:
u = np.ones((n,1))*np.inf
if x0 == []:
x00 = np.maximum(np.ones((n,1)),l)
x0 = np.minimum(x00,u)
M = sparse.csc_matrix(M)
q = q.reshape((-1, 1))
l = l.reshape((-1, 1))
u = u.reshape((-1, 1))
x0 = x0.reshape((-1, 1))
lu = np.column_stack((l , u));
x = x0.copy();
psi,phi,J = FB(x,q,M,l,u);
new_x = True
for iter1 in range(0,max_iter):
if new_x:
mlu = np.min(np.column_stack((np.abs(x-l),np.abs(u-x))),1).reshape((-1, 1));
ilu = np.argmin(np.column_stack((np.abs(x-l),np.abs(u-x))),1).reshape((-1, 1));
bad = np.maximum(np.abs(phi),mlu) < b_tol;
psi = psi - 0.5*np.dot(phi[bad] , phi[bad])
notbad = bad == False
Jind = np.dot(notbad , notbad.T)
notbad_trues = np.sum(notbad*1)
J = sparse.csc_matrix(np.reshape(J[Jind] , (notbad_trues,notbad_trues) ))
phi = phi[notbad];
new_x = False;
nx = x.copy();
nx[bad] = lu.flatten()[(bad[bad])*1+(ilu[bad]-1)*n]
H = np.dot(J.T , J) + mu*sparse.eye(notbad_trues);
Jphi = sparse.csc_matrix.dot(J.T,phi)
d = -spsolve(sparse.csc_matrix(H) , Jphi)
nx[notbad] = x[notbad] + d;
npsi,nphi,nJ = FB(nx,q,M,l,u);
r = (psi - npsi)/ -(np.dot(Jphi.T,d) + 0.5*np.dot(sparse.csc_matrix.dot(d.T,H),d) ); # actual reduction / expected reduction
if r < 0.3:
mu = np.maximum(mu*mu_step,mu_min);
if r > 0:
x = nx.copy();
psi = npsi.copy();
phi = nphi.copy();
J = nJ.copy();
new_x = True;
if r > 0.8:
mu = mu/mu_step * (mu > mu_min);
if display:
print('iter = ', iter1 , ' --- psi = ' , psi ,' --- r = ' , r ,' --- mu = ' , mu);
if psi < tol:
break;
x = np.minimum(np.maximum(x,l),u);
return x
#----------------------------------------------------------
def FB(x,q,M,l,u):
n = x.size;
Zl = ((l >-np.inf) & (u==np.inf))
Zu = (l==-np.inf) & (u <np.inf);
Zlu = (l >-np.inf) & (u <np.inf);
Zf = (l==-np.inf) & (u==np.inf);
a = x.copy();
b = sparse.csc_matrix.dot(M,x)+q;
a[Zl] = x[Zl]-l[Zl];
a[Zu] = u[Zu]-x[Zu];
b[Zu] = -b[Zu];
if any(Zlu):
nt = np.sum(Zlu);
at = u[Zlu]-x[Zlu];
bt = -b[Zlu];
st = np.sqrt(np.power(at,2) + np.power(bt,2));
a[Zlu] = x[Zlu]-l[Zlu];
b[Zlu] = st -at -bt;
s = np.sqrt(np.power(a,2) + np.power(b,2));
phi = s - a - b;
phi[Zu] = -phi[Zu];
phi[Zf] = -b[Zf];
psi = 0.5*np.dot(phi.T , phi);
    # Assumed reconstruction of the truncated Jacobian assembly (cf. Tassa's LCP.m:
    #   M(Zlu,:) = -sparse(1:nt,find(Zlu),at./st-1,nt,n) - spdiags(bt./st-1,0,nt,nt)*M(Zlu,:)
    #   J = spdiags(a./s-1,0,n,n) + spdiags(b./s-1,0,n,n)*M ).
    # Work on a copy so the caller's M is not mutated between iterations.
    Mloc = sparse.csc_matrix(M, copy=True)
    if any(Zlu):
        rows = np.where(Zlu.flatten())[0]
        dat = np.asarray(at/st).flatten() - 1.
        S_at = sparse.csc_matrix((dat, (np.arange(nt), rows)), shape=(nt, n))
        D_bt = sparse.spdiags(np.asarray(bt/st).flatten() - 1., 0, nt, nt)
        Mloc[rows, :] = -S_at - D_bt.dot(Mloc[rows, :])
    D_a = sparse.spdiags(np.asarray(a/s).flatten() - 1., 0, n, n)
    D_b = sparse.spdiags(np.asarray(b/s).flatten() - 1., 0, n, n)
    J = (D_a + D_b.dot(Mloc)).tocsc()
    return psi, phi, J
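#----------------------------------------------------------
# A small usage sketch (illustrative data; relies on the reconstructed tail of FB
# above). Solves 0 <= x  perp  M x + q >= 0 for a strictly monotone 2x2 problem
# whose unconstrained solution x = -inv(M) q = [2/3, 5/3] is already nonnegative.
if __name__ == '__main__':
    M_demo = np.array([[2., 1.], [1., 2.]])
    q_demo = np.array([-3., -4.])
    x_demo = LCP_python(M_demo, q_demo, display=True)
    print(np.asarray(x_demo).flatten())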
###############################################################################
# DiskSCFPotential.py: Potential expansion for disk+halo potentials
###############################################################################
from pkg_resources import parse_version
import copy
import numpy
import scipy
_SCIPY_VERSION= parse_version(scipy.__version__)
if _SCIPY_VERSION < parse_version('0.10'): #pragma: no cover
from scipy.maxentropy import logsumexp
elif _SCIPY_VERSION < parse_version('0.19'): #pragma: no cover
from scipy.misc import logsumexp
else:
from scipy.special import logsumexp
from ..util import conversion
from .Potential import Potential
from .SCFPotential import SCFPotential, \
scf_compute_coeffs_axi, scf_compute_coeffs
class DiskSCFPotential(Potential):
"""Class that implements a basis-function-expansion technique for solving the Poisson equation for disk (+halo) systems. We solve the Poisson equation for a given density :math:`\\rho(R,\phi,z)` by introducing *K* helper function pairs :math:`[\\Sigma_i(R),h_i(z)]`, with :math:`h_i(z) = \mathrm{d}^2 H(z) / \mathrm{d} z^2` and search for solutions of the form
.. math::
        \Phi(R,\phi,z) = \Phi_{\mathrm{ME}}(R,\phi,z) + 4\pi G\sum_i \\Sigma_i(r)\,H_i(z)\,,
where :math:`r` is the spherical radius :math:`r^2 = R^2+z^2`. We can solve for :math:`\Phi_{\mathrm{ME}}(R,\phi,z)` by solving
.. math::
\\frac{\\Delta \Phi_{\mathrm{ME}}(R,\phi,z)}{4\pi G} = \\rho(R,\phi,z) - \sum_i\left\{ \Sigma_i(r)\,h_i(z) + \\frac{\mathrm{d}^2 \Sigma_i(r)}{\mathrm{d} r^2}\,H_i(z)+\\frac{2}{r}\,\\frac{\mathrm{d} \Sigma_i(r)}{\mathrm{d} r}\left[H_i(z)+z\,\\frac{\mathrm{d}H_i(z)}{\mathrm{d} z}\\right]\\right\}\,.
We solve this equation by using the :ref:`SCFPotential <scf_potential>` class and methods (:ref:`scf_compute_coeffs_axi <scf_compute_coeffs_axi>` or :ref:`scf_compute_coeffs <scf_compute_coeffs>` depending on whether :math:`\\rho(R,\phi,z)` is axisymmetric or not). This technique works very well if the disk portion of the potential can be exactly written as :math:`\\rho_{\mathrm{disk}} = \sum_i \Sigma_i(R)\,h_i(z)`, because the effective density on the right-hand side of this new Poisson equation is then not 'disky' and can be well represented using spherical harmonics. But the technique is general and can be used to compute the potential of any disk+halo potential; the closer the disk is to :math:`\\rho_{\mathrm{disk}} \\approx \sum_i \Sigma_i(R)\,h_i(z)`, the better the technique works.
This technique was introduced by `<NAME> (1995) <http://adsabs.harvard.edu/abs/1995MNRAS.277.1341K>`__ and was popularized by `Dehnen & Binney (1998) <http://adsabs.harvard.edu/abs/1998MNRAS.294..429D>`__. The current implementation is a slight generalization of the technique in those papers and uses the SCF approach of `Hernquist & Ostriker (1992)
<http://adsabs.harvard.edu/abs/1992ApJ...386..375H>`__ to solve the Poisson equation for :math:`\Phi_{\mathrm{ME}}(R,\phi,z)` rather than solving it on a grid using spherical harmonics and interpolating the solution (as done in `Dehnen & Binney 1998 <http://adsabs.harvard.edu/abs/1998MNRAS.294..429D>`__).
"""
def __init__(self,amp=1.,normalize=False,
dens= lambda R,z: 13.5*numpy.exp(-3.*R)\
*numpy.exp(-27.*numpy.fabs(z)),
Sigma={'type':'exp','h':1./3.,'amp':1.},
hz={'type':'exp','h':1./27.},
Sigma_amp=None,dSigmadR=None,d2SigmadR2=None,
Hz=None,dHzdz=None,
N=10,L=10,a=1.,radial_order=None,costheta_order=None,
phi_order=None,
ro=None,vo=None):
"""
NAME:
__init__
PURPOSE:
initialize a DiskSCF Potential
INPUT:
amp - amplitude to be applied to the potential (default: 1); cannot have units currently
normalize - if True, normalize such that vc(1.,0.)=1., or, if given as a number, such that the force is this fraction of the force necessary to make vc(1.,0.)=1.
ro=, vo= distance and velocity scales for translation into internal units (default from configuration file)
dens= function of R,z[,phi optional] that gives the density [in natural units, cannot return a Quantity currently]
N=, L=, a=, radial_order=, costheta_order=, phi_order= keywords setting parameters for SCF solution for Phi_ME (see :ref:`scf_compute_coeffs_axi <scf_compute_coeffs_axi>` or :ref:`scf_compute_coeffs <scf_compute_coeffs>` depending on whether :math:`\\rho(R,\phi,z)` is axisymmetric or not)
Either:
(a) Sigma= Dictionary of surface density (example: {'type':'exp','h':1./3.,'amp':1.,'Rhole':0.} for amp x exp(-Rhole/R-R/h) )
hz= Dictionary of vertical profile, either 'exp' or 'sech2' (example {'type':'exp','h':1./27.} for exp(-|z|/h)/[2h], sech2 is sech^2(z/[2h])/[4h])
(b) Sigma= function of R that gives the surface density
dSigmadR= function that gives d Sigma / d R
d2SigmadR2= function that gives d^2 Sigma / d R^2
Sigma_amp= amplitude to apply to all Sigma functions
hz= function of z that gives the vertical profile
Hz= function of z such that d^2 Hz(z) / d z^2 = hz
dHzdz= function of z that gives d Hz(z) / d z
In both of these cases lists of arguments can be given for multiple disk components; can't mix (a) and (b) in these lists; if hz is a single item the same vertical profile is assumed for all Sigma
OUTPUT:
DiskSCFPotential object
HISTORY:
2016-12-26 - Written - Bovy (UofT)
"""
Potential.__init__(self,amp=amp,ro=ro,vo=vo,amp_units=None)
a= conversion.parse_length(a,ro=self._ro)
# Parse and store given functions
self.isNonAxi= dens.__code__.co_argcount == 3
self._parse_Sigma(Sigma_amp,Sigma,dSigmadR,d2SigmadR2)
self._parse_hz(hz,Hz,dHzdz)
if self.isNonAxi:
self._inputdens= dens
else:
self._inputdens= lambda R,z,phi: dens(R,z)
# Solve Poisson equation for Phi_ME
if not self.isNonAxi:
dens_func= lambda R,z: phiME_dens(R,z,0.,self._inputdens,
self._Sigma,self._dSigmadR,
self._d2SigmadR2,
self._hz,self._Hz,
self._dHzdz,self._Sigma_amp)
Acos, Asin= scf_compute_coeffs_axi(dens_func,N,L,a=a,
radial_order=radial_order,
costheta_order=costheta_order)
else:
dens_func= lambda R,z,phi: phiME_dens(R,z,phi,self._inputdens,
self._Sigma,self._dSigmadR,
self._d2SigmadR2,
self._hz,self._Hz,
self._dHzdz,self._Sigma_amp)
Acos, Asin= scf_compute_coeffs(dens_func,N,L,a=a,
radial_order=radial_order,
costheta_order=costheta_order,
phi_order=phi_order)
self._phiME_dens_func= dens_func
self._scf= SCFPotential(amp=1.,Acos=Acos,Asin=Asin,a=a,ro=None,vo=None)
if not self._Sigma_dict is None and not self._hz_dict is None:
self.hasC= True
self.hasC_dens= True
if normalize or \
(isinstance(normalize,(int,float)) \
and not isinstance(normalize,bool)):
self.normalize(normalize)
return None
def _parse_Sigma(self,Sigma_amp,Sigma,dSigmadR,d2SigmadR2):
"""
NAME:
_parse_Sigma
PURPOSE:
Parse the various input options for Sigma* functions
HISTORY:
2016-12-27 - Written - Bovy (UofT/CCA)
"""
if isinstance(Sigma,dict):
Sigma= [Sigma]
try:
nsigma= len(Sigma)
except TypeError:
Sigma_amp= [Sigma_amp]
Sigma= [Sigma]
dSigmadR= [dSigmadR]
d2SigmadR2= [d2SigmadR2]
nsigma= 1
self._nsigma= nsigma
self._Sigma_amp= Sigma_amp
self._Sigma= Sigma
self._dSigmadR= dSigmadR
self._d2SigmadR2= d2SigmadR2
if isinstance(Sigma[0],dict):
self._Sigma_dict= copy.copy(Sigma)
self._parse_Sigma_dict()
else:
self._Sigma_dict= None
return None
def _parse_Sigma_dict(self):
Sigma_amp, Sigma, dSigmadR, d2SigmadR2= [], [], [], []
for ii in range(self._nsigma):
ta, ts, tds, td2s= self._parse_Sigma_dict_indiv(self._Sigma[ii])
Sigma_amp.append(ta)
Sigma.append(ts)
dSigmadR.append(tds)
d2SigmadR2.append(td2s)
self._Sigma_amp= Sigma_amp
self._Sigma= Sigma
self._dSigmadR= dSigmadR
self._d2SigmadR2= d2SigmadR2
return None
def _parse_Sigma_dict_indiv(self,Sigma):
stype= Sigma.get('type','exp')
if stype == 'exp' and not 'Rhole' in Sigma:
rd= Sigma.get('h',1./3.)
ta= Sigma.get('amp',1.)
ts= lambda R, trd=rd: numpy.exp(-R/trd)
tds= lambda R, trd=rd: -numpy.exp(-R/trd)/trd
td2s= lambda R, trd=rd: numpy.exp(-R/trd)/trd**2.
elif stype == 'expwhole' or (stype == 'exp' and 'Rhole' in Sigma):
rd= Sigma.get('h',1./3.)
rm= Sigma.get('Rhole',0.5)
ta= Sigma.get('amp',1.)
ts= lambda R, trd=rd, trm=rm: numpy.exp(-trm/R-R/trd)
tds= lambda R, trd=rd, trm=rm: \
(trm/R**2.-1./trd)*numpy.exp(-trm/R-R/trd)
td2s= lambda R, trd=rd,trm=rm: \
((trm/R**2.-1./trd)**2.-2.*trm/R**3.)*numpy.exp(-trm/R-R/trd)
return (ta,ts,tds,td2s)
def _parse_hz(self,hz,Hz,dHzdz):
"""
NAME:
_parse_hz
PURPOSE:
Parse the various input options for Sigma* functions
HISTORY:
2016-12-27 - Written - Bovy (UofT/CCA)
"""
if isinstance(hz,dict):
hz= [hz]
try:
nhz= len(hz)
except TypeError:
hz= [hz]
Hz= [Hz]
dHzdz= [dHzdz]
nhz= 1
if nhz != self._nsigma and nhz != 1:
raise ValueError('Number of hz functions needs to be equal to the number of Sigma functions or to 1')
if nhz == 1 and self._nsigma > 1:
hz= [hz[0] for ii in range(self._nsigma)]
if not isinstance(hz[0],dict):
Hz= [Hz[0] for ii in range(self._nsigma)]
dHzdz= [dHzdz[0] for ii in range(self._nsigma)]
self._Hz= Hz
self._hz= hz
self._dHzdz= dHzdz
self._nhz= len(self._hz)
if isinstance(hz[0],dict):
self._hz_dict= copy.copy(hz)
self._parse_hz_dict()
else:
self._hz_dict= None
return None
def _parse_hz_dict(self):
hz, Hz, dHzdz= [], [], []
for ii in range(self._nhz):
th, tH, tdH= self._parse_hz_dict_indiv(self._hz[ii])
hz.append(th)
Hz.append(tH)
dHzdz.append(tdH)
self._hz= hz
self._Hz= Hz
self._dHzdz= dHzdz
return None
def _parse_hz_dict_indiv(self,hz):
htype= hz.get('type','exp')
if htype == 'exp':
zd= hz.get('h',0.0375)
th= lambda z, tzd=zd: 1./2./tzd*numpy.exp(-numpy.fabs(z)/tzd)
tH= lambda z, tzd= zd: (numpy.exp(-numpy.fabs(z)/tzd)-1.
+numpy.fabs(z)/tzd)*tzd/2.
tdH= lambda z, tzd= zd: 0.5*numpy.sign(z)\
*(1.-numpy.exp(-numpy.fabs(z)/tzd))
elif htype == 'sech2':
zd= hz.get('h',0.0375)
th= lambda z, tzd=zd: 1./numpy.cosh(z/2./tzd)**2./4./tzd
# Avoid overflow in cosh
tH= lambda z, tzd= zd: \
tzd*(logsumexp(numpy.array([z/2./tzd,-z/2./tzd]),axis=0)\
-numpy.log(2.))
tdH= lambda z, tzd= zd: numpy.tanh(z/2./tzd)/2.
return (th,tH,tdH)
def _evaluate(self,R,z,phi=0.,t=0.):
"""
NAME:
_evaluate
PURPOSE:
evaluate the potential at (R,z, phi)
INPUT:
R - Cylindrical Galactocentric radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
potential at (R,z, phi)
HISTORY:
2016-12-26 - Written - Bovy (UofT/CCA)
"""
r= numpy.sqrt(R**2.+z**2.)
out= self._scf(R,z,phi=phi,use_physical=False)
for a,s,H in zip(self._Sigma_amp,self._Sigma,self._Hz):
out+= 4.*numpy.pi*a*s(r)*H(z)
return out
def _Rforce(self,R,z,phi=0, t=0):
"""
NAME:
_Rforce
PURPOSE:
evaluate the radial force at (R,z, phi)
INPUT:
R - Cylindrical Galactocentric radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
radial force at (R,z, phi)
HISTORY:
2016-12-26 - Written - Bovy (UofT/CCA)
"""
r= numpy.sqrt(R**2.+z**2.)
out= self._scf.Rforce(R,z,phi=phi,use_physical=False)
for a,ds,H in zip(self._Sigma_amp,self._dSigmadR,self._Hz):
out-= 4.*numpy.pi*a*ds(r)*H(z)*R/r
return out
def _zforce(self,R,z,phi=0,t=0):
"""
NAME:
_zforce
PURPOSE:
evaluate the vertical force at (R,z, phi)
INPUT:
R - Cylindrical Galactocentric radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
vertical force at (R,z, phi)
HISTORY:
2016-12-26 - Written - Bovy (UofT/CCA)
"""
r= numpy.sqrt(R**2.+z**2.)
out= self._scf.zforce(R,z,phi=phi,use_physical=False)
for a,s,ds,H,dH in zip(self._Sigma_amp,self._Sigma,self._dSigmadR,
self._Hz,self._dHzdz):
out-= 4.*numpy.pi*a*(ds(r)*H(z)*z/r+s(r)*dH(z))
return out
def _phiforce(self,R,z,phi=0.,t=0.):
"""
NAME:
_phiforce
PURPOSE:
evaluate the azimuthal force for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the azimuthal force
HISTORY:
2016-12-26 - Written - Bovy (UofT)
"""
return self._scf.phiforce(R,z,phi=phi,use_physical=False)
def _R2deriv(self,R,z,phi=0.,t=0.):
"""
NAME:
_R2deriv
PURPOSE:
evaluate the second radial derivative for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the second radial derivative
HISTORY:
2016-12-26 - Written - Bovy (UofT/CCA)
"""
r= numpy.sqrt(R**2.+z**2.)
out= self._scf.R2deriv(R,z,phi=phi,use_physical=False)
for a,ds,d2s,H in zip(self._Sigma_amp,self._dSigmadR,self._d2SigmadR2,
self._Hz):
out+= 4.*numpy.pi*a*H(z)/r**2.*(d2s(r)*R**2.+z**2./r*ds(r))
return out
def _z2deriv(self,R,z,phi=0.,t=0.):
"""
NAME:
_z2deriv
PURPOSE:
evaluate the second vertical derivative for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the second vertical derivative
HISTORY:
2016-12-26 - Written - Bovy (UofT/CCA)
"""
r= numpy.sqrt(R**2.+z**2.)
out= self._scf.z2deriv(R,z,phi=phi,use_physical=False)
for a,s,ds,d2s,h,H,dH in zip(self._Sigma_amp,
self._Sigma,self._dSigmadR,self._d2SigmadR2,
self._hz,self._Hz,self._dHzdz):
out+= 4.*numpy.pi*a*(H(z)/r**2.*(d2s(r)*z**2.+ds(r)*R**2./r)
+2.*ds(r)*dH(z)*z/r+s(r)*h(z))
return out
def _Rzderiv(self,R,z,phi=0.,t=0.):
"""
NAME:
_Rzderiv
PURPOSE:
evaluate the mixed R,z derivative for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
d2phi/dR/dz
HISTORY:
2016-12-26 - Written - Bovy (UofT/CCA)
"""
        r= numpy.sqrt(R**2.+z**2.)
        # completes the truncated method: the mixed R,z derivative of the
        # 4 pi a Sigma_i(r) H_i(z) terms is added to the SCF contribution
        out= self._scf.Rzderiv(R,z,phi=phi,use_physical=False)
        for a,ds,d2s,H,dH in zip(self._Sigma_amp,self._dSigmadR,
                                 self._d2SigmadR2,self._Hz,self._dHzdz):
            out+= 4.*numpy.pi*a*(H(z)*R*z/r**2.*(d2s(r)-ds(r)/r)
                                 +ds(r)*dH(z)*R/r)
        return out
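# A minimal usage sketch (illustrative parameters; assumes galpy is installed and
# this class is importable as galpy.potential.DiskSCFPotential):
#
#   from galpy.potential import DiskSCFPotential
#   dp= DiskSCFPotential(dens=lambda R,z: 13.5*numpy.exp(-3.*R-27.*numpy.fabs(z)),
#                        Sigma={'type':'exp','h':1./3.,'amp':1.},
#                        hz={'type':'exp','h':1./27.},
#                        a=1.,N=10,L=10)
#   print(dp(1.,0.1))  # potential at (R,z)=(1,0.1) in natural units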
import numpy as np
from sim.sim2d import sim_run
# Simulator options.
options = {}
options['FIG_SIZE'] = [8,8]
options['DRIVE_IN_CIRCLE'] = False
# If False, measurements will be x,y.
# If True, measurements will be x,y, and current angle of the car.
# Required if you want to pass the driving in circle.
options['MEASURE_ANGLE'] = True
options['RECIEVE_INPUTS'] = False
class KalmanFilter:
def __init__(self):
# Initial State
if(options['MEASURE_ANGLE'] == False):
self.x = np.matrix([[0.],
[0.],
[0.],
[0]])
# Uncertainity Matrix
self.P = np.matrix([[1000., 0., 0., 0.],
[0., 1000., 0.,0.],
[0., 0., 1000., 0.],
[0., 0., 0., 1000.]])
# Next State Function
self.F = np.matrix([[1., 0., 1., 0.],
[0., 1., 0., 1.],
[0., 0., 1., 0.],
[0., 0., 0., 1.]])
# Measurement Function
self.H = np.matrix([[1., 0., 0., 0.],
[0., 1., 0., 0.]])
# Measurement Uncertainty
self.R = np.matrix([[1,0],
[0,1]])
# Identity Matrix
self.I = np.matrix([[1., 0., 0., 0.],
[0., 1., 0., 0.],
[0., 0., 1., 0.],
[0., 0., 0., 1.]])
self.prev_time = 0
else:
# X, Y, theta, X_dot, Y_dot, theta_dot
self.x = np.matrix([[0.],
[0.],
[0.],
[0.],
[0.],
[0.]])
# Uncertainity Matrix
self.P = np.matrix([[1000., 0., 0., 0., 0., 0.],
[0., 1000., 0.,0., 0., 0.],
[0., 0., 1000., 0., 0., 0.],
[0., 0., 0., 1000., 0., 0.],
[0., 0., 0., 0., 1000., 0.],
[0., 0., 0., 0., 0., 1000.]])
# Next State Function
self.F = np.matrix([[1., 0., 0., 1., 0., 0.],
[0., 1., 0., 0., 1., 0.],
[0., 0., 1., 0., 0., 1.],
[0., 0., 0., 1., 0., 0.],
[0., 0., 0., 0., 1., 0.],
[0., 0., 0., 0., 0., 1.]])
# Measurement Function
self.H = np.matrix([[1., 0., 0., 0., 0., 0.],
[0., 1., 0., 0., 0., 0.],
[0., 0., 1., 0., 0., 0.]])
# Measurement Uncertainty
self.R = np.matrix([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]])
# Identity Matrix
self.I = np.matrix([[1., 0., 0., 0., 0., 0.],
[0., 1., 0., 0., 0., 0.],
[0., 0., 1., 0., 0., 0.],
[0., 0., 0., 1., 0., 0.],
[0., 0., 0., 0., 1., 0.],
[0., 0., 0., 0., 0., 1.]])
self.prev_time = 0
def predict(self,t):
dt = t - self.prev_time
        # couple each position/angle state to its rate at an offset of half the
        # state size (offset 2 for the 4-state filter, 3 for the 6-state filter)
        half = np.shape(self.F)[0] // 2
        for i in range(half):
            self.F[i, i + half] = dt
self.x = self.F*self.x
        self.P = self.F*self.P*np.transpose(self.F)
        self.prev_time = t
        # Assumed continuation: return the predicted position for the simulator.
        return [self.x[0], self.x[1]]
from __future__ import print_function
import inbreast
import keras.backend as K
from roc_auc import RocAucScoreOp, PrecisionOp, RecallOp, F1Op
from roc_auc import AUCEpoch, PrecisionEpoch, RecallEpoch, F1Epoch, LossEpoch, ACCEpoch
#from keras.preprocessing.image import ImageDataGenerator
from image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten, BatchNormalization, SpatialDropout2D
from keras.layers import advanced_activations
from keras.layers import Convolution2D, MaxPooling2D
from keras.optimizers import SGD, Adam, RMSprop
from keras.utils import np_utils
import numpy as np
from keras.callbacks import ModelCheckpoint
from keras.regularizers import l1l2
import inbreast
from convnetskeras.convnets import preprocess_image_batch, convnet
import os
from sklearn.metrics import roc_auc_score,roc_curve
np.random.seed(1)
#srng = RandomStreams(1)
fold = 2 # 4
valfold = 4
lr = 5e-5 #1e-3 #5e-5 #5e-5
nb_epoch = 500
batch_size = 80
l2factor = 1e-5
l1factor = 0#2e-7
weighted = False #False #True
noises = 50
data_augmentation = True
modelname = 'alexnet' # miccai16, alexnet, levynet, googlenet
pretrain = True #True #True
savename = modelname+'new_fd'+str(fold)+'_vf'+str(valfold)+'_lr'+str(lr)+'_l2'+str(l2factor)+'_l1'\
+str(l1factor)+'_ep'+str(nb_epoch)+'_bs'+str(batch_size)+'_w'+str(weighted)+'_dr'+str(False)+str(noises)+str(pretrain)
print(savename)
nb_classes = 2
# input image dimensions
img_rows, img_cols = 227, 227
# the CIFAR10 images are RGB
img_channels = 1
# the data, shuffled and split between train and test sets
trX, y_train, teX, y_test, teteX, y_test_test = inbreast.loaddataenhance(fold, 5, valfold=valfold)
trY = y_train.reshape((y_train.shape[0],1))
teY = y_test.reshape((y_test.shape[0],1))
teteY = y_test_test.reshape((y_test_test.shape[0],1))
print('tr, val, te pos num and shape')
print(trY.sum(), teY.sum(), teteY.sum(), trY.shape[0], teY.shape[0], teteY.shape[0])
ratio = trY.sum()*1./trY.shape[0]*1.
print('tr ratio'+str(ratio))
weights = np.array((ratio, 1-ratio))
#trYori = np.concatenate((1-trY, trY), axis=1)
#teY = np.concatenate((1-teY, teY), axis=1)
#teteY = np.concatenate((1-teteY, teteY), axis=1)
X_train = trX.reshape(-1, img_channels, img_rows, img_cols)
X_test = teX.reshape(-1, img_channels, img_rows, img_cols)
X_test_test = teteX.reshape(-1, img_channels, img_rows, img_cols)
print('tr, val, te mean, std')
print(X_train.mean(), X_test.mean(), X_test_test.mean())
# convert class vectors to binary class matrices
Y_train = np.zeros((y_train.shape[0],2))
Y_train[:,0] = 1-y_train
Y_train[:,1] = y_train #np_utils.to_categorical(y_train, nb_classes)
Y_test = np.zeros((y_test.shape[0],2))
Y_test[:,0] = 1-y_test
Y_test[:,1] = y_test
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import json
import os
import datetime as dt
import main
from eval import data_analysis
# LaTeX settings
plt.rc('text', usetex=True)
plt.rc('font', **{'family': 'serif', 'sans-serif': ['lmodern'], 'size': 18})
plt.rc('axes', **{'titlesize': 18, 'labelsize': 18})
# Constants
JSON_PATH = './out/'
OUT_PATH = './out/'
MODEL_NAMES = {
'KF': ('KalmanFilter', ''),
'KF(+W)': ('KalmanFilter', '_W'),
'KF(+WF)': ('KalmanFilter', '_WF'),
'KD-IC': ('KD-IC', ''),
'KD-IC(+W)': ('KD-IC', '_W'),
'KD-IC(+WF)': ('KD-IC', '_WF'),
'LN-IC': ('LogNormal-IC', ''),
'LN-IC(+W)': ('LogNormal-IC', '_W'),
'LN-IC(+WF)': ('LogNormal-IC', '_WF'),
'DeepAR': ('DeepAR', ''),
'DeepAR(+W)': ('DeepAR', '_W'),
'DeepAR(+WF)': ('DeepAR', '_WF'),
'LW': ('LastWeek', '')
}
MAIN_SEED = '42'
DECIMALS = 2
COLORS = ('C0', 'C1', 'C3', 'C9', 'C7')
MARKERS = ('o', 'X', 'v', 'd', 'p')
LINESTYLES = ('solid', 'dashed', 'dashdot')
S_D = 48
S_W = 7 * S_D
def get_file_name(model, level, cluster, seed=''):
return f'{MODEL_NAMES[model][0]}{seed}_{level}_{cluster}{MODEL_NAMES[model][1]}'
def get_path(model, level, cluster, seed=''):
return JSON_PATH + f'{MODEL_NAMES[model][0]}{seed}/{get_file_name(model, level, cluster, seed)}.json'
def load_res(model, level, cluster, seed=''):
if 'DeepAR' in model and seed == '':
seed = MAIN_SEED
with open(get_path(model, level, cluster, seed), 'r') as fp:
res = json.load(fp)
return res
def collect_results(
levels=('L0', 'L1', 'L2', 'L3'),
metrics=('MAPE', 'rMAE', 'rRMSE', 'rCRPS'),
models=('KF', 'KF(+W)', 'KF(+WF)',
'KD-IC', 'KD-IC(+W)', 'KD-IC(+WF)',
'DeepAR', 'DeepAR(+W)', 'DeepAR(+WF)',
'LW'),
seeds=(0, 1, 2, 3, 4),
forecast_reps=28,
save_results_with_info=True
):
results_path = os.path.join(JSON_PATH, 'results_with_info.npy')
if os.path.isfile(results_path):
results_with_info = np.load(results_path, allow_pickle=True)
return results_with_info[0], results_with_info[1]
results = {}
level_info = data_analysis.get_level_info(levels)
for level in levels:
clusters = level_info[level]['clusters']
# Create results array
results[level] = np.empty((len(metrics), len(models), len(clusters), forecast_reps))
results[level][:] = np.nan
for m, model in enumerate(models):
if level == 'L3' and 'KF' in model:
# No level 3 results for the KF model
continue
for c, cluster in enumerate(clusters):
                if 'DeepAR' in model and level != 'L3':
res_per_seed = []
for seed in seeds:
res_per_seed.append(load_res(model, level, cluster, seed))
for i, metric in enumerate(metrics):
results[level][i, m, c] = np.mean([res[metric] for res in res_per_seed], axis=0)
else:
res = load_res(model, level, cluster)
for i, metric in enumerate(metrics):
if 'CRPS' in metric and model == 'LW':
# No distributional forecasts for LW model
continue
results[level][i, m, c] = res[metric]
info = {
'levels': level_info,
'metrics': list(metrics),
'models': list(models),
'reps': forecast_reps
}
if save_results_with_info:
np.save(results_path, (results, info), allow_pickle=True)
return results, info
def collect_results_per_tstp(
levels=('L0', 'L1', 'L2'),
metrics=('rMAE', 'rRMSE', 'rCRPS'),
models=('KF', 'KF(+W)', 'KF(+WF)',
'KD-IC', 'KD-IC(+W)', 'KD-IC(+WF)',
'DeepAR', 'DeepAR(+W)', 'DeepAR(+WF)',
'LW'),
seeds=(0, 1, 2, 3, 4),
forecast_reps=28,
horizon=192,
save_results_per_tstp_with_info=True
):
results_path = os.path.join(JSON_PATH, 'results_per_tstp_with_info.npy')
if os.path.isfile(results_path):
results_with_info = np.load(results_path, allow_pickle=True)
return results_with_info[0], results_with_info[1]
results = {}
level_info = data_analysis.get_level_info(levels)
t_train, t_val = main.train_val_split(data_analysis.energy_df.index)
for level in levels:
clusters = level_info[level]['clusters']
# Create results array
results[level] = np.empty((len(seeds), len(metrics), len(models), len(clusters), forecast_reps, horizon))
results[level][:] = np.nan
level_info[level]['y_mean'] = []
for c, cluster in enumerate(clusters):
level_info[level]['y_mean'].append(
np.nanmean(data_analysis.get_observations_at(level, cluster, t_train))
)
y_true = data_analysis.get_observations_at(level, cluster, t_val).reshape(forecast_reps, horizon)
for m, model in enumerate(models):
if level == 'L3' and 'KF' in model:
# No level 3 results for the KF model
continue
                if 'DeepAR' in model and level != 'L3':
for s, seed in enumerate(seeds):
res = load_res(model, level, cluster, seed)
for i, metric in enumerate(metrics):
if metric == 'rMAE':
results[level][s, i, m, c] = np.abs(y_true - res['p50'])
elif metric == 'rRMSE':
results[level][s, i, m, c] = (y_true - res['mean']) ** 2
elif metric == 'rCRPS':
results[level][s, i, m, c] = res['CRPS']
else:
res = load_res(model, level, cluster)
for i, metric in enumerate(metrics):
if 'CRPS' in metric and model == 'LW':
# No distributional forecasts for LW model
continue
if metric == 'rMAE':
results[level][0, i, m, c] = np.abs(y_true - res['p50'])
elif metric == 'rRMSE':
results[level][0, i, m, c] = (y_true - res['mean']) ** 2
elif metric == 'rCRPS':
results[level][0, i, m, c] = res['CRPS']
info = {
'levels': level_info,
'metrics': list(metrics),
'models': list(models),
'reps': forecast_reps,
'horizon': horizon
}
if save_results_per_tstp_with_info:
np.save(results_path, (results, info), allow_pickle=True)
return results, info
def create_metric_df(metric, with_std=True, to_LaTeX=True):
results, info = collect_results()
i = info['metrics'].index(metric)
row_names = info['models']
col_names = info['levels'].keys()
metric_df = pd.DataFrame(index=row_names, columns=col_names, dtype=float)
for level in col_names:
for m, model in enumerate(row_names):
mean = np.mean(results[level][i, m])
metric_df.loc[model, level] = (('%%.%sf' % DECIMALS) % mean) if not np.isnan(mean) else '-'
if with_std and not np.isnan(mean):
std = np.std(results[level][i, m])
metric_df.loc[model, level] += (' (%%.%sf)' % DECIMALS) % std
if to_LaTeX:
df_to_LaTeX(metric_df)
return metric_df
def create_level_df(level, with_std=True, to_LaTeX=True):
results, info = collect_results()
row_names = info['metrics']
col_names = info['models']
level_df = pd.DataFrame(index=row_names, columns=col_names, dtype=float)
for i, metric in enumerate(row_names):
for m, model in enumerate(col_names):
mean = np.mean(results[level][i, m])
level_df.loc[metric, model] = (('%%.%sf' % DECIMALS) % mean) if not np.isnan(mean) else '-'
if with_std and not np.isnan(mean):
std = np.std(results[level][i, m])
level_df.loc[metric, model] += (' (%%.%sf)' % DECIMALS) % std
if to_LaTeX:
df_to_LaTeX(level_df)
return level_df
def create_runtime_df(models=('KF', 'KD-IC', 'DeepAR', 'LW'), with_std=False, to_LaTeX=True):
_, info = collect_results()
train_name = 'Avg. training time [s]'
prediction_name = 'Avg. prediction time [s]'
runtime_df = pd.DataFrame(index=[train_name, prediction_name], columns=models, dtype=float)
for model in models:
training_times = []
prediction_times = []
for level in info['levels'].keys():
if level == 'L3' and 'KF' in model:
# No level 3 results for the KF model
continue
for cluster in info['levels'][level]['clusters']:
res = load_res(model, level, cluster)
training_times.append(res['fit_time'])
prediction_times.append(res['prediction_time'])
decimals = DECIMALS + 1
runtime_df.loc[train_name, model] = ('%%.%sf' % decimals) % np.mean(training_times)
runtime_df.loc[prediction_name, model] = ('%%.%sf' % decimals) % np.mean(prediction_times)
if with_std:
runtime_df.loc[train_name, model] += (' (%%.%sf)' % decimals) % np.std(training_times)
runtime_df.loc[prediction_name, model] += (' (%%.%sf)' % decimals) % np.std(prediction_times)
if to_LaTeX:
df_to_LaTeX(runtime_df)
return runtime_df
def df_to_LaTeX(df):
num_columns = len(df.columns)
print(df.to_latex(
float_format=f'%.{DECIMALS}f',
na_rep='-',
column_format='l' + ''.join('r' * num_columns)
))
def get_color(model):
if 'KF' in model:
return COLORS[0]
elif 'KD-IC' in model:
return COLORS[1]
elif 'DeepAR' in model:
return COLORS[2]
elif 'LW' in model:
return COLORS[3]
else:
return COLORS[4]
def get_linestyle(model):
if '(+W)' in model:
return LINESTYLES[1]
elif '(+WF)' in model:
return LINESTYLES[2]
else:
return LINESTYLES[0]
def _complete_plot(name, legend=True, grid=True):
if legend:
plt.legend()
if grid:
plt.grid()
plt.tight_layout()
plt.savefig(OUT_PATH + f'{name}.pdf', bbox_inches='tight')
plt.close()
def plot_epoch_loss(model, level, cluster, seed=MAIN_SEED):
assert 'DeepAR' in model, "Loss plot only available for deep models"
res = load_res(model, level, cluster, seed)
train_loss = res['train_loss']
val_loss = res['val_loss']
plt.figure(figsize=(6, 4))
plt.plot(np.arange(len(train_loss)) + 1, train_loss, color=COLORS[0], label='Train')
plt.plot(np.arange(len(val_loss)) + 1, val_loss, color=COLORS[1], label='Validation')
plt.ylabel('Loss')
plt.xlabel('Epoch')
_complete_plot(f'{get_file_name(model, level, cluster, seed)}_epoch_loss', grid=False)
def plot_horizon(model, metric, horizons=(1, 2, 3, 4), levels=('L0', 'L1', 'L2')):
results, info = collect_results_per_tstp()
model_W = model + '(+W)'
model_WF = model + '(+WF)'
i = info['metrics'].index(metric)
m = info['models'].index(model)
m_W = info['models'].index(model_W)
m_WF = info['models'].index(model_WF)
score = np.empty(len(horizons))
score_W = np.empty(len(horizons))
score_WF = np.empty(len(horizons))
for h, horizon in enumerate(horizons):
idx = np.arange(0, horizon * S_D)
res = []
res_W = []
res_WF = []
for level in levels:
for c, cluster in enumerate(info['levels'][level]['clusters']):
y_mean = info['levels'][level]['y_mean'][c]
if metric == 'rRMSE':
res.append(100 * np.sqrt(np.mean(results[level][:, i, m, c, :, idx], axis=2)) / y_mean)
res_W.append(100 * np.sqrt(np.mean(results[level][:, i, m_W, c, :, idx], axis=2)) / y_mean)
res_WF.append(100 * np.sqrt(np.mean(results[level][:, i, m_WF, c, :, idx], axis=2)) / y_mean)
else:
res.append(100 * np.mean(results[level][:, i, m, c, :, idx], axis=2) / y_mean)
res_W.append(100 * np.mean(results[level][:, i, m_W, c, :, idx], axis=2) / y_mean)
res_WF.append(100 * np.mean(results[level][:, i, m_WF, c, :, idx], axis=2) / y_mean)
score[h] = np.nanmean(res)
score_W[h] = np.nanmean(res_W)
score_WF[h] = np.nanmean(res_WF)
skill_W = 100 * (1 - score_W / score)
skill_WF = 100 * (1 - score_WF / score)
print(f'SS_{metric} (W): {skill_W}')
print(f'SS_{metric} (WF): {skill_WF}')
plt.figure(figsize=(3.5, 4))
plt.plot(
score,
linestyle=get_linestyle(model),
color=get_color(model),
marker=MARKERS[0]
)
plt.plot(
score_W,
linestyle=get_linestyle(model_W),
color=get_color(model_W),
marker=MARKERS[1]
)
plt.plot(
score_WF,
linestyle=get_linestyle(model_WF),
color=get_color(model_WF),
marker=MARKERS[2]
)
plt.ylim(6.95, 8.35)
plt.ylabel(metric)
plt.xlabel('Horizon')
plt.xticks(np.arange(len(horizons)), np.array(horizons))
plt.title(model)
_complete_plot(f"{model}_{metric}_horizon", grid=False, legend=False)
def plot_reps(metric, levels=('L0', 'L1', 'L2'), models=None, name=None):
results, info = collect_results()
models = info['models'] if models is None else models
i = info['metrics'].index(metric)
# Lines for second legend
_, ax = plt.subplots()
lines = ax.plot([0, 1], [0, 1], '-C7', [0, 1], [0, 2], '--C7')
plt.close()
plt.figure(figsize=(10, 4))
for j, model in enumerate(models):
m = info['models'].index(model)
reps_mean = []
for level in levels:
if level == 'L3' and 'KF' in model:
# No level 3 results for the KF model
continue
for c, cluster in enumerate(info['levels'][level]['clusters']):
reps_mean.append(results[level][i, m, c])
reps_mean = np.mean(reps_mean, axis=0)
plt.plot(
reps_mean,
label=model if '(' not in model else None,
linestyle=get_linestyle(model),
color=get_color(model)
)
plt.ylabel(metric)
plt.xlabel('Forecast origin')
plt.yticks(np.arange(5, 17, 2.5))
t0 = load_res('LW', 'L0', 'Agg')['t0']
ticks = [dt.datetime.strptime(tstp, '%Y-%m-%d, %H:%M').strftime('%b, %d') for tstp in t0[1::5]]
plt.xticks(np.arange(1, len(t0), 5), ticks, rotation=0)
plt.grid(axis='y')
second_legend = plt.legend(lines, ('no weather', 'actual weather'), loc='upper left')
plt.gca().add_artist(second_legend)
_complete_plot(f"{f'{name}_' if name is not None else ''}{metric}_reps", grid=False)
def plot_clusters(level, metric, models=None, name=None):
results, info = collect_results()
models = info['models'] if models is None else models
i = info['metrics'].index(metric)
plt.figure(figsize=(10, 4))
for model in models:
if level == 'L3' and 'KF' in model:
# No level 3 results for the KF model
continue
m = info['models'].index(model)
clusters_mean = np.mean(results[level][i, m], axis=1)
plt.plot(
clusters_mean,
label=model,
linestyle=get_linestyle(model),
color=get_color(model)
)
plt.ylabel(metric)
cluster_labels = [f"{cluster.replace('ACORN-', '')} ({count})" for cluster, count in zip(
info['levels'][level]['clusters'],
info['levels'][level]['cardinality']
)]
if level == 'L3':
plt.xticks(np.arange(0, len(cluster_labels), 100), np.array(cluster_labels)[::100], rotation=90)
elif level == 'L2':
plt.xticks(np.arange(len(cluster_labels)), cluster_labels, rotation=90)
else:
plt.xticks(np.arange(len(cluster_labels)), cluster_labels)
_complete_plot(f"{f'{name}_' if name is not None else ''}{level}_{metric}_clusters")
def plot_aggregate_size(metric, models=None, name=None):
results, info = collect_results()
models = info['models'] if models is None else models
i = info['metrics'].index(metric)
aggregate_sizes = []
errors = {}
bottom_level_errors = {}
for model in models:
errors[model] = []
bottom_level_errors[model] = []
for level, level_info in info['levels'].items():
for c, agg_size in enumerate(level_info['cardinality']):
if level != 'L3':
aggregate_sizes.append(agg_size)
for model in models:
m = info['models'].index(model)
errors[model].append(np.mean(results[level][i, m, c]))
else:
for model in models:
m = info['models'].index(model)
bottom_level_errors[model].append(np.mean(results[level][i, m, c]))
aggregate_sizes.append(1)
for model in models:
errors[model].append(np.mean(bottom_level_errors[model]))
sorted_idx = np.argsort(aggregate_sizes)
aggregate_sizes = np.array(aggregate_sizes)[sorted_idx]
plt.figure(figsize=(6, 4))
for model in models:
if 'CRPS' in metric and model == 'LW':
# No distributional forecasts for LW model
continue
plt.plot(
aggregate_sizes,
np.array(errors[model])[sorted_idx],
label=model,
linestyle=get_linestyle(model),
color=get_color(model)
)
plt.ylabel(metric)
plt.yticks(np.arange(0, 70, 20))
plt.xlabel('\\# aggregated meters')
plt.xscale('log')
plt.xticks([1, 10, 100, 1000], ['1', '10', '100', '1000'])
_complete_plot(f"{f'{name}_' if name is not None else ''}{metric}_aggregate_size", grid=False)
def get_skill_scores(model, metric, no_L3=False):
results, info = collect_results()
i = info['metrics'].index(metric)
m = info['models'].index(model)
m_W = info['models'].index(model + '(+W)')
m_WF = info['models'].index(model + '(+WF)')
aggregate_sizes = []
score = []
score_W = []
score_WF = []
bottom_level_score = []
bottom_level_score_W = []
bottom_level_score_WF = []
t_train = main.train_val_split(data_analysis.energy_df.index)[0]
u = data_analysis.daily(
data_analysis.get_weather_df(forecast=False).loc[t_train, 'temperature'].to_numpy(float),
reduce=True
)
u_F = data_analysis.daily(
data_analysis.get_weather_df(forecast=True).loc[t_train, 'temperature'].to_numpy(float),
reduce=True
)
corr_W = []
corr_WF = []
bottom_level_corr_W = []
bottom_level_corr_WF = []
for level, level_info in info['levels'].items():
if level == 'L3' and ('KF' in model or no_L3):
# No level 3 results for the KF model
continue
for c, (cluster, agg_size) in enumerate(zip(level_info['clusters'], level_info['cardinality'])):
y = data_analysis.daily(
np.array(data_analysis.get_observations_at(level, cluster, t_train)),
reduce=True
)
if level != 'L3':
aggregate_sizes.append(agg_size)
score.append(np.mean(results[level][i, m, c]))
score_W.append(np.mean(results[level][i, m_W, c]))
score_WF.append(np.mean(results[level][i, m_WF, c]))
corr_W.append(data_analysis.correlation(u, y) ** 2)
corr_WF.append(data_analysis.correlation(u_F, y) ** 2)
else:
bottom_level_score.append(np.mean(results[level][i, m, c]))
bottom_level_score_W.append(np.mean(results[level][i, m_W, c]))
bottom_level_score_WF.append(np.mean(results[level][i, m_WF, c]))
bottom_level_corr_W.append(data_analysis.correlation(u, y) ** 2)
bottom_level_corr_WF.append(data_analysis.correlation(u_F, y) ** 2)
if 'KF' not in model and not no_L3:
aggregate_sizes.append(1)
score.append(np.mean(bottom_level_score))
score_W.append(np.mean(bottom_level_score_W))
score_WF.append(np.mean(bottom_level_score_WF))
corr_W.append(np.mean(bottom_level_corr_W))
corr_WF.append(np.mean(bottom_level_corr_WF))
aggregate_sizes = np.array(aggregate_sizes)
score = np.array(score)
score_W = np.array(score_W)
score_WF = np.array(score_WF)
corr_W = np.array(corr_W)
corr_WF = np.array(corr_WF)
skill_W = 100 * (1 - score_W / score)
skill_WF = 100 * (1 - score_WF / score)
return skill_W, skill_WF, aggregate_sizes, corr_W, corr_WF
def plot_aggregate_size_skill(model, metric):
skill_W, skill_WF, aggregate_sizes, _, _ = get_skill_scores(model, metric)
print(f'Correlation (W): {data_analysis.correlation(np.log(aggregate_sizes), skill_W):.3f}')
print(f'Correlation (WF): {data_analysis.correlation(np.log(aggregate_sizes), skill_WF):.3f}')
plt.figure(figsize=(3.5, 4))
plt.plot([1, 2500], [0, 0], color='grey', linestyle='dashed')
plt.scatter(
aggregate_sizes,
skill_W,
label='W',
marker=MARKERS[0],
color=get_color(model),
edgecolors='none'
)
plt.scatter(
aggregate_sizes,
skill_WF,
label='WF',
marker=MARKERS[1],
color=get_color(model),
edgecolors='none',
alpha=0.5
)
plt.ylabel(f'$SS_{{\\mathrm{{{metric}}}}}$')
plt.xlabel('\\# aggregated meters')
plt.xscale('log')
plt.xticks([1, 10, 100, 1000], ['1', '10', '100', '1000'])
plt.title(model)
_complete_plot(f"{model}_{metric}_aggregate_size_skill", grid=False, legend=False)
def plot_temperature_correlation_skill(model, metric):
skill_W, skill_WF, _, corr_W, corr_WF = get_skill_scores(model, metric, no_L3=True)
print(f'Correlation (W): {data_analysis.correlation(corr_W, skill_W):.3f}')
print(f'Correlation (WF): {data_analysis.correlation(corr_WF, skill_WF):.3f}')
plt.figure(figsize=(3.5, 4))
plt.plot(
[min(np.amin(corr_W), np.amin(corr_WF)), max(np.amax(corr_W), np.amax(corr_WF))],
[0, 0],
color='grey',
linestyle='dashed'
)
plt.scatter(
corr_W,
skill_W,
label='W',
marker=MARKERS[0],
color=get_color(model),
edgecolors='none'
)
plt.scatter(
corr_WF,
skill_WF,
label='WF',
marker=MARKERS[1],
color=get_color(model),
edgecolors='none',
alpha=0.5
)
plt.ylabel(f'$SS_{{\\mathrm{{{metric}}}}}$')
plt.xlabel('Temperature corr. [$R^2$]')
plt.title(model)
_complete_plot(f'{model}_{metric}_temperature_correlation_skill', grid=False, legend=False)
def plot_coverage(levels=('L0', 'L1', 'L2'), models=None, name=None):
_, info = collect_results()
models = info['models'] if models is None else models
p = np.linspace(0, 1, 101)
plt.figure(figsize=(3.5, 4))
for j, model in enumerate(models):
pit = []
for level in levels:
if level == 'L3' and 'KF' in model:
# No level 3 results for the KF model
continue
for c, cluster in enumerate(info['levels'][level]['clusters']):
res = load_res(model, level, cluster)
pit += list(np.ravel(res['PIT']))
        coverage = np.mean(p[:, np.newaxis] > np.array(pit), axis=1)
        # Assumed continuation: empirical vs. nominal coverage per model,
        # with a diagonal marking perfect calibration.
        plt.plot(
            p,
            coverage,
            label=model,
            linestyle=get_linestyle(model),
            color=get_color(model)
        )
    plt.plot(p, p, color='grey', linestyle='dashed')
    plt.ylabel('Empirical coverage')
    plt.xlabel('Nominal coverage')
    _complete_plot(f"{f'{name}_' if name is not None else ''}coverage", grid=False)
import numpy as np
import pgl
import paddle.fluid as fluid
def to_undirected(graph):
""" to_undirected """
inv_edges = np.zeros(graph.edges.shape)
inv_edges[:, 0] = graph.edges[:, 1]
inv_edges[:, 1] = graph.edges[:, 0]
edges = np.vstack((graph.edges, inv_edges))
edges = np.unique(edges, axis=0)
# print(edges.shape)
g = pgl.graph.Graph(num_nodes=graph.num_nodes, edges=edges)
for k, v in graph._node_feat.items():
g._node_feat[k] = v
return g
def add_self_loop(graph):
""" add_self_loop """
self_loop_edges = np.zeros((graph.num_nodes, 2))
self_loop_edges[:, 0] = self_loop_edges[:, 1]=np.arange(graph.num_nodes)
edges = np.vstack((graph.edges, self_loop_edges))
edges = np.unique(edges, axis=0)
# print(edges.shape)
g = pgl.graph.Graph(num_nodes=graph.num_nodes, edges=edges)
for k, v in graph._node_feat.items():
g._node_feat[k] = v
return g
def linear_warmup_decay(learning_rate, warmup_steps, num_train_steps):
""" Applies linear warmup of learning rate from 0 and decay to 0."""
with fluid.default_main_program()._lr_schedule_guard():
lr = fluid.layers.tensor.create_global_var(
shape=[1],
value=0.0,
dtype='float32',
persistable=True,
name="scheduled_learning_rate")
global_step = fluid.layers.learning_rate_scheduler._decay_step_counter()
with fluid.layers.control_flow.Switch() as switch:
with switch.case(global_step < warmup_steps):
warmup_lr = learning_rate * (global_step / warmup_steps)
fluid.layers.tensor.assign(warmup_lr, lr)
with switch.default():
decayed_lr = fluid.layers.learning_rate_scheduler.polynomial_decay(
learning_rate=learning_rate,
decay_steps=num_train_steps,
end_learning_rate=0.0,
power=1.0,
cycle=False)
fluid.layers.tensor.assign(decayed_lr, lr)
return lr, global_step
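# Usage sketch (hyperparameters are illustrative): inside a fluid program,
#   lr, global_step = linear_warmup_decay(1e-2, warmup_steps=1000, num_train_steps=10000)
# then hand `lr` to the optimizer so the per-step schedule is applied.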
def add_vnode(graph, num_vnode=1):
""" add_vnode """
num_nodes = graph.num_nodes + num_vnode
src = np.tile(np.arange(num_nodes), [num_vnode, 1]).reshape(-1)
dst = np.tile(np.arange(graph.num_nodes, num_nodes), [num_nodes, 1]).T.reshape(-1)
new_edges = np.stack([src, dst]).T
    edges = np.vstack((graph.edges, new_edges))
    # assumed continuation, mirroring to_undirected/add_self_loop above
    # (note: copied node features do not cover the newly added virtual nodes)
    g = pgl.graph.Graph(num_nodes=num_nodes, edges=edges)
    for k, v in graph._node_feat.items():
        g._node_feat[k] = v
    return g
import xarray as xr
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import image
import random
import os
import math
import argparse
"""
This script plots the results of training and testing with the pix2pix algorithm. It returns the following:
- if only one job is added, it evaluates that job by printing the average error overall and in each wind direction, and it plots sum(flux*real) vs sum(flux*fake) (if size_cut == size_output), or sum(real) vs sum(fake) otherwise.
- if more than one job is added, it returns the above for each job plus two comparative graphs:
- % of correctly identified pixels (as white or non white)
- average total error overall
"""
# Initialise parser and define arguments
parser = argparse.ArgumentParser(description='generate pix2pix dataset from netcdf files')
parser.add_argument('--jobname', required=True, nargs='*', help='jobname to evaluate or jobnames to evaluate and compare')
parser.add_argument('--size_cut', type=int, default = 256, help = 'Size file was cut to during data generation')
parser.add_argument('--size_output', type=int, default = 256, help = 'Size of output figure inputted during data generation. If size_cut is not equal to size output, the flux information will not be used.')
args = parser.parse_args()
def rgb2gray(rgb):
# greyscale goes from 0 (black) to 1 (white)
grey = np.dot(rgb[...,:3], [0.2989, 0.5870, 0.1140])
# it is inverted so it goes from 0 (white) to 1 (black)
return abs(grey - 1)
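# Illustrative usage (hypothetical): for an RGB array img with values in [0, 1],
#   grey = rgb2gray(img)
# yields 0 for pure white pixels and 1 for pure black ones, matching the inverted
# convention noted in the comments above.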
def get_flux(size, met_data): # not functional yet - cutting to correct size needs fixing
# get release coordinates
fp_data = xr.open_dataset("/work/al18242/ML_summer_2020/MHD-10magl_EUROPE_201801.nc")
# find latitude-longitude datapoints in metdata closest to the release coordinates
# get emissions map
emissions_map = xr.open_dataset("/work/al18242/ML_summer_2020/ch4_EUROPE_2013.nc")
release_lat = min(emissions_map.lat.values, key=lambda x:abs(x-fp_data.release_lat[0]))
release_lon = min(emissions_map.lon.values, key=lambda x:abs(x-fp_data.release_lon[0]))
# find the index of these datapoints and determine boundaries
lat_bound =[int(np.where(emissions_map.lat.values == release_lat)[0][0]-size/2), int(np.where(emissions_map.lat.values == release_lat)[0][0]+size/2)]
lon_bound =[int(np.where(emissions_map.lon.values == release_lon)[0][0]-size/2), int(np.where(emissions_map.lon.values == release_lon)[0][0]+size/2)]
print(lat_bound, lon_bound)
# cut to dimensions
    print(np.shape(emissions_map.flux.values))
# ------------------------------------------------------------------------
# MIT License
#
# Copyright (c) [2021] [<NAME>]
#
# This code is part of the library PyDL <https://github.com/nash911/PyDL>
# This code is licensed under MIT license (see LICENSE.txt for details)
# ------------------------------------------------------------------------
import numpy as np
import pickle
import matplotlib.pyplot as plt
from pydl.nn.layers import FC
from pydl.nn.conv import Conv
from pydl.nn.pool import Pool
from pydl.nn.nn import NN
from pydl.training.adam import Adam
def get_data(file_path):
data_files = ['data_batch_1', 'data_batch_2', 'data_batch_3', 'data_batch_4',
'data_batch_5', 'test_batch']
meta_file = 'batches.meta'
data = list()
labels = list()
class_names = None
for df in data_files:
with open(file_path + df, 'rb') as batch_file:
data_dict = pickle.load(batch_file, encoding='bytes')
data.append(np.reshape(data_dict[b'data'], newshape=(-1, 3, 32, 32)))
labels.append(data_dict[b'labels'])
with open(file_path + meta_file, 'rb') as meta_f:
meta_dict = pickle.load(meta_f, encoding='bytes')
class_names = meta_dict[b'label_names']
class_names = [c_name.decode("utf-8") for c_name in class_names]
return np.vstack(data), np.hstack(labels), class_names
def main():
X, y, class_names = get_data('data/CIFAR-10/')
    K = np.max(y)
import numpy as np
from random import random
from scipy.interpolate import pchip_interpolate
import matplotlib.pyplot as plt
#####################
# STEP 1 # import the data
#####################
# uncomment for a toy example
#n=5 # problem size
#A = 10*np.random.rand(n,n+3) # n stocks with n+3 different prices
#A = np.abs(A)
# load the data: it must be pre-processed - 9:00AM-3:30PM prices - about 62 obs.
A = np.transpose( np.loadtxt('data.txt') ) # it is a tiny example
print('There are ', A.shape[0], ' stocks and ',A.shape[1],'observations.')
stock_number = A.shape[0]
# Important : the rows represent each stock and the columns represent prices at certain moment
#####################
# STEP 2 # interpolate
#####################
### toy example 1
#data = np.array([[0, 4, 12, 27, 47, 60, 79, 87, 99, 100], [-33, -33, -19, -2, 12, 26, 38, 45, 53, 55]])
#xx = np.linspace(0,100,200)
#curve = pchip_interpolate(data[0], data[1],xx)
#plt.plot(xx, curve, "x"); #plt.plot(data[0],data[1],"o"); #plt.show()
### toy example 2
#x = np.arange(A.shape[1])
#m = 200 # number of the final data points
#xx = np.linspace(0,x[-1],200)
#curve1 = pchip_interpolate(x , A[0,:], xx)
#print(curve1.shape, type(curve1)) ; #plt.plot(xx, curve1, "x"); #plt.plot(x,A[0,:],"o") ;#plt.show()
m = 200
curve_save = np.zeros((stock_number,m)) # array created to save the interpolated data
for ii in range(stock_number): # loop through each stock
    x = np.arange(A.shape[1]) # observation indices (shared by all stocks)
    xx = np.linspace(0, x[-1], m) # m = 200 interpolation points, set above
    # Hypothetical completion: interpolate this stock's prices onto the fine grid
    curve_save[ii,:] = pchip_interpolate(x, A[ii,:], xx)
import numpy as np
import random
import numexpr as ne
def gen_layer(rin, rout, nsize):
R = 1.0
phi = np.random.uniform(0, 2*np.pi, size=(nsize))
costheta = np.random.uniform(-1, 1, size=(nsize))
u = np.random.uniform(rin**3, rout**3, size=(nsize))
theta = np.arccos( costheta )
r = R * np.cbrt( u )
x = r * np.sin( theta ) * np.cos( phi )
y = r * np.sin( theta ) * np.sin( phi )
z = r * np.cos( theta )
return( x, y, z )
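# Note on gen_layer: drawing u uniformly from [rin**3, rout**3] and taking the cube
# root is inverse-CDF sampling of the radial density p(r) ~ r**2, so the returned
# points are uniform by volume within the spherical shell between rin and rout.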
def LPFbead(qrange, sigmabead):
'''
Compute the spherical form factor given a range of q values.
Parameters
----------
qrange: numpy.array
array of values in q-space to compute form factor for.
sigmabead: float
diameter of the sphere.
Return
-------
Fqb: numpy.array
array of values of the spherical form factors (F(q)) computed at q-points listed in qrange.
'''
R=np.true_divide(sigmabead,2)
QR=np.multiply(qrange,R)
Fqb=np.multiply(np.true_divide(np.sin(QR)-np.multiply(QR,np.cos(QR)),np.power(QR,3)),3)
return Fqb
def LPOmega(qrange, nAin, nAout, nB, r): # qvalues number_of_B number_of_A scatterer_coordinates
Ntot=nAin+nB+nAout # Total number of scatterers to loop through
omegaarrt=np.zeros((1,len(qrange))) # initiating array
omegaarr=np.zeros((1,len(qrange))) # initiating array
    rur=r[0,:,:] # selects the coordinates of the single stored replicate
rur=rur.transpose()
for i in range(Ntot-1): # loops through index and all further indexes to prevent double counting
all_disp = rur[i,:]-rur[(i+1):,:]
rij = np.sqrt(np.sum(np.square(all_disp),axis=1))
rij = rij.transpose()
rs = rij[:,np.newaxis] # reshapes array for consistency
Q = qrange[np.newaxis,:] # reshapes array for consistency
vals = ne.evaluate("sin(Q*rs)/(Q*rs)") # ne is efficient at calculations
inds=np.argwhere(np.isnan(vals)) # error catching in case there are NaN values
if len(inds)>0:
for val in inds:
vals[val[0],val[1]]=1
inds_double_check=np.argwhere(np.isnan(vals))
if len(inds_double_check)>0:
print('nan error!')
vals = ne.evaluate("sum((vals), axis=0)") # adds together scatterer contributions for each q value
omegaarr+=vals
    omegaarr=np.true_divide(2*omegaarr,Ntot)+1 # +1 accounts for the guaranteed overlap of each bead with itself; 2* accounts for the double counting avoided above (each pair is visited once)
omegaarrt+=omegaarr # stores values between loops
return omegaarrt
def visualize(r, Rcore, dR_Ain, dR_B, dR_Aout, sigmabead):
import py3Dmol
view = py3Dmol.view()
for ri in r[0,:,:].transpose():
if np.linalg.norm(ri) < Rcore+dR_Ain or np.linalg.norm(ri) > (Rcore+dR_Ain+dR_B):
col = 'blue'
else:
col = 'red'
view.addSphere(
{
'center': {'x': ri[0], 'y': ri[1], 'z': ri[2]},
'radius': sigmabead/2,
'color': col,
'alpha': 0.9,
}
)
#view.zoomTo()
view.show()
return view
def genLP(Rcore, dR_Ain, dR_B, dR_Aout, sigmabead, nAin, nAout, nB):
# core radius, inner A layer thickness, B layer thickness, outer A layer thickness,
# bead diameter, # of inner A beads, # of outer A beads, # of B beads
ntot = nAin+nB+nAout
power = 2
r = np.zeros((1, 3, ntot))
types = np.zeros((ntot))
### Create configuration for each replicate with dispersity ###
for step in range(0, 1):
### Populate A inner Layer ###
x, y, z = gen_layer(Rcore, Rcore+dR_Ain, nAin)
for i in range(nAin):
r[0,:,i] = [x[i], y[i], z[i]]
types[i] = 1
### Populate B middle Layer ###
x, y, z = gen_layer(Rcore+dR_Ain, Rcore+dR_Ain+dR_B, nB)
for i in range(nB):
r[0,:,i+nAin] = [x[i], y[i], z[i]]
types[i+nAin] = 2
### Populate A outer Layer ###
x, y, z = gen_layer(Rcore+dR_Ain+dR_B, Rcore+dR_Ain+dR_B+dR_Aout, nAout)
for i in range(nAout):
r[0,:,i+nAin+nB] = [x[i], y[i], z[i]]
types[i+nAin+nB] = 1
return r
class scatterer_generator:
'''
The wrapper class for vesicle shape. Default length unit: Angstrom.
Notes
-----
**The following 7 shape-specific descriptors are to be specified by user (see
*Attributes*) as
a list, in the precise order as listed, while calling `Model.load_shape`
to load this shape:**
num_scatterers:
Number of scatterers used to represent a chain. Default: 24
N:
Number of monomers in a chain. Default: 54
eta_B:
Packing fraction of scatterers in B layer. Default: 0.5
lmono_b:
Diameter of a monomer of chemistry B. Default: 50.4 A
lmono_a:
Diameter of a monomer of chemistry A. Default: 50.4 A
fb:
Fraction of monomers in chain that are of B type. fa = 1-fb. Default: 0.55
nLP:
Number of replicates for each individual. Default: 7
**The following 7 parameters are to be predicted, in the precise order
as listed, by GA:**
R_core:
Core radius. Default [min,max]: [50 A, 400 A]
t_Ain:
Thickness of inner A layer. Default [min,max]: [30 A, 200 A]
t_B:
Thickness of B layer. Default [min,max]: [30 A, 200 A]
t_Aout:
Thickness of outer A layer. Default [min,max]: [30 A, 200 A]
sigma_Ain:
Split of solvophilic scatterers between inner and outer layers.
Default [min,max]: [0.1, 0.45]
sigma_R:
Dispersity in vesicle size as implemented in the core radius.
Default [min,max]: [0.0, 0.45]
log10(bg):
Negative log10 of background intensity.
E.g. an background intensity of 0.001 leads to this value being 3.
Default [min,max]: [0.1,4]
See also
--------
crease_ga.Model.load_shape
'''
def __init__(self,
shape_params = [24,54,0.5,50.4,50.4,0.55,7],
minvalu = (50, 30, 30, 30, 0.1, 0.0, 0.1),
maxvalu = (400, 200, 200, 200, 0.45, 0.45, 4)):
num_scatterers = shape_params[0]
N = shape_params[1]
rho_B = shape_params[2]
lmono_a = shape_params[3]
lmono_b= shape_params[4]
fb = shape_params[5]
nLP = shape_params[6]
self._numvars = 7
self.minvalu = minvalu
self.maxvalu = maxvalu
self.num_scatterers=num_scatterers ## number of scatterers per chain
self.N=N ## Number of beads on chain
self.rho_B=rho_B ## density/volume fraction of beads in B layer
self.lmono_a=lmono_a ## Angstrom 'monomer contour length'
self.lmono_b=lmono_b ## Angstrom 'monomer contour length'
self.MB=np.pi/6*(self.lmono_b)**3 ## volume of B monomer
        self.sigmabead = np.true_divide(self.N*self.lmono_b, self.num_scatterers) ## bead diameter (chain contour length per scatterer)
import tempfile
import shutil
import os
import pandas
import numpy as np
import datetime
import pkg_resources
from unittest import TestCase
from dfs.nba.featurizers import feature_generators
from dfs.nba.featurizers import fantasy_points_fzr, last5games_fzr, nf_stats_fzr, vegas_fzr, \
opp_ffpg_fzr, salary_fzr
class FeaturizersTest(TestCase):
def setUp(self):
# A little test data from the past few years, useful for testing BREF data
testfn = pkg_resources.resource_filename(__name__, 'test.pickle')
self.data = pandas.read_pickle(testfn)
# More recent test data -- necessary for testing external data
recentfn = pkg_resources.resource_filename(__name__, 'recent.pickle')
self.recentdata = pandas.read_pickle(recentfn)
def testDataIntegrity(self):
assert len(self.data) == 10
assert self.data.iloc[0]['bref_id'] == 'gallola01'
assert self.data.iloc[9]['bref_id'] == 'dunlemi02'
assert len(self.recentdata) == 10
assert self.recentdata.iloc[0]['bref_id'] == 'barnema02'
assert self.recentdata.iloc[9]['bref_id'] == 'lawsoty01'
def testDecorator(self):
# Make sure the decorator is properly wrapping functions and turning their list outputs into pandas.Series
for func_name in feature_generators:
assert isinstance(func_name, basestring)
wrapper, columns, live = feature_generators[func_name]
output = wrapper(self.data.iloc[0])
self.assertTrue(isinstance(output, pandas.Series))
self.assertItemsEqual(columns, output.index)
def applyFeaturizer(self, fzr_function, expected_output, use_recent=False):
data = self.recentdata if use_recent else self.data
for integer_index, (_, row) in enumerate(data.iterrows()):
actual_output = fzr_function(row)
for i in range(len(expected_output[integer_index])):
# First check if they're both NaN
                if np.isnan(expected_output[integer_index][i]) and np.isnan(actual_output.iloc[i]):
                    continue
                # Hypothetical completion: otherwise the two values should agree.
                self.assertAlmostEqual(expected_output[integer_index][i], actual_output.iloc[i])
import numpy as np
import pandas as pd
from pandas import DataFrame
from typing import Tuple
from plotnine import *
import math
def fit_linear_regression(mat_x: np.array, res_vec: np.array) -> Tuple[np.array, np.array]:
"""
Linear Regression solver
Parameters:
:param mat_x: The design matrix X (np array)
:param res_vec: Response Vector y (np array)
Returns: Tuple of the coefficient vector and the singular values of X.
"""
    ones_vec = np.ones(mat_x.shape[1])  # vector of ones
mat_x = np.vstack((ones_vec, mat_x)) # adding ones to the matrix
mat_x_t = mat_x.transpose() # transposing after adding one
return np.linalg.pinv(mat_x_t) @ res_vec, np.linalg.svd(mat_x_t, compute_uv=False)
def predict(x: np.array, coef_v: np.array) -> np.array:
"""
Prediction function
:param x: Design matrix.
:param coef_v: Coefficient vector.
:return: The prediction of the result vector.
"""
return np.dot(x.transpose(), coef_v)
def mse(res_vec: np.array, prediction_vec: np.array) -> float:
"""
Mean Square Error function.
:param res_vec: Response vector.
:param prediction_vec: Prediction vector.
:return: The error.
"""
return (1/float(res_vec.size)) * (np.linalg.norm(prediction_vec - res_vec)**2)
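# Illustrative usage (hypothetical shapes): with design matrix X of shape (d, n) and
# response y of shape (n,),
#   w, singulars = fit_linear_regression(X, y)
#   X_aug = np.vstack((np.ones(X.shape[1]), X))  # same augmentation applied inside fit
#   y_hat = predict(X_aug, w)
#   err = mse(y, y_hat)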
def load_data(path: str) -> np.array:
"""
Loads the data into a matrix (np array).
:param path: The path to the csv of the data.
:return: Data design matrix.
"""
try:
data = pd.read_csv(path)
except FileNotFoundError:
print("FAILED TO FIND THE DATA LOCATION!")
return
except Exception:
print("AN ERROR OCCURRED WHILE LOADING THE DATA!")
return
# filt1 = data['bedrooms'] < 33
filt_non_positive = (data['price'] > 0) & (data['sqft_lot15'] > 0) & (data['sqft_living'] > 0) & \
(data['floors'] > 0)
filt_condition = (data['condition'] <= 5) & (data['condition'] >= 1)
filt_year_built = data['yr_built'] <= 2015
filt_date = data['date'].notnull()
filt_id = data['id'].notnull()
data = data.loc[filt_non_positive & filt_condition & filt_year_built & filt_date & filt_id] # apply filters
data = data.drop_duplicates() # drop duplicates
data = categorical_features(data) # address categorical features
data = data.drop(['id', 'date', 'zipcode'], axis=1) # drop the categorical columns and the id
# data.to_csv("./lol.csv") # save csv for myself
return data
def categorical_features(data: np.array) -> np.array:
"""
Addressing the categorical features with one hot encoding solution.
:param data: The data in a form of an np array.
:return: The processed data.
"""
# addressing zip code (One hot encoding)
zips = data['zipcode']
data = pd.concat([data, pd.get_dummies(zips)], axis=1)
# addressing dates (Cleaning + One hot encoding)
dates = data['date']
dates = pd.concat([dates.str.slice(0, 4), dates.str.slice(4, 6), dates.str.slice(6, 8)], axis=1)
dates.columns = ['year', 'month', 'day'] # renaming the columns for easier access
year, month, day = dates['year'], dates['month'], dates['day']
data = pd.concat([data, pd.get_dummies(year)], axis=1)
data = pd.concat([data, pd.get_dummies(month)], axis=1)
data = pd.concat([data, pd.get_dummies(day)], axis=1)
return data
def plot_singular_values(singular_values: iter):
"""
Given some singular values plots the scree plot.
:param singular_values: Singular values collection
:return: ggplot.
"""
y = singular_values
y.sort()
y = y[::-1]
x = [index for index in range(1, len(singular_values) + 1)]
df = DataFrame({'x': x, 'y': y})
return ggplot(df, aes(x='x', y='y')) + geom_point(size=1) + geom_line() + \
ggtitle("Scree plot of the singular values") + \
labs(y="Singular value", x="Component Number")
def question_15(data):
"""
loading the data and plotting the singular values
:return: plot of the singular values (scree plot)
"""
data = data.drop(['price'], axis=1) # drop price
data_np = data.transpose()
    ones_vec = np.ones(data_np.shape[1]) # vector of ones
mat_x = np.vstack((ones_vec, data_np)) # adding ones to the matrix
mat_x_t = mat_x.transpose() # transposing after adding one
singulars = np.linalg.svd(mat_x_t, compute_uv=False)
return plot_singular_values(singulars)
def split_data_train_and_test(data):
"""
Splits the data into train and test-sets randomly, such that the size
of the test set is 1/4 of the total data, and 3/4 of the data as training data.
:param data: Not splitted data.
:return: Splitted data.
"""
total_data = len(data)
np.random.seed(7)
msk = np.random.rand(total_data) < 0.75
train = data[msk]
test = data[~msk]
return train, test
def question_16(data):
training_data, testing_data = split_data_train_and_test(data)
real_price_vec = testing_data['price']
testing_data = testing_data.drop(['price'], axis=1)
testing_data = testing_data.transpose()
ones_vec = np.ones(testing_data.shape[1]) # vectors of ones
testing_data = np.vstack((ones_vec, testing_data)) # adding ones to the matrix
price_vector = training_data['price']
training_data = training_data.drop(['price'], axis=1)
mses = []
for i in range(1, 101):
train_number = i / 100
rows = math.floor(train_number*len(training_data))
mat_x = training_data[:math.floor(train_number*len(training_data))]
mat_x = mat_x.transpose()
w, singulars = fit_linear_regression(mat_x, price_vector[:rows])
pred = predict(testing_data, w)
mses.append(mse(real_price_vec, pred))
return mses
def plot_results(res):
"""
plots the MSE over the test set as a function of p%
:param res: results.
:return: plot
"""
x = [index for index in range(1, 101)]
df = DataFrame({'x': x, 'y': res})
return ggplot(df, aes(x='x', y='y')) + geom_point(size=1) + geom_line() + \
ggtitle("MSE over the test set as a function of p%") + \
labs(y="MSE", x="p% (precent of the data trained)")
def plot_scatter_features_values(vector_1, res_v, name):
"""
plots the non categorical features to the screen.
:param vector_1: the vector of the feature.
:param res_v: the price vector.
:param name: the name of the feature.
:return: a plot.
"""
    cov_mat = np.cov(vector_1, res_v, ddof=1)
    # Hypothetical completion: Pearson correlation from the 2x2 covariance matrix,
    # shown in the title of the returned scatter plot.
    pearson = cov_mat[0, 1] / np.sqrt(cov_mat[0, 0] * cov_mat[1, 1])
    df = DataFrame({'x': vector_1, 'y': res_v})
    return ggplot(df, aes(x='x', y='y')) + geom_point(size=1) + \
           ggtitle("Price as a function of " + name + " (Pearson: " + str(round(pearson, 3)) + ")") + \
           labs(y="Price", x=name)
import sys
from typing import List
import numpy as np
import math
#from memory_profiler import profile
#from pypcd import pypcd
from pypcd import pypcd
from config import CAMERA_ICL, PM, MAP_IP
import cv2
import read_office
import evaluate_ate
import evaluate_rpe
from plane_extraction import *
import open3d as o3d
import os
import subprocess as sp
import argparse
from mrob.mrob import FGraph, geometry, registration, LM
def rotation_matrix_to_quaternion(r_matrix):
# First row of the rotation matrix
r00 = r_matrix[0, 0]
r01 = r_matrix[0, 1]
r02 = r_matrix[0, 2]
# Second row of the rotation matrix
r10 = r_matrix[1, 0]
r11 = r_matrix[1, 1]
r12 = r_matrix[1, 2]
# Third row of the rotation matrix
r20 = r_matrix[2, 0]
r21 = r_matrix[2, 1]
r22 = r_matrix[2, 2]
tr = r00 + r11 + r22
if tr > 0:
s = math.sqrt(tr+1.0) * 2
qw = 0.25 * s
qx = (r21 - r12) / s
qy = (r02 - r20) / s
qz = (r10 - r01) / s
elif r00 > r11 and r00 > r22:
s = math.sqrt(1.0 + r00 - r11 - r22) * 2
qw = (r21 - r12) / s
qx = 0.25 * s
qy = (r01 + r10) / s
qz = (r02 + r20) / s
elif r11 > r22:
s = math.sqrt(1.0 + r11 - r00 - r22) * 2
qw = (r02 - r20) / s
qx = (r01 + r10) / s
qy = 0.25 * s
qz = (r12 + r21) / s
else:
s = math.sqrt(1.0 + r22 - r00 - r11) * 2
qw = (r10 - r01) / s
qx = (r02 + r20) / s
qy = (r12 + r21) / s
qz = 0.25 * s
q = [qx, qy, qz, qw]
return q
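# Sanity check: the identity rotation maps to the unit quaternion,
#   rotation_matrix_to_quaternion(np.eye(3))  ->  [0.0, 0.0, 0.0, 1.0]   # (x, y, z, w)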
def image_processing(function, depth_annot, camera_intrinsics, map_indx_points, planes, func_indx, planes_matcher):
points_of_images = []
colors_of_images = []
matrix_v = None
i = 0
for image, depth in depth_annot:
matrix_color = cv2.imread(image, cv2.IMREAD_COLOR)
matrix_depth = cv2.imread(depth, cv2.IMREAD_ANYDEPTH)
print(image)
if matrix_v is None:
rows, columns, _ = matrix_color.shape
columns_indices = np.arange(columns)
matrix_v = np.tile(columns_indices, (rows, 1))
matrix_u = np.transpose(np.tile(np.arange(rows), (columns, 1)))
matrix_xyz = convert_from_plane_to_3d(
matrix_u,
matrix_v,
matrix_depth,
camera_intrinsics
) # getting xyz coordinates of each point
points_of_image = matrix_xyz.reshape(-1, matrix_xyz.shape[2]) # now we have a list of points
colors_of_image = matrix_color.reshape(-1, matrix_color.shape[2])
points_of_images.append(points_of_image)
colors_of_images.append(colors_of_image)
if function != None:
if func_indx == 1:
function(points_of_image, colors_of_image, planes_matcher, map_indx_points)
elif func_indx == 2:
function(points_of_image, colors_of_image, planes_matcher, map_indx_points, planes)
elif func_indx == 3:
if i == 0:
indx, cur_planes = function(points_of_image, colors_of_image, map_indx_points, 0, None)
planes.append(cur_planes)
else:
indx, cur_planes = function(points_of_image, colors_of_image, map_indx_points, indx, planes[i-1])
planes.append(cur_planes)
i += 1
return points_of_images, colors_of_images
def image_processing_office(function, colors_files, depths_files, camera_intrinsics,
map_indx_points,
planes,
planes_matcher,
first_or_sec):
points_of_images = []
colors_of_images = []
for i, image in enumerate(colors_files):
colors_of_image = cv2.imread(image, cv2.IMREAD_COLOR)
colors_of_image = colors_of_image.reshape(-1, colors_of_image.shape[2])
colors_of_images.append(colors_of_image)
points_of_image = read_office.getting_points(i, depths_files, camera_intrinsics)
points_of_images.append(points_of_image)
print(image)
if first_or_sec == 1:
function(points_of_image, colors_of_image, planes_matcher, map_indx_points)
elif first_or_sec == 2:
function(points_of_image, colors_of_image, planes_matcher, map_indx_points, planes)
return points_of_images, colors_of_images
def point_cloud_processing(data_directory, map_indx_points, planes, indx_to_color):
folders = os.listdir(data_directory)
points_of_images = []
colors_of_images = []
annot_of_images = []
set_of_generated_colors = set()
set_of_generated_colors.add("0#0#0")
planes_global_indx = 0
true_colors = []
for j, folder in enumerate(folders):
cur_path = os.path.join(data_directory, folder)
npy, pcd = os.listdir(cur_path)
print(npy)
annot_of_image = np.load(os.path.join(cur_path, npy))
path_to_pc = os.path.join(cur_path, pcd)
pc = o3d.io.read_point_cloud(path_to_pc)
pc_of_image = np.asarray(pc.points)/1000
pc = None
# pc = o3d.geometry.PointCloud()
# pc.points = o3d.utility.Vector3dVector(pc_of_image)
# o3d.visualization.draw_geometries([pc])
colors_of_image = np.zeros(pc_of_image.shape)
annot_unique = np.unique(annot_of_image, axis=0)
# for indx in annot_unique:
# indices = np.where((annot_of_image == indx))
# cur_color = generate_color(set_of_generated_colors)
# if indx != 1:
# colors_of_image[indices[0]] = cur_color
# colors_of_images.append(np.asarray(colors_of_image))
# if function_indx == 1:
# building_maps_frontend(pc_of_image, annot_of_image, planes_matcher, map_indx_points)
points_of_images.append(pc_of_image)
annot_of_images.append(annot_of_image)
colors_of_images.append(colors_of_image)
if j == 0:
planes_global_indx, cur_planes, cur_true_colors = planes_extraction_for_frontend(pc_of_image, annot_of_image,
planes_global_indx, map_indx_points, None, indx_to_color, set_of_generated_colors)
planes.append(cur_planes)
true_colors.append(cur_true_colors)
# pc = o3d.geometry.PointCloud()
# pc.points = o3d.utility.Vector3dVector(pc_of_image)
# pc.colors = o3d.utility.Vector3dVector(cur_true_colors.astype(np.float64) / 255.0)
# o3d.visualization.draw_geometries([pc])
else:
planes_global_indx, cur_planes, cur_true_colors = planes_extraction_for_frontend(pc_of_image, annot_of_image,
planes_global_indx, map_indx_points, planes[j-1], indx_to_color, set_of_generated_colors)
planes.append(cur_planes)
true_colors.append(cur_true_colors)
# pc = o3d.geometry.PointCloud()
# pc.points = o3d.utility.Vector3dVector(pc_of_image)
# pc.colors = o3d.utility.Vector3dVector(cur_true_colors.astype(np.float64) / 255.0)
# o3d.visualization.draw_geometries([pc])
    return np.asarray(points_of_images), np.asarray(true_colors), np.asarray(annot_of_images)
import json
import os
from datetime import date
from datetime import datetime
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.stats import ks_2samp
from sklearn.metrics import roc_auc_score, accuracy_score
from statistics_utils import kl_divergence
def compute_parameter_size(feature_extractor_architecture):
all_num_param = 0
for archi in feature_extractor_architecture:
for i in range(1, len(archi)):
all_num_param += archi[i] * archi[i - 1]
print(f"{archi} has # of parameters:{all_num_param}")
return all_num_param
def create_id_from_hyperparameters(hyper_parameter_dict):
hyper_param_list = [key + str(value) for key, value in hyper_parameter_dict.items()]
return "_".join(hyper_param_list)
def get_latest_timestamp(timestamped_file_name, folder):
timestamp_list = []
for filename in os.listdir(folder):
if filename.startswith(timestamped_file_name):
maybe_timestamp = filename.split("_")[-1]
# print("[DEBUG] [get_latest_timestamp()] maybe_timestamp: ", maybe_timestamp)
if maybe_timestamp.endswith(".json"):
timestamp = int(maybe_timestamp.split(".")[0])
else:
timestamp = int(maybe_timestamp)
timestamp_list.append(timestamp)
timestamp_list.sort()
latest_timestamp = timestamp_list[-1]
return latest_timestamp
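# Example (hypothetical): with dann_exp_result_1610000000.json and
# dann_exp_result_1620000000.json in `folder`,
#   get_latest_timestamp("dann_exp_result", folder)
# returns 1620000000.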
def get_timestamp():
return int(datetime.utcnow().timestamp())
def get_current_date():
return date.today().strftime("%Y%m%d")
def save_dann_experiment_result(root, task_id, param_dict, metric_dict, timestamp):
task_folder = task_id
task_root_folder = os.path.join(root, task_folder)
if not os.path.exists(task_root_folder):
os.makedirs(task_root_folder)
result_dict = dict()
result_dict["lr_param"] = param_dict
result_dict["metrics"] = metric_dict
file_name = "dann_exp_result_" + str(timestamp) + '.json'
file_full_name = os.path.join(task_root_folder, file_name)
with open(file_full_name, 'w') as outfile:
json.dump(result_dict, outfile)
def load_dann_experiment_result(root, task_id, timestamp=None):
task_folder = "task_" + task_id
task_folder_path = os.path.join(root, task_folder)
if not os.path.exists(task_folder_path):
raise FileNotFoundError(f"{task_folder_path} is not found.")
experiment_result = "dann_exp_result"
if timestamp is None:
timestamp = get_latest_timestamp(experiment_result, task_folder_path)
print(f"[INFO] get latest timestamp {timestamp}")
experiment_result_file_name = str(experiment_result) + "_" + str(timestamp) + '.json'
experiment_result_file_path = os.path.join(task_folder_path, experiment_result_file_name)
if not os.path.exists(experiment_result_file_path):
raise FileNotFoundError(f"{experiment_result_file_path} is not found.")
with open(experiment_result_file_path) as json_file:
print(f"[INFO] load experiment result file from {experiment_result_file_path}")
dann_exp_result_dict = json.load(json_file)
return dann_exp_result_dict
def test_classifier(model, data_loader, tag):
print(f"---------- {tag} classification ----------")
correct = 0
n_total = 0
y_pred_list = []
y_real_list = []
y_pos_pred_prob_list = []
model.change_to_eval_mode()
for batch_idx, (data, label) in enumerate(data_loader):
label = label.flatten()
n_total += len(label)
batch_corr, y_pred, pos_y_prob = model.calculate_classifier_correctness(data, label)
correct += batch_corr
y_real_list += label.tolist()
y_pred_list += y_pred.tolist()
y_pos_pred_prob_list += pos_y_prob.tolist()
acc = correct / n_total
auc_0 = roc_auc_score(y_real_list, y_pred_list)
auc_1 = roc_auc_score(y_real_list, y_pos_pred_prob_list)
get_ks = lambda y_pred, y_true: ks_2samp(y_pred[y_true == 1], y_pred[y_true != 1]).statistic
    ks = get_ks(np.array(y_pos_pred_prob_list), np.array(y_real_list))
    # Hypothetical completion: report and return the collected metrics.
    print(f"[INFO] {tag} - acc: {acc}, auc(pred): {auc_0}, auc(prob): {auc_1}, ks: {ks}")
    return acc, auc_0, auc_1, ks
"""
Soft-Encoding Utils
+ Convert ab channels to categorical data as Zhang et al. paper
References:
+ https://github.com/foamliu/Colorful-Image-Colorization
"""
import numpy as np
import sklearn.neighbors as nn
class ColorizedSoftEncoding(object):
"""
Class Convert Channeld ab in Lab to Categorical Data as Zhang et al. paper
pts_in_hull.npy --> array of pts in colorspaces ab for categorical data
(shape: (??, 2))
Usage:
soft_encoding = ColorizedSoftEncoding(pts_in_hull_path = "pts_in_hull.npy",
nb_neighbors = 5, sigma_neighbor = 5)
image_Lab = read_image(...)["org_image_Lab"]
y = soft_encoding(image_Lab);
"""
def __init__(self, pts_in_hull_path, nb_neighbors = 5, sigma_neighbor = 5):
self.pts_in_hull_path = pts_in_hull_path
self.nb_neighbors = nb_neighbors
self.sigma_neighbor = sigma_neighbor
self.q_ab, self.nn_finder = load_nn_finder(self.pts_in_hull_path, self.nb_neighbors)
self.nb_q = self.q_ab.shape[0]
pass
# __init__
def __call__(self, image_Lab):
self.input = image_Lab
self.output = get_soft_encoding(self.input, self.nn_finder, self.nb_q, self.sigma_neighbor)
return self.output
# __call__
# ColorizedSoftEncoding
def load_nn_finder(pts_in_hull_path, nb_neighbors = 5):
# Load the array of quantized ab value
q_ab = np.load(pts_in_hull_path)
nn_finder = nn.NearestNeighbors(n_neighbors=nb_neighbors, algorithm='ball_tree').fit(q_ab)
return q_ab, nn_finder
# load_nn_finder
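# Note (assumption): with the pts_in_hull.npy file from Zhang et al.'s colorization
# work, q_ab has shape (313, 2), i.e. 313 quantized ab bins, so nb_q is typically 313.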
def get_soft_encoding(image_Lab, nn_finder, nb_q, sigma_neighbor = 5):
"""
image_Lab = read_image("...")["res_image_Lab"]
q_ab, nn_finder = load_nn_finder("pts_in_hull.npy", nb_neighbors = 5)
y = get_soft_encoding(image_Lab, nn_finder, nb_q = q_ab.shape[0], sigma_neighbor = 5)
"""
# get and normalize image_ab
# due to preprocessing weighted with minus 128
image_ab = image_Lab[:, :, 1:].astype(np.int32) - 128
h, w = image_ab.shape[:2]
a = np.ravel(image_ab[:, :, 0])
b = np.ravel(image_ab[:, :, 1])
ab = np.vstack((a, b)).T
# Get the distance to and the idx of the nearest neighbors
dist_neighb, idx_neigh = nn_finder.kneighbors(ab)
# Smooth the weights with a gaussian kernel
wts = np.exp(-dist_neighb ** 2 / (2 * sigma_neighbor ** 2))
wts = wts / np.sum(wts, axis=1)[:, np.newaxis]
# format the target
y = np.zeros((ab.shape[0], nb_q))
    idx_pts = np.arange(ab.shape[0])[:, np.newaxis]
    # Hypothetical completion (pattern from the referenced repository): scatter the
    # smoothed weights into the target array and restore the spatial layout.
    y[idx_pts, idx_neigh] = wts
    y = y.reshape(h, w, nb_q)
    return y
"""TNQMetro: Tensor-network based package for efficient quantum metrology computations."""
# Table of Contents
#
# 1 Functions for finite size systems......................................29
# 1.1 High level functions...............................................37
# 1.2 Low level functions...............................................257
# 1.2.1 Problems with exact derivative.............................1207
# 1.2.2 Problems with discrete approximation of the derivative.....2411
# 2 Functions for infinite size systems..................................3808
# 2.1 High level functions.............................................3816
# 2.2 Low level functions..............................................4075
# 3 Auxiliary functions..................................................5048
import itertools
import math
import warnings
import numpy as np
from ncon import ncon
########################################
# #
# #
# 1 Functions for finite size systems. #
# #
# #
########################################
#############################
# #
# 1.1 High level functions. #
# #
#############################
def fin(N, so_before_list, h, so_after_list, BC='O', L_ini=None, psi0_ini=None, imprecision=10**-2, D_L_max=100, D_L_max_forced=False, L_herm=True, D_psi0_max=100, D_psi0_max_forced=False):
"""
Optimization of the QFI over operator L (in MPO representation) and wave function psi0 (in MPS representation) and check of convergence in their bond dimensions. Function for finite size systems.
User has to provide information about the dynamics by specifying the quantum channel. It is assumed that the quantum channel is translationally invariant and is built from layers of quantum operations.
User has to provide one defining operation for each layer as a local superoperator. These local superoperators have to be input in order of their action on the system.
Parameter encoding is a stand out quantum operation. It is assumed that the parameter encoding acts only once and is unitary so the user has to provide only its generator h.
Generator h has to be diagonal in computational basis, or in other words, it is assumed that local superoperators are expressed in the eigenbasis of h.
Parameters:
N: integer
Number of sites in the chain of tensors (usually number of particles).
so_before_list: list of ndarrays of a shape (d**(2*k),d**(2*k)) where k describes on how many sites a particular local superoperator acts
List of local superoperators (in order) which act before unitary parameter encoding.
h: ndarray of a shape (d,d)
Generator of unitary parameter encoding. Dimension d is the dimension of local Hilbert space (dimension of physical index).
Generator h has to be diagonal in the computational basis, or in other words, it is assumed that local superoperators are expressed in the eigenbasis of h.
so_after_list: list of ndarrays of a shape (d**(2*k),d**(2*k)) where k describes on how many sites particular local superoperator acts
List of local superoperators (in order) which act after unitary parameter encoding.
BC: 'O' or 'P', optional
Boundary conditions, 'O' for OBC, 'P' for PBC.
L_ini: list of length N of ndarrays of a shape (Dl_L,Dr_L,d,d) for OBC (Dl_L, Dr_L can vary between sites) or ndarray of a shape (D_L,D_L,d,d,N) for PBC, optional
Initial MPO for L.
psi0_ini: list of length N of ndarrays of a shape (Dl_psi0,Dr_psi0,d) for OBC (Dl_psi0, Dr_psi0 can vary between sites) or ndarray of a shape (D_psi0,D_psi0,d,N) for PBC, optional
Initial MPS for psi0.
imprecision: float, optional
Expected relative imprecision of the end results.
D_L_max: integer, optional
Maximal value of D_L (D_L is bond dimension for MPO representing L).
D_L_max_forced: bool, optional
True if D_L_max has to be reached, otherwise False.
L_herm: bool, optional
True if Hermitian gauge has to be imposed on MPO representing L, otherwise False.
D_psi0_max: integer, optional
Maximal value of D_psi0 (D_psi0 is bond dimension for MPS representing psi0).
D_psi0_max_forced: bool, optional
True if D_psi0_max has to be reached, otherwise False.
Returns:
result: float
Optimal value of figure of merit.
result_m: ndarray
Matrix describing the figure of merit as a function of bond dimensions of respectively L [rows] and psi0 [columns].
L: list of length N of ndarrays of a shape (Dl_L,Dr_L,d,d) for OBC (Dl_L, Dr_L can vary between sites) or ndarray of a shape (D_L,D_L,d,d,N) for PBC
Optimal L in MPO representation.
psi0: list of length N of ndarrays of a shape (Dl_psi0,Dr_psi0,d) for OBC (Dl_psi0, Dr_psi0 can vary between sites) or ndarray of a shape (D_psi0,D_psi0,d,N) for PBC
Optimal psi0 in MPS representation.
"""
if np.linalg.norm(h - np.diag(np.diag(h))) > 10**-10:
        warnings.warn('Generator h has to be diagonal in the computational basis, or in other words, it is assumed that local superoperators are expressed in the eigenbasis of h.')
d = np.shape(h)[0]
ch = fin_create_channel(N, d, BC, so_before_list + so_after_list)
ch2 = fin_create_channel_derivative(N, d, BC, so_before_list, h, so_after_list)
result, result_m, L, psi0 = fin_gen(N, d, BC, ch, ch2, None, L_ini, psi0_ini, imprecision, D_L_max, D_L_max_forced, L_herm, D_psi0_max, D_psi0_max_forced)
return result, result_m, L, psi0
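# Illustrative usage (hypothetical; d=2, purely unitary encoding with generator sigma_z,
# no noise layers, so both superoperator lists are empty):
#   h = np.diag([1., -1.])
#   result, result_m, L, psi0 = fin(N=4, so_before_list=[], h=h, so_after_list=[])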
def fin_gen(N, d, BC, ch, ch2, epsilon=None, L_ini=None, psi0_ini=None, imprecision=10**-2, D_L_max=100, D_L_max_forced=False, L_herm=True, D_psi0_max=100, D_psi0_max_forced=False):
"""
Optimization of the figure of merit (usually interpreted as the QFI) over operator L (in MPO representation) and wave function psi0 (in MPS representation) and check of convergence when increasing their bond dimensions. Function for finite size systems.
User has to provide information about the dynamics by specifying a quantum channel ch and its derivative ch2 (or two channels separated by small parameter epsilon) as superoperators in MPO representation.
There are no constraints on the structure of the channel but the complexity of calculations highly depends on the channel's bond dimension.
Parameters:
N: integer
Number of sites in the chain of tensors (usually number of particles).
d: integer
Dimension of local Hilbert space (dimension of physical index).
BC: 'O' or 'P'
Boundary conditions, 'O' for OBC, 'P' for PBC.
ch: list of length N of ndarrays of a shape (Dl_ch,Dr_ch,d**2,d**2) for OBC (Dl_ch, Dr_ch can vary between sites) or ndarray of a shape (D_ch,D_ch,d**2,d**2,N) for PBC
Quantum channel as a superoperator in MPO representation.
ch2: list of length N of ndarrays of a shape (Dl_ch2,Dr_ch2,d**2,d**2) for OBC (Dl_ch2, Dr_ch2 can vary between sites) or ndarray of a shape (D_ch2,D_ch2,d**2,d**2,N) for PBC
        Interpretation depends on whether epsilon is specified (2) or not (1, default approach):
        1) derivative of the quantum channel as a superoperator in the MPO representation,
        2) the quantum channel as a superoperator in the MPO representation for the value of the estimated parameter shifted by epsilon in relation to ch.
epsilon: float, optional
        If specified, then it is interpreted as the value of the separation between the estimated parameters encoded in ch and ch2.
L_ini: list of length N of ndarrays of a shape (Dl_L,Dr_L,d,d) for OBC (Dl_L, Dr_L can vary between sites) or ndarray of a shape (D_L,D_L,d,d,N) for PBC, optional
Initial MPO for L.
psi0_ini: list of length N of ndarrays of a shape (Dl_psi0,Dr_psi0,d) for OBC (Dl_psi0, Dr_psi0 can vary between sites) or ndarray of a shape (D_psi0,D_psi0,d,N) for PBC, optional
Initial MPS for psi0.
imprecision: float, optional
Expected relative imprecision of the end results.
D_L_max: integer, optional
Maximal value of D_L (D_L is bond dimension for MPO representing L).
D_L_max_forced: bool, optional
True if D_L_max has to be reached, otherwise False.
L_herm: bool, optional
True if the Hermitian gauge has to be imposed on MPO representing L, otherwise False.
D_psi0_max: integer, optional
Maximal value of D_psi0 (D_psi0 is bond dimension for MPS representing psi0).
D_psi0_max_forced: bool, optional
True if D_psi0_max has to be reached, otherwise False.
Returns:
result: float
Optimal value of the figure of merit.
result_m: ndarray
Matrix describing the figure of merit as a function of bond dimensions of respectively L [rows] and psi0 [columns].
L: list of length N of ndarrays of a shape (Dl_L,Dr_L,d,d) for OBC (Dl_L, Dr_L can vary between sites) or ndarray of a shape (D_L,D_L,d,d,N) for PBC
Optimal L in MPO representation.
psi0: list of length N of ndarrays of a shape (Dl_psi0,Dr_psi0,d) for OBC (Dl_psi0, Dr_psi0 can vary between sites) or ndarray of a shape (D_psi0,D_psi0,d,N) for PBC
Optimal psi0 in MPS representation.
"""
if epsilon is None:
result, result_m, L, psi0 = fin_FoM_FoMD_optbd(N, d, BC, ch, ch2, L_ini, psi0_ini, imprecision, D_L_max, D_L_max_forced, L_herm, D_psi0_max, D_psi0_max_forced)
else:
result, result_m, L, psi0 = fin2_FoM_FoMD_optbd(N, d, BC, ch, ch2, epsilon, L_ini, psi0_ini, imprecision, D_L_max, D_L_max_forced, L_herm, D_psi0_max, D_psi0_max_forced)
return result, result_m, L, psi0
def fin_state(N, so_before_list, h, so_after_list, rho0, BC='O', L_ini=None, imprecision=10**-2, D_L_max=100, D_L_max_forced=False, L_herm=True):
"""
Optimization of the QFI over operator L (in MPO representation) and check of convergence when increasing its bond dimension. Function for finite size systems and fixed state of the system.
User has to provide information about the dynamics by specifying a quantum channel. It is assumed that the quantum channel is translationally invariant and is built from layers of quantum operations.
    User has to provide one defining operation for each layer as a local superoperator. These local superoperators have to be input in order of their action on the system.
    Parameter encoding is a stand out quantum operation. It is assumed that the parameter encoding acts only once and is unitary, so the user has to provide only its generator h.
Generator h has to be diagonal in the computational basis, or in other words it is assumed that local superoperators are expressed in the eigenbasis of h.
Parameters:
N: integer
Number of sites in the chain of tensors (usually number of particles).
    so_before_list: list of ndarrays of a shape (d**(2*k),d**(2*k)) where k describes on how many sites a particular local superoperator acts
List of local superoperators (in order) which act before unitary parameter encoding.
h: ndarray of a shape (d,d)
Generator of unitary parameter encoding. Dimension d is the dimension of local Hilbert space (dimension of physical index).
        Generator h has to be diagonal in the computational basis, or in other words, it is assumed that local superoperators are expressed in the eigenbasis of h.
    so_after_list: list of ndarrays of a shape (d**(2*k),d**(2*k)) where k describes on how many sites a particular local superoperator acts
List of local superoperators (in order) which act after unitary parameter encoding.
rho0: list of length N of ndarrays of a shape (Dl_rho0,Dr_rho0,d,d) for OBC (Dl_rho0, Dr_rho0 can vary between sites) or ndarray of a shape (D_rho0,D_rho0,d,d,N) for PBC
Density matrix describing initial state of the system in MPO representation.
BC: 'O' or 'P', optional
Boundary conditions, 'O' for OBC, 'P' for PBC.
    L_ini: list of length N of ndarrays of a shape (Dl_L,Dr_L,d,d) for OBC (Dl_L, Dr_L can vary between sites) or ndarray of a shape (D_L,D_L,d,d,N) for PBC, optional
Initial MPO for L.
imprecision: float, optional
Expected relative imprecision of the end results.
D_L_max: integer, optional
Maximal value of D_L (D_L is bond dimension for MPO representing L).
D_L_max_forced: bool, optional
True if D_L_max has to be reached, otherwise False.
L_herm: bool, optional
True if Hermitian gauge has to be imposed on MPO representing L, otherwise False.
Returns:
result: float
Optimal value of figure of merit.
result_v: ndarray
        Vector describing the figure of merit as a function of the bond dimension of L.
L: list of length N of ndarrays of a shape (Dl_L,Dr_L,d,d) for OBC (Dl_L, Dr_L can vary between sites) or ndarray of a shape (D_L,D_L,d,d,N) for PBC
Optimal L in the MPO representation.
"""
if np.linalg.norm(h - np.diag(np.diag(h))) > 10**-10:
        warnings.warn('Generator h has to be diagonal in the computational basis, or in other words, it is assumed that local superoperators are expressed in the eigenbasis of h.')
d = np.shape(h)[0]
ch = fin_create_channel(N, d, BC, so_before_list + so_after_list)
ch2 = fin_create_channel_derivative(N, d, BC, so_before_list, h, so_after_list)
rho = channel_acting_on_operator(ch, rho0)
rho2 = channel_acting_on_operator(ch2, rho0)
result, result_v, L = fin_state_gen(N, d, BC, rho, rho2, None, L_ini, imprecision, D_L_max, D_L_max_forced, L_herm)
return result, result_v, L
def fin_state_gen(N, d, BC, rho, rho2, epsilon=None, L_ini=None, imprecision=10**-2, D_L_max=100, D_L_max_forced=False, L_herm=True):
"""
    Optimization of the figure of merit (usually interpreted as the QFI) over operator L (in MPO representation) and check of convergence when increasing its bond dimension. Function for finite size systems and fixed state of the system.
User has to provide information about the dynamics by specifying a quantum channel ch and its derivative ch2 (or two channels separated by small parameter epsilon) as superoperators in the MPO representation.
    There are no constraints on the structure of the channel but the complexity of calculations highly depends on the channel's bond dimension.
Parameters:
N: integer
Number of sites in the chain of tensors (usually number of particles).
d: integer
Dimension of local Hilbert space (dimension of physical index).
BC: 'O' or 'P'
Boundary conditions, 'O' for OBC, 'P' for PBC.
rho: list of length N of ndarrays of a shape (Dl_rho,Dr_rho,d,d) for OBC (Dl_rho, Dr_rho can vary between sites) or ndarray of a shape (D_rho,D_rho,d,d,N) for PBC
Density matrix at the output of the quantum channel in the MPO representation.
rho2: list of length N of ndarrays of a shape (Dl_rho2,Dr_rho2,d,d) for OBC (Dl_rho2, Dr_rho2 can vary between sites) or ndarray of a shape (D_rho2,D_rho2,d,d,N) for PBC
        Interpretation depends on whether epsilon is specified (2) or not (1, default approach):
        1) derivative of the density matrix at the output of the quantum channel in MPO representation,
        2) density matrix at the output of the quantum channel in MPO representation for the value of the estimated parameter shifted by epsilon in relation to rho.
epsilon: float, optional
        If specified, then it is interpreted as the value of the separation between the estimated parameters encoded in rho and rho2.
L_ini: list of length N of ndarrays of a shape (Dl_L,Dr_L,d,d) for OBC (Dl_L, Dr_L can vary between sites) or ndarray of a shape (D_L,D_L,d,d,N) for PBC, optional
Initial MPO for L.
imprecision: float, optional
Expected relative imprecision of the end results.
D_L_max: integer, optional
Maximal value of D_L (D_L is bond dimension for MPO representing L).
D_L_max_forced: bool, optional
True if D_L_max has to be reached, otherwise False.
L_herm: bool, optional
True if Hermitian gauge has to be imposed on MPO representing L, otherwise False.
Returns:
result: float
Optimal value of figure of merit.
result_v: ndarray
        Vector describing the figure of merit as a function of the bond dimension of L.
L: list of length N of ndarrays of a shape (Dl_L,Dr_L,d,d) for OBC (Dl_L, Dr_L can vary between sites) or ndarray of a shape (D_L,D_L,d,d,N) for PBC
Optimal L in MPO representation.
"""
if epsilon is None:
result, result_v, L = fin_FoM_optbd(N, d, BC, rho, rho2, L_ini, imprecision, D_L_max, D_L_max_forced, L_herm)
else:
result, result_v, L = fin2_FoM_optbd(N, d, BC, rho, rho2, epsilon, L_ini, imprecision, D_L_max, D_L_max_forced, L_herm)
return result, result_v, L
############################
# #
# 1.2 Low level functions. #
# #
############################
def fin_create_channel(N, d, BC, so_list, tol=10**-10):
"""
    Creates an MPO for a superoperator describing a translationally invariant quantum channel from a list of local superoperators. Function for finite size systems.
    For OBC, tensor-network length N has to be at least 2k-1, where k is the correlation length (the number of sites on which the biggest local superoperator acts).
    Local superoperators acting on more than 4 neighbouring sites are not currently supported.
Parameters:
N: integer
Number of sites in the chain of tensors (usually number of particles).
        For OBC tensor-network length N has to be at least 2k-1 where k is the correlation length (the number of sites on which the biggest local superoperator acts).
d: integer
Dimension of local Hilbert space (dimension of physical index).
BC: 'O' or 'P'
Boundary conditions, 'O' for OBC, 'P' for PBC.
so_list: list of ndarrays of a shape (d**(2*k),d**(2*k)) where k describes on how many sites a particular local superoperator acts
List of local superoperators in order of their action on the system.
        Local superoperators acting on more than 4 neighbouring sites are not currently supported.
tol: float, optional
        Factor which, after multiplication by the largest singular value, gives the cutoff below which singular values are treated as zero.
Returns:
ch: list of length N of ndarrays of shape (Dl_ch,Dr_ch,d**2,d**2) for OBC (Dl_ch, Dr_ch can vary between sites) or ndarray of shape (D_ch,D_ch,d**2,d**2,N) for PBC
Quantum channel as a superoperator in the MPO representation.
"""
if so_list == []:
if BC == 'O':
ch = np.eye(d**2,dtype=complex)
ch = ch[np.newaxis,np.newaxis,:,:]
ch = [ch]*N
elif BC == 'P':
ch = np.eye(d**2,dtype=complex)
ch = ch[np.newaxis,np.newaxis,:,:,np.newaxis]
ch = np.tile(ch,(1,1,1,1,N))
return ch
if BC == 'O':
ch = [0]*N
kmax = max([int(math.log(np.shape(so_list[i])[0],d**2)) for i in range(len(so_list))])
if N < 2*kmax-1:
        warnings.warn('For OBC tensor-network length N has to be at least 2k-1 where k is the correlation length (the number of sites on which the biggest local superoperator acts).')
for x in range(N):
if x >= kmax and N-x >= kmax:
ch[x] = ch[x-1]
continue
for i in range(len(so_list)):
so = so_list[i]
k = int(math.log(np.shape(so)[0],d**2))
if np.linalg.norm(so-np.diag(np.diag(so))) < 10**-10:
so = np.diag(so)
if k == 1:
bdchil = 1
bdchir = 1
chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
for nx in range(d**2):
chi[:,:,nx,nx] = so[nx]
elif k == 2:
so = np.reshape(so,(d**2,d**2),order='F')
u,s,vh = np.linalg.svd(so)
s = s[s > s[0]*tol]
bdchi = np.shape(s)[0]
u = u[:,:bdchi]
vh = vh[:bdchi,:]
us = u @ np.diag(np.sqrt(s))
sv = np.diag(np.sqrt(s)) @ vh
if x == 0:
bdchil = 1
bdchir = bdchi
chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
for nx in range(d**2):
tensors = [us[nx,:]]
legs = [[-1]]
chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
elif x > 0 and x < N-1:
bdchil = bdchi
bdchir = bdchi
chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
for nx in range(d**2):
tensors = [sv[:,nx],us[nx,:]]
legs = [[-1],[-2]]
chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
elif x == N-1:
bdchil = bdchi
bdchir = 1
chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
for nx in range(d**2):
tensors = [sv[:,nx]]
legs = [[-1]]
chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
elif k == 3:
so = np.reshape(so,(d**2,d**4),order='F')
u1,s1,vh1 = np.linalg.svd(so,full_matrices=False)
s1 = s1[s1 > s1[0]*tol]
bdchi1 = np.shape(s1)[0]
u1 = u1[:,:bdchi1]
vh1 = vh1[:bdchi1,:]
us1 = u1 @ np.diag(np.sqrt(s1))
sv1 = np.diag(np.sqrt(s1)) @ vh1
sv1 = np.reshape(sv1,(bdchi1*d**2,d**2),order='F')
u2,s2,vh2 = np.linalg.svd(sv1,full_matrices=False)
s2 = s2[s2 > s2[0]*tol]
bdchi2 = np.shape(s2)[0]
u2 = u2[:,:bdchi2]
vh2 = vh2[:bdchi2,:]
us2 = u2 @ np.diag(np.sqrt(s2))
us2 = np.reshape(us2,(bdchi1,d**2,bdchi2),order='F')
sv2 = np.diag(np.sqrt(s2)) @ vh2
if x == 0:
bdchil = 1
bdchir = bdchi1
chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
for nx in range(d**2):
tensors = [us1[nx,:]]
legs = [[-1]]
chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
elif x == 1:
bdchil = bdchi1
bdchir = bdchi2*bdchi1
chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
for nx in range(d**2):
tensors = [us2[:,nx,:],us1[nx,:]]
legs = [[-1,-2],[-3]]
chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
elif x > 1 and x < N-2:
bdchil = bdchi2*bdchi1
bdchir = bdchi2*bdchi1
chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
for nx in range(d**2):
tensors = [sv2[:,nx],us2[:,nx,:],us1[nx,:]]
legs = [[-1],[-2,-3],[-4]]
chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
elif x == N-2:
bdchil = bdchi2*bdchi1
bdchir = bdchi2
chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
for nx in range(d**2):
tensors = [sv2[:,nx],us2[:,nx,:]]
legs = [[-1],[-2,-3]]
chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
elif x == N-1:
bdchil = bdchi2
bdchir = 1
chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
for nx in range(d**2):
tensors = [sv2[:,nx]]
legs = [[-1]]
chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
elif k == 4:
so = np.reshape(so,(d**2,d**6),order='F')
u1,s1,vh1 = np.linalg.svd(so,full_matrices=False)
s1 = s1[s1 > s1[0]*tol]
bdchi1 = np.shape(s1)[0]
u1 = u1[:,:bdchi1]
vh1 = vh1[:bdchi1,:]
us1 = u1 @ np.diag(np.sqrt(s1))
sv1 = np.diag(np.sqrt(s1)) @ vh1
sv1 = np.reshape(sv1,(bdchi1*d**2,d**4),order='F')
u2,s2,vh2 = np.linalg.svd(sv1,full_matrices=False)
s2 = s2[s2 > s2[0]*tol]
bdchi2 = np.shape(s2)[0]
u2 = u2[:,:bdchi2]
vh2 = vh2[:bdchi2,:]
us2 = u2 @ np.diag(np.sqrt(s2))
us2 = np.reshape(us2,(bdchi1,d**2,bdchi2),order='F')
sv2 = np.diag(np.sqrt(s2)) @ vh2
sv2 = np.reshape(sv2,(bdchi2*d**2,d**2),order='F')
u3,s3,vh3 = np.linalg.svd(sv2,full_matrices=False)
s3 = s3[s3 > s3[0]*tol]
bdchi3 = np.shape(s3)[0]
u3 = u3[:,:bdchi3]
vh3 = vh3[:bdchi3,:]
us3 = u3 @ np.diag(np.sqrt(s3))
us3 = np.reshape(us3,(bdchi2,d**2,bdchi3),order='F')
sv3 = np.diag(np.sqrt(s3)) @ vh3
if x == 0:
bdchil = 1
bdchir = bdchi1
chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
for nx in range(d**2):
tensors = [us1[nx,:]]
legs = [[-1]]
chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
elif x == 1:
bdchil = bdchi1
bdchir = bdchi2*bdchi1
chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
for nx in range(d**2):
tensors = [us2[:,nx,:],us1[nx,:]]
legs = [[-1,-2],[-3]]
chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
elif x == 2:
bdchil = bdchi2*bdchi1
bdchir = bdchi3*bdchi2*bdchi1
chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
for nx in range(d**2):
tensors = [us3[:,nx,:],us2[:,nx,:],us1[nx,:]]
legs = [[-1,-3],[-2,-4],[-5]]
chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
elif x > 2 and x < N-3:
bdchil = bdchi3*bdchi2*bdchi1
bdchir = bdchi3*bdchi2*bdchi1
chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
for nx in range(d**2):
tensors = [sv3[:,nx],us3[:,nx,:],us2[:,nx,:],us1[nx,:]]
legs = [[-1],[-2,-4],[-3,-5],[-6]]
chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
elif x == N-3:
bdchil = bdchi3*bdchi2*bdchi1
bdchir = bdchi3*bdchi2
chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
for nx in range(d**2):
tensors = [sv3[:,nx],us3[:,nx,:],us2[:,nx,:]]
legs = [[-1],[-2,-4],[-3,-5]]
chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
elif x == N-2:
bdchil = bdchi3*bdchi2
bdchir = bdchi3
chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
for nx in range(d**2):
tensors = [sv3[:,nx],us3[:,nx,:]]
legs = [[-1],[-2,-3]]
chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
elif x == N-1:
bdchil = bdchi3
bdchir = 1
chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
for nx in range(d**2):
tensors = [sv3[:,nx]]
legs = [[-1]]
chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
else:
                        warnings.warn('Local superoperators acting on more than 4 neighbouring sites are not currently supported.')
else:
if k == 1:
bdchil = 1
bdchir = 1
chi = so[np.newaxis,np.newaxis,:,:]
elif k == 2:
u,s,vh = np.linalg.svd(so)
s = s[s > s[0]*tol]
bdchi = np.shape(s)[0]
u = u[:,:bdchi]
vh = vh[:bdchi,:]
us = u @ np.diag(np.sqrt(s))
sv = np.diag(np.sqrt(s)) @ vh
us = np.reshape(us,(d**2,d**2,bdchi),order='F')
sv = np.reshape(sv,(bdchi,d**2,d**2),order='F')
if x == 0:
tensors = [us]
legs = [[-2,-3,-1]]
chi = ncon(tensors,legs)
bdchil = 1
bdchir = bdchi
elif x > 0 and x < N-1:
tensors = [sv,us]
legs = [[-1,-3,1],[1,-4,-2]]
chi = ncon(tensors,legs)
bdchil = bdchi
bdchir = bdchi
elif x == N-1:
tensors = [sv]
legs = [[-1,-2,-3]]
chi = ncon(tensors,legs)
bdchil = bdchi
bdchir = 1
chi = np.reshape(chi,(bdchil,bdchir,d**2,d**2),order='F')
elif k == 3:
so = np.reshape(so,(d**4,d**8),order='F')
u1,s1,vh1 = np.linalg.svd(so,full_matrices=False)
s1 = s1[s1 > s1[0]*tol]
bdchi1 = np.shape(s1)[0]
u1 = u1[:,:bdchi1]
vh1 = vh1[:bdchi1,:]
us1 = u1 @ np.diag(np.sqrt(s1))
sv1 = np.diag(np.sqrt(s1)) @ vh1
us1 = np.reshape(us1,(d**2,d**2,bdchi1),order='F')
sv1 = np.reshape(sv1,(bdchi1*d**4,d**4),order='F')
u2,s2,vh2 = np.linalg.svd(sv1,full_matrices=False)
s2 = s2[s2 > s2[0]*tol]
bdchi2 = np.shape(s2)[0]
u2 = u2[:,:bdchi2]
vh2 = vh2[:bdchi2,:]
us2 = u2 @ np.diag(np.sqrt(s2))
us2 = np.reshape(us2,(bdchi1,d**2,d**2,bdchi2),order='F')
sv2 = np.diag(np.sqrt(s2)) @ vh2
sv2 = np.reshape(sv2,(bdchi2,d**2,d**2),order='F')
if x == 0:
tensors = [us1]
legs = [[-2,-3,-1]]
chi = ncon(tensors,legs)
bdchil = 1
bdchir = bdchi1
elif x == 1:
tensors = [us2,us1]
legs = [[-1,-5,1,-2],[1,-6,-3]]
chi = ncon(tensors,legs)
bdchil = bdchi1
bdchir = bdchi2*bdchi1
elif x > 1 and x < N-2:
tensors = [sv2,us2,us1]
legs = [[-1,-5,1],[-2,1,2,-3],[2,-6,-4]]
chi = ncon(tensors,legs)
bdchil = bdchi2*bdchi1
bdchir = bdchi2*bdchi1
elif x == N-2:
tensors = [sv2,us2]
legs = [[-1,-4,1],[-2,1,-5,-3]]
chi = ncon(tensors,legs)
bdchil = bdchi2*bdchi1
bdchir = bdchi2
elif x == N-1:
tensors = [sv2]
legs = [[-1,-2,-3]]
chi = ncon(tensors,legs)
bdchil = bdchi2
bdchir = 1
chi = np.reshape(chi,(bdchil,bdchir,d**2,d**2),order='F')
elif k == 4:
so = np.reshape(so,(d**4,d**12),order='F')
u1,s1,vh1 = np.linalg.svd(so,full_matrices=False)
s1 = s1[s1 > s1[0]*tol]
bdchi1 = np.shape(s1)[0]
u1 = u1[:,:bdchi1]
vh1 = vh1[:bdchi1,:]
us1 = u1 @ np.diag(np.sqrt(s1))
sv1 = np.diag(np.sqrt(s1)) @ vh1
us1 = np.reshape(us1,(d**2,d**2,bdchi1),order='F')
sv1 = np.reshape(sv1,(bdchi1*d**4,d**8),order='F')
u2,s2,vh2 = np.linalg.svd(sv1,full_matrices=False)
s2 = s2[s2 > s2[0]*tol]
bdchi2 = np.shape(s2)[0]
u2 = u2[:,:bdchi2]
vh2 = vh2[:bdchi2,:]
us2 = u2 @ np.diag(np.sqrt(s2))
us2 = np.reshape(us2,(bdchi1,d**2,d**2,bdchi2),order='F')
sv2 = np.diag(np.sqrt(s2)) @ vh2
sv2 = np.reshape(sv2,(bdchi2*d**4,d**4),order='F')
u3,s3,vh3 = np.linalg.svd(sv2,full_matrices=False)
s3 = s3[s3 > s3[0]*tol]
bdchi3 = np.shape(s3)[0]
u3 = u3[:,:bdchi3]
vh3 = vh3[:bdchi3,:]
us3 = u3 @ np.diag(np.sqrt(s3))
us3 = np.reshape(us3,(bdchi2,d**2,d**2,bdchi3),order='F')
sv3 = np.diag(np.sqrt(s3)) @ vh3
sv3 = np.reshape(sv3,(bdchi3,d**2,d**2),order='F')
if x == 0:
tensors = [us1]
legs = [[-2,-3,-1]]
chi = ncon(tensors,legs)
bdchil = 1
bdchir = bdchi1
elif x == 1:
tensors = [us2,us1]
legs = [[-1,-4,1,-2],[1,-5,-3]]
chi = ncon(tensors,legs)
bdchil = bdchi1
bdchir = bdchi2*bdchi1
elif x == 2:
tensors = [us3,us2,us1]
legs = [[-1,-6,1,-3],[-2,1,2,-4],[2,-7,-5]]
chi = ncon(tensors,legs)
bdchil = bdchi2*bdchi1
bdchir = bdchi3*bdchi2*bdchi1
elif x > 2 and x < N-3:
tensors = [sv3,us3,us2,us1]
legs = [[-1,-7,1],[-2,1,2,-4],[-3,2,3,-5],[3,-8,-6]]
chi = ncon(tensors,legs)
bdchil = bdchi3*bdchi2*bdchi1
bdchir = bdchi3*bdchi2*bdchi1
elif x == N-3:
tensors = [sv3,us3,us2]
legs = [[-1,-6,1],[-2,1,2,-4],[-3,2,-7,-5]]
chi = ncon(tensors,legs)
bdchil = bdchi3*bdchi2*bdchi1
bdchir = bdchi3*bdchi2
elif x == N-2:
tensors = [sv3,us3]
legs = [[-1,-4,1],[-2,1,-5,-3]]
chi = ncon(tensors,legs)
bdchil = bdchi3*bdchi2
bdchir = bdchi3
elif x == N-1:
tensors = [sv3]
legs = [[-1,-2,-3]]
chi = ncon(tensors,legs)
bdchil = bdchi3
bdchir = 1
                chi = np.reshape(chi,(bdchil,bdchir,d**2,d**2),order='F')
else:
                warnings.warn('Local superoperators acting on more than 4 neighbouring sites are not currently supported.')
if i == 0:
bdchl = bdchil
bdchr = bdchir
ch[x] = chi
else:
bdchl = bdchil*bdchl
bdchr = bdchir*bdchr
tensors = [chi,ch[x]]
legs = [[-1,-3,-5,1],[-2,-4,1,-6]]
ch[x] = ncon(tensors,legs)
ch[x] = np.reshape(ch[x],(bdchl,bdchr,d**2,d**2),order='F')
elif BC == 'P':
for i in range(len(so_list)):
so = so_list[i]
k = int(math.log(np.shape(so)[0],d**2))
if np.linalg.norm(so-np.diag(np.diag(so))) < 10**-10:
so = np.diag(so)
if k == 1:
bdchi = 1
chi = np.zeros((bdchi,bdchi,d**2,d**2),dtype=complex)
for nx in range(d**2):
chi[:,:,nx,nx] = so[nx]
elif k == 2:
so = np.reshape(so,(d**2,d**2),order='F')
u,s,vh = np.linalg.svd(so)
s = s[s > s[0]*tol]
bdchi = np.shape(s)[0]
u = u[:,:bdchi]
vh = vh[:bdchi,:]
us = u @ np.diag(np.sqrt(s))
sv = np.diag(np.sqrt(s)) @ vh
chi = np.zeros((bdchi,bdchi,d**2,d**2),dtype=complex)
for nx in range(d**2):
chi[:,:,nx,nx] = np.outer(sv[:,nx],us[nx,:])
elif k == 3:
so = np.reshape(so,(d**2,d**4),order='F')
u1,s1,vh1 = np.linalg.svd(so,full_matrices=False)
s1 = s1[s1 > s1[0]*tol]
bdchi1 = np.shape(s1)[0]
u1 = u1[:,:bdchi1]
vh1 = vh1[:bdchi1,:]
us1 = u1 @ np.diag(np.sqrt(s1))
sv1 = np.diag(np.sqrt(s1)) @ vh1
sv1 = np.reshape(sv1,(bdchi1*d**2,d**2),order='F')
u2,s2,vh2 = np.linalg.svd(sv1,full_matrices=False)
s2 = s2[s2 > s2[0]*tol]
bdchi2 = np.shape(s2)[0]
u2 = u2[:,:bdchi2]
vh2 = vh2[:bdchi2,:]
us2 = u2 @ np.diag(np.sqrt(s2))
us2 = np.reshape(us2,(bdchi1,d**2,bdchi2),order='F')
sv2 = np.diag(np.sqrt(s2)) @ vh2
bdchi = bdchi2*bdchi1
chi = np.zeros((bdchi,bdchi,d**2,d**2),dtype=complex)
for nx in range(d**2):
tensors = [sv2[:,nx],us2[:,nx,:],us1[nx,:]]
legs = [[-1],[-2,-3],[-4]]
chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchi,bdchi),order='F')
elif k == 4:
so = np.reshape(so,(d**2,d**6),order='F')
u1,s1,vh1 = np.linalg.svd(so,full_matrices=False)
s1 = s1[s1 > s1[0]*tol]
bdchi1 = np.shape(s1)[0]
u1 = u1[:,:bdchi1]
vh1 = vh1[:bdchi1,:]
us1 = u1 @ np.diag(np.sqrt(s1))
sv1 = np.diag(np.sqrt(s1)) @ vh1
sv1 = np.reshape(sv1,(bdchi1*d**2,d**4),order='F')
u2,s2,vh2 = np.linalg.svd(sv1,full_matrices=False)
s2 = s2[s2 > s2[0]*tol]
bdchi2 = np.shape(s2)[0]
u2 = u2[:,:bdchi2]
vh2 = vh2[:bdchi2,:]
us2 = u2 @ np.diag(np.sqrt(s2))
us2 = np.reshape(us2,(bdchi1,d**2,bdchi2),order='F')
sv2 = np.diag(np.sqrt(s2)) @ vh2
sv2 = np.reshape(sv2,(bdchi2*d**2,d**2),order='F')
u3,s3,vh3 = np.linalg.svd(sv2,full_matrices=False)
s3 = s3[s3 > s3[0]*tol]
bdchi3 = np.shape(s3)[0]
u3 = u3[:,:bdchi3]
vh3 = vh3[:bdchi3,:]
us3 = u3 @ np.diag(np.sqrt(s3))
us3 = np.reshape(us3,(bdchi2,d**2,bdchi3),order='F')
sv3 = np.diag(np.sqrt(s3)) @ vh3
bdchi = bdchi3*bdchi2*bdchi1
chi = np.zeros((bdchi,bdchi,d**2,d**2),dtype=complex)
for nx in range(d**2):
tensors = [sv3[:,nx],us3[:,nx,:],us2[:,nx,:],us1[nx,:]]
legs = [[-1],[-2,-4],[-3,-5],[-6]]
chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchi,bdchi),order='F')
else:
                    warnings.warn('Local superoperators acting on more than 4 neighbouring sites are not currently supported.')
else:
if k == 1:
bdchi = 1
chi = so[np.newaxis,np.newaxis,:,:]
elif k == 2:
u,s,vh = np.linalg.svd(so)
s = s[s > s[0]*tol]
bdchi = np.shape(s)[0]
u = u[:,:bdchi]
vh = vh[:bdchi,:]
us = u @ np.diag(np.sqrt(s))
sv = np.diag(np.sqrt(s)) @ vh
us = np.reshape(us,(d**2,d**2,bdchi),order='F')
sv = np.reshape(sv,(bdchi,d**2,d**2),order='F')
tensors = [sv,us]
legs = [[-1,-3,1],[1,-4,-2]]
chi = ncon(tensors,legs)
elif k == 3:
so = np.reshape(so,(d**4,d**8),order='F')
u1,s1,vh1 = np.linalg.svd(so,full_matrices=False)
s1 = s1[s1 > s1[0]*tol]
bdchi1 = np.shape(s1)[0]
u1 = u1[:,:bdchi1]
vh1 = vh1[:bdchi1,:]
us1 = u1 @ np.diag(np.sqrt(s1))
sv1 = np.diag(np.sqrt(s1)) @ vh1
us1 = np.reshape(us1,(d**2,d**2,bdchi1),order='F')
sv1 = np.reshape(sv1,(bdchi1*d**4,d**4),order='F')
u2,s2,vh2 = np.linalg.svd(sv1,full_matrices=False)
s2 = s2[s2 > s2[0]*tol]
bdchi2 = np.shape(s2)[0]
u2 = u2[:,:bdchi2]
vh2 = vh2[:bdchi2,:]
us2 = u2 @ np.diag(np.sqrt(s2))
us2 = np.reshape(us2,(bdchi1,d**2,d**2,bdchi2),order='F')
sv2 = np.diag(np.sqrt(s2)) @ vh2
sv2 = np.reshape(sv2,(bdchi2,d**2,d**2),order='F')
tensors = [sv2,us2,us1]
legs = [[-1,-5,1],[-2,1,2,-3],[2,-6,-4]]
chi = ncon(tensors,legs)
bdchi = bdchi2*bdchi1
chi = np.reshape(chi,(bdchi,bdchi,d**2,d**2),order='F')
elif k == 4:
so = np.reshape(so,(d**4,d**12),order='F')
u1,s1,vh1 = np.linalg.svd(so,full_matrices=False)
s1 = s1[s1 > s1[0]*tol]
bdchi1 = np.shape(s1)[0]
u1 = u1[:,:bdchi1]
vh1 = vh1[:bdchi1,:]
us1 = u1 @ np.diag(np.sqrt(s1))
sv1 = np.diag(np.sqrt(s1)) @ vh1
us1 = np.reshape(us1,(d**2,d**2,bdchi1),order='F')
sv1 = np.reshape(sv1,(bdchi1*d**4,d**8),order='F')
u2,s2,vh2 = np.linalg.svd(sv1,full_matrices=False)
s2 = s2[s2 > s2[0]*tol]
bdchi2 = np.shape(s2)[0]
u2 = u2[:,:bdchi2]
vh2 = vh2[:bdchi2,:]
us2 = u2 @ np.diag(np.sqrt(s2))
us2 = np.reshape(us2,(bdchi1,d**2,d**2,bdchi2),order='F')
sv2 = np.diag(np.sqrt(s2)) @ vh2
sv2 = np.reshape(sv2,(bdchi2*d**4,d**4),order='F')
u3,s3,vh3 = np.linalg.svd(sv2,full_matrices=False)
s3 = s3[s3 > s3[0]*tol]
bdchi3 = np.shape(s3)[0]
u3 = u3[:,:bdchi3]
vh3 = vh3[:bdchi3,:]
us3 = u3 @ np.diag(np.sqrt(s3))
us3 = np.reshape(us3,(bdchi2,d**2,d**2,bdchi3),order='F')
sv3 = np.diag(np.sqrt(s3)) @ vh3
sv3 = np.reshape(sv3,(bdchi3,d**2,d**2),order='F')
tensors = [sv3,us3,us2,us1]
legs = [[-1,-7,1],[-2,1,2,-4],[-3,2,3,-5],[3,-8,-6]]
chi = ncon(tensors,legs)
bdchi = bdchi3*bdchi2*bdchi1
chi = np.reshape(chi,(bdchi,bdchi,d**2,d**2),order='F')
else:
                warnings.warn('Local superoperators acting on more than 4 neighbouring sites are not currently supported.')
if i == 0:
bdch = bdchi
ch = chi
else:
bdch = bdchi*bdch
tensors = [chi,ch]
legs = [[-1,-3,-5,1],[-2,-4,1,-6]]
ch = ncon(tensors,legs)
ch = np.reshape(ch,(bdch,bdch,d**2,d**2),order='F')
ch = ch[:,:,:,:,np.newaxis]
ch = np.tile(ch,(1,1,1,1,N))
return ch
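# Illustrative usage sketch for fin_create_channel (hedged example; the
# single-site dephasing layer below is constructed ad hoc for this comment and
# is not part of the package). In column-major vectorization the map
# rho -> (1-p)*rho + p*sz@rho@sz corresponds to the superoperator
# (1-p)*eye(d**2) + p*kron(sz,sz):
#
#   p = 0.1
#   sz = np.diag([1.0,-1.0]).astype(complex)
#   so = (1-p)*np.eye(4,dtype=complex) + p*np.kron(sz,sz)
#   ch = fin_create_channel(4, 2, 'O', [so])  # channel MPO for N=4 qubits, OBC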
def fin_create_channel_derivative(N, d, BC, so_before_list, h, so_after_list):
"""
    Creates an MPO for the derivative (over the estimated parameter) of the superoperator describing the quantum channel. Function for finite size systems.
Function for translationally invariant channels with unitary parameter encoding generated by h.
Generator h has to be diagonal in the computational basis, or in other words it is assumed that local superoperators are expressed in the eigenbasis of h.
Parameters:
N: integer
Number of sites in the chain of tensors (usually number of particles).
d: integer
Dimension of local Hilbert space (dimension of physical index).
BC: 'O' or 'P'
Boundary conditions, 'O' for OBC, 'P' for PBC.
    so_before_list: list of ndarrays of a shape (d**(2*k),d**(2*k)) where k describes on how many sites a particular local superoperator acts
List of local superoperators (in order) which act before unitary parameter encoding.
h: ndarray of a shape (d,d)
Generator of unitary parameter encoding.
        Generator h has to be diagonal in the computational basis, or in other words, it is assumed that local superoperators are expressed in the eigenbasis of h.
    so_after_list: list of ndarrays of a shape (d**(2*k),d**(2*k)) where k describes on how many sites a particular local superoperator acts
List of local superoperators (in order) which act after unitary parameter encoding.
Returns:
chd: list of length N of ndarrays of a shape (Dl_chd,Dr_chd,d**2,d**2) for OBC (Dl_chd, Dr_chd can vary between sites) or ndarray of a shape (D_chd,D_chd,d**2,d**2,N) for PBC
Derivative of superoperator describing quantum channel in MPO representation.
"""
if np.linalg.norm(h-np.diag(np.diag(h))) > 10**-10:
        warnings.warn('Generator h has to be diagonal in the computational basis, or in other words it is assumed that local superoperators are expressed in the eigenbasis of h.')
if len(so_before_list) == 0:
if BC == 'O':
ch1 = np.eye(d**2,dtype=complex)
ch1 = ch1[np.newaxis,np.newaxis,:,:]
ch1 = [ch1]*N
elif BC == 'P':
ch1 = np.eye(d**2,dtype=complex)
ch1 = ch1[np.newaxis,np.newaxis,:,:,np.newaxis]
ch1 = np.tile(ch1,(1,1,1,1,N))
ch1d = fin_commutator(N,d,BC,ch1,h,1j)
ch2 = fin_create_channel(N,d,BC,so_after_list)
if BC == 'O':
chd = [0]*N
for x in range(N):
bdch1dl = np.shape(ch1d[x])[0]
bdch1dr = np.shape(ch1d[x])[1]
bdch2l = np.shape(ch2[x])[0]
bdch2r = np.shape(ch2[x])[1]
tensors = [ch2[x],ch1d[x]]
legs = [[-1,-3,-5,1],[-2,-4,1,-6]]
chd[x] = np.reshape(ncon(tensors,legs),(bdch1dl*bdch2l,bdch1dr*bdch2r,d**2,d**2),order='F')
elif BC == 'P':
bdch1d = np.shape(ch1d)[0]
bdch2 = np.shape(ch2)[0]
chd = np.zeros((bdch1d*bdch2,bdch1d*bdch2,d**2,d**2,N),dtype=complex)
for x in range(N):
tensors = [ch2[:,:,:,:,x],ch1d[:,:,:,:,x]]
legs = [[-1,-3,-5,1],[-2,-4,1,-6]]
chd[:,:,:,:,x] = np.reshape(ncon(tensors,legs),(bdch1d*bdch2,bdch1d*bdch2,d**2,d**2),order='F')
elif len(so_after_list) == 0:
ch1 = fin_create_channel(N,d,BC,so_before_list)
chd = fin_commutator(N,d,BC,ch1,h,1j)
else:
ch1 = fin_create_channel(N,d,BC,so_before_list)
ch1d = fin_commutator(N,d,BC,ch1,h,1j)
ch2 = fin_create_channel(N,d,BC,so_after_list)
if BC == 'O':
chd = [0]*N
for x in range(N):
bdch1dl = np.shape(ch1d[x])[0]
bdch1dr = np.shape(ch1d[x])[1]
bdch2l = np.shape(ch2[x])[0]
bdch2r = np.shape(ch2[x])[1]
tensors = [ch2[x],ch1d[x]]
legs = [[-1,-3,-5,1],[-2,-4,1,-6]]
chd[x] = np.reshape(ncon(tensors,legs),(bdch1dl*bdch2l,bdch1dr*bdch2r,d**2,d**2),order='F')
elif BC == 'P':
bdch1d = np.shape(ch1d)[0]
bdch2 = np.shape(ch2)[0]
chd = np.zeros((bdch1d*bdch2,bdch1d*bdch2,d**2,d**2,N),dtype=complex)
for x in range(N):
tensors = [ch2[:,:,:,:,x],ch1d[:,:,:,:,x]]
legs = [[-1,-3,-5,1],[-2,-4,1,-6]]
chd[:,:,:,:,x] = np.reshape(ncon(tensors,legs),(bdch1d*bdch2,bdch1d*bdch2,d**2,d**2),order='F')
return chd
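# Reasoning sketch for the construction above (hedged summary): the full
# channel is the composition Phi_after o U_phi o Phi_before, where the unitary
# encoding U_phi(rho) = exp(-1j*phi*H) rho exp(1j*phi*H) with H = sum_x h_x
# acts exactly once. The product rule therefore leaves a single term in which
# the commutator with H (times 1j) is inserted between the layers, which is
# why the code composes the output of fin_commutator(N,d,BC,ch1,h,1j) with
# the MPO ch2 of the layers acting after the encoding.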
def fin_commutator(N, d, BC, a, h, c):
"""
    Calculate the MPO for the commutator b = [a, c*sum{h}] of MPO a with the sum of local generators h, scaled by an arbitrary multiplicative factor c.
    Generator h has to be diagonal in the computational basis, or in other words, it is assumed that a is expressed in the eigenbasis of h.
Parameters:
N: integer
Number of sites in the chain of tensors (usually number of particles).
d: integer
Dimension of local Hilbert space (dimension of physical index).
BC: 'O' or 'P'
Boundary conditions, 'O' for OBC, 'P' for PBC.
a: list of length N of ndarrays of a shape (Dl_a,Dr_a,d,d) for OBC (Dl_a, Dr_a can vary between sites) or ndarray of a shape (D_a,D_a,d,d,N) for PBC
MPO.
h: ndarray of a shape (d,d)
Generator of unitary parameter encoding.
        Generator h has to be diagonal in the computational basis, or in other words, it is assumed that a is expressed in the eigenbasis of h.
c: complex
        Scalar factor which multiplies the sum of local generators.
Returns:
b: list of length N of ndarrays of a shape (Dl_b,Dr_b,d,d) for OBC (Dl_b, Dr_b can vary between sites) or ndarray of a shape (D_b,D_b,d,d,N) for PBC
Commutator [a, c*sum{h}] in MPO representation.
"""
if np.linalg.norm(h-np.diag(np.diag(h))) > 10**-10:
        warnings.warn('Generator h has to be diagonal in the computational basis, or in other words it is assumed that a is expressed in the eigenbasis of h.')
if BC == 'O':
bh = [0]*N
b = [0]*N
for x in range(N):
da = np.shape(a[x])[2]
bda1 = np.shape(a[x])[0]
bda2 = np.shape(a[x])[1]
if x == 0:
bdbh1 = 1
bdbh2 = 2
bh[x] = np.zeros((bdbh1,bdbh2,d,d),dtype=complex)
for nx in range(d):
for nxp in range(d):
bh[x][:,:,nx,nxp] = np.array([[c*(h[nxp,nxp]-h[nx,nx]),1]])
elif x > 0 and x < N-1:
bdbh1 = 2
bdbh2 = 2
bh[x] = np.zeros((bdbh1,bdbh2,d,d),dtype=complex)
for nx in range(d):
for nxp in range(d):
bh[x][:,:,nx,nxp] = np.array([[1,0],[c*(h[nxp,nxp]-h[nx,nx]),1]])
elif x == N-1:
bdbh1 = 2
bdbh2 = 1
bh[x] = np.zeros((bdbh1,bdbh2,d,d),dtype=complex)
for nx in range(d):
for nxp in range(d):
bh[x][:,:,nx,nxp] = np.array([[1],[c*(h[nxp,nxp]-h[nx,nx])]])
if da == d:
# a is operator
b[x] = np.zeros((bdbh1*bda1,bdbh2*bda2,d,d),dtype=complex)
for nx in range(d):
for nxp in range(d):
b[x][:,:,nx,nxp] = np.kron(bh[x][:,:,nx,nxp],a[x][:,:,nx,nxp])
elif da == d**2:
# a is superoperator (vectorized channel)
bh[x] = np.reshape(bh[x],(bdbh1,bdbh2,d**2),order='F')
b[x] = np.zeros((bdbh1*bda1,bdbh2*bda2,d**2,d**2),dtype=complex)
for nx in range(d**2):
for nxp in range(d**2):
b[x][:,:,nx,nxp] = np.kron(bh[x][:,:,nx],a[x][:,:,nx,nxp])
elif BC == 'P':
da = np.shape(a)[2]
bda = np.shape(a)[0]
if N == 1:
bdbh = 1
else:
bdbh = 2
bh = np.zeros((bdbh,bdbh,d,d,N),dtype=complex)
for nx in range(d):
for nxp in range(d):
if N == 1:
bh[:,:,nx,nxp,0] = c*(h[nxp,nxp]-h[nx,nx])
else:
bh[:,:,nx,nxp,0] = np.array([[c*(h[nxp,nxp]-h[nx,nx]),1],[0,0]])
for x in range(1,N-1):
bh[:,:,nx,nxp,x] = np.array([[1,0],[c*(h[nxp,nxp]-h[nx,nx]),1]])
bh[:,:,nx,nxp,N-1] = np.array([[1,0],[c*(h[nxp,nxp]-h[nx,nx]),0]])
if da == d:
# a is operator
b = np.zeros((bdbh*bda,bdbh*bda,d,d,N),dtype=complex)
for nx in range(d):
for nxp in range(d):
for x in range(N):
b[:,:,nx,nxp,x] = np.kron(bh[:,:,nx,nxp,x],a[:,:,nx,nxp,x])
elif da == d**2:
# a is superoperator (vectorized channel)
bh = np.reshape(bh,(bdbh,bdbh,d**2,N),order='F')
b = np.zeros((bdbh*bda,bdbh*bda,d**2,d**2,N),dtype=complex)
for nx in range(d**2):
for nxp in range(d**2):
for x in range(N):
b[:,:,nx,nxp,x] = np.kron(bh[:,:,nx,x],a[:,:,nx,nxp,x])
return b
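# The bh tensors above realize the standard bond-dimension-2 MPO of a sum of
# single-site terms: writing t_x = c*(h[nxp,nxp]-h[nx,nx]) for the entry
# selected by the physical indices at site x, the site matrices are
#   [t_x, 1] (first site),  [[1,0],[t_x,1]] (bulk),  [[1],[t_x]] (last site),
# so contracting the chain yields sum_x t_x. Because h is diagonal, the
# commutator [a, c*sum{h}] acts entrywise on a, and np.kron of the site
# matrices with the tensors of a assembles the result.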
def fin_enlarge_bdl(cold,factor):
"""
Enlarge bond dimension of SLD MPO. Function for finite size systems.
Parameters:
cold: SLD MPO, expected list of length n of ndarrays of a shape (bd,bd,d,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,d,n) for PBC
    factor: factor which determines the average relation between the old and the newly added values of the SLD MPO
Returns:
c: SLD MPO with bd += 1
"""
rng = np.random.default_rng()
if type(cold) is list:
n = len(cold)
if n == 1:
warnings.warn('Tensor networks with OBC and length one have to have bond dimension equal to one.')
else:
c = [0]*n
x = 0
d = np.shape(cold[x])[2]
bdl1 = 1
bdl2 = np.shape(cold[x])[1]+1
c[x] = np.zeros((bdl1,bdl2,d,d),dtype=complex)
for nx in range(d):
for nxp in range(d):
meanrecold = np.sum(np.abs(np.real(cold[x][:,:,nx,nxp])))/(bdl2-1)
meanimcold = np.sum(np.abs(np.imag(cold[x][:,:,nx,nxp])))/(bdl2-1)
c[x][:,:,nx,nxp] = (meanrecold*rng.random((bdl1,bdl2))+1j*meanimcold*rng.random((bdl1,bdl2)))*factor
c[x] = (c[x] + np.conj(np.moveaxis(c[x],2,3)))/2
            c[x][0:bdl1,0:bdl2-1,:,:] = cold[x]
for x in range(1,n-1):
d = np.shape(cold[x])[2]
bdl1 = np.shape(cold[x])[0]+1
bdl2 = np.shape(cold[x])[1]+1
c[x] = np.zeros((bdl1,bdl2,d,d),dtype=complex)
for nx in range(d):
for nxp in range(d):
meanrecold = np.sum(np.abs(np.real(cold[x][:,:,nx,nxp])))/((bdl1-1)*(bdl2-1))
meanimcold = np.sum(np.abs(np.imag(cold[x][:,:,nx,nxp])))/((bdl1-1)*(bdl2-1))
c[x][:,:,nx,nxp] = (meanrecold*rng.random((bdl1,bdl2))+1j*meanimcold*rng.random((bdl1,bdl2)))*factor
c[x] = (c[x] + np.conj(np.moveaxis(c[x],2,3)))/2
c[x][0:bdl1-1,0:bdl2-1,:,:] = cold[x]
x = n-1
d = np.shape(cold[x])[2]
bdl1 = np.shape(cold[x])[0]+1
bdl2 = 1
c[x] = np.zeros((bdl1,bdl2,d,d),dtype=complex)
for nx in range(d):
for nxp in range(d):
meanrecold = np.sum(np.abs(np.real(cold[x][:,:,nx,nxp])))/(bdl1-1)
meanimcold = np.sum(np.abs(np.imag(cold[x][:,:,nx,nxp])))/(bdl1-1)
c[x][:,:,nx,nxp] = (meanrecold*rng.random((bdl1,bdl2))+1j*meanimcold*rng.random((bdl1,bdl2)))*factor
c[x] = (c[x] + np.conj(np.moveaxis(c[x],2,3)))/2
            c[x][0:bdl1-1,0:bdl2,:,:] = cold[x]
elif type(cold) is np.ndarray:
n = np.shape(cold)[4]
d = np.shape(cold)[2]
bdl = np.shape(cold)[0]+1
c = np.zeros((bdl,bdl,d,d,n),dtype=complex)
for nx in range(d):
for nxp in range(d):
for x in range(n):
meanrecold = np.sum(np.abs(np.real(cold[:,:,nx,nxp,x])))/(bdl-1)**2
meanimcold = np.sum(np.abs(np.imag(cold[:,:,nx,nxp,x])))/(bdl-1)**2
c[:,:,nx,nxp,x] = (meanrecold*rng.random((bdl,bdl))+1j*meanimcold*rng.random((bdl,bdl)))*factor
c = (c + np.conj(np.moveaxis(c,2,3)))/2
c[0:bdl-1,0:bdl-1,:,:,:] = cold
return c
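# Minimal usage sketch (hedged example): a random bond-dimension-1 SLD MPO on
# 3 sites with d = 2 and OBC is padded to bond dimension 2; the old tensor
# values are kept and the new entries are random numbers of comparable
# magnitude, scaled by factor:
#
#   rng_ex = np.random.default_rng(0)
#   c_ex = [rng_ex.random((1,1,2,2)) + 0j for _ in range(3)]
#   c_ex = fin_enlarge_bdl(c_ex, 0.5)
#   # shapes of c_ex are now (1,2,2,2), (2,2,2,2), (2,1,2,2)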
def fin_enlarge_bdpsi(a0old,factor):
"""
Enlarge bond dimension of wave function MPS. Function for finite size systems.
Parameters:
a0old: wave function MPS, expected list of length n of ndarrays of a shape (bd,bd,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,n) for PBC
    factor: factor which determines the average relation between the old and the newly added values of the wave function MPS
Returns:
a0: wave function MPS with bd += 1
"""
rng = np.random.default_rng()
if type(a0old) is list:
n = len(a0old)
if n == 1:
warnings.warn('Tensor networks with OBC and length one have to have bond dimension equal to one.')
else:
a0 = [0]*n
x = 0
d = np.shape(a0old[x])[2]
bdpsi1 = 1
bdpsi2 = np.shape(a0old[x])[1]+1
a0[x] = np.zeros((bdpsi1,bdpsi2,d),dtype=complex)
for nx in range(d):
meanrea0old = np.sum(np.abs(np.real(a0old[x][:,:,nx])))/(bdpsi2-1)
meanima0old = np.sum(np.abs(np.imag(a0old[x][:,:,nx])))/(bdpsi2-1)
a0[x][:,:,nx] = (meanrea0old*rng.random((bdpsi1,bdpsi2))+1j*meanima0old*rng.random((bdpsi1,bdpsi2)))*factor
            a0[x][0:bdpsi1,0:bdpsi2-1,:] = a0old[x]
for x in range(1,n-1):
d = np.shape(a0old[x])[2]
bdpsi1 = np.shape(a0old[x])[0]+1
bdpsi2 = np.shape(a0old[x])[1]+1
a0[x] = np.zeros((bdpsi1,bdpsi2,d),dtype=complex)
for nx in range(d):
meanrea0old = np.sum(np.abs(np.real(a0old[x][:,:,nx])))/((bdpsi1-1)*(bdpsi2-1))
meanima0old = np.sum(np.abs(np.imag(a0old[x][:,:,nx])))/((bdpsi1-1)*(bdpsi2-1))
a0[x][:,:,nx] = (meanrea0old*rng.random((bdpsi1,bdpsi2))+1j*meanima0old*rng.random((bdpsi1,bdpsi2)))*factor
a0[x][0:bdpsi1-1,0:bdpsi2-1,:] = a0old[x]
x = n-1
d = np.shape(a0old[x])[2]
bdpsi1 = np.shape(a0old[x])[0]+1
bdpsi2 = 1
a0[x] = np.zeros((bdpsi1,bdpsi2,d),dtype=complex)
for nx in range(d):
meanrea0old = np.sum(np.abs(np.real(a0old[x][:,:,nx])))/(bdpsi1-1)
meanima0old = np.sum(np.abs(np.imag(a0old[x][:,:,nx])))/(bdpsi1-1)
a0[x][:,:,nx] = (meanrea0old*rng.random((bdpsi1,bdpsi2))+1j*meanima0old*rng.random((bdpsi1,bdpsi2)))*factor
            a0[x][0:bdpsi1-1,0:bdpsi2,:] = a0old[x]
tensors = [np.conj(a0[n-1]),a0[n-1]]
legs = [[-1,-3,1],[-2,-4,1]]
r1 = ncon(tensors,legs)
a0[n-1] = a0[n-1]/np.sqrt(np.linalg.norm(np.reshape(r1,-1,order='F')))
tensors = [np.conj(a0[n-1]),a0[n-1]]
legs = [[-1,-3,1],[-2,-4,1]]
r2 = ncon(tensors,legs)
for x in range(n-2,0,-1):
tensors = [np.conj(a0[x]),a0[x],r2]
legs = [[-1,2,1],[-2,3,1],[2,3,-3,-4]]
r1 = ncon(tensors,legs)
a0[x] = a0[x]/np.sqrt(np.linalg.norm(np.reshape(r1,-1,order='F')))
tensors = [np.conj(a0[x]),a0[x],r2]
legs = [[-1,2,1],[-2,3,1],[2,3,-3,-4]]
r2 = ncon(tensors,legs)
tensors = [np.conj(a0[0]),a0[0],r2]
legs = [[4,2,1],[5,3,1],[2,3,4,5]]
r1 = ncon(tensors,legs)
a0[0] = a0[0]/np.sqrt(np.abs(r1))
elif type(a0old) is np.ndarray:
n = np.shape(a0old)[3]
d = np.shape(a0old)[2]
bdpsi = np.shape(a0old)[0]+1
a0 = np.zeros((bdpsi,bdpsi,d,n),dtype=complex)
for nx in range(d):
for x in range(n):
meanrea0old = np.sum(np.abs(np.real(a0old[:,:,nx,x])))/(bdpsi-1)**2
meanima0old = np.sum(np.abs(np.imag(a0old[:,:,nx,x])))/(bdpsi-1)**2
a0[:,:,nx,x] = (meanrea0old*rng.random((bdpsi,bdpsi))+1j*meanima0old*rng.random((bdpsi,bdpsi)))*factor
a0[0:bdpsi-1,0:bdpsi-1,:,:] = a0old
if n == 1:
tensors = [np.conj(a0[:,:,:,0]),a0[:,:,:,0]]
legs = [[2,2,1],[3,3,1]]
r1 = ncon(tensors,legs)
a0[:,:,:,0] = a0[:,:,:,0]/np.sqrt(np.abs(r1))
else:
tensors = [np.conj(a0[:,:,:,n-1]),a0[:,:,:,n-1]]
legs = [[-1,-3,1],[-2,-4,1]]
r1 = ncon(tensors,legs)
a0[:,:,:,n-1] = a0[:,:,:,n-1]/np.sqrt(np.linalg.norm(np.reshape(r1,-1,order='F')))
tensors = [np.conj(a0[:,:,:,n-1]),a0[:,:,:,n-1]]
legs = [[-1,-3,1],[-2,-4,1]]
r2 = ncon(tensors,legs)
for x in range(n-2,0,-1):
tensors = [np.conj(a0[:,:,:,x]),a0[:,:,:,x],r2]
legs = [[-1,2,1],[-2,3,1],[2,3,-3,-4]]
r1 = ncon(tensors,legs)
a0[:,:,:,x] = a0[:,:,:,x]/np.sqrt(np.linalg.norm(np.reshape(r1,-1,order='F')))
tensors = [np.conj(a0[:,:,:,x]),a0[:,:,:,x],r2]
legs = [[-1,2,1],[-2,3,1],[2,3,-3,-4]]
r2 = ncon(tensors,legs)
tensors = [np.conj(a0[:,:,:,0]),a0[:,:,:,0],r2]
legs = [[4,2,1],[5,3,1],[2,3,4,5]]
r1 = ncon(tensors,legs)
a0[:,:,:,0] = a0[:,:,:,0]/np.sqrt(np.abs(r1))
return a0
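# Note on the sweeps above: padding with random entries destroys the
# normalization of the MPS, so the right-to-left contraction rescales each
# tensor by the norm of the accumulated overlap of a0 with itself, and the
# final division by sqrt(|r1|) restores <a0|a0> = 1.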
#########################################
# 1.2.1 Problems with exact derivative. #
#########################################
def fin_FoM_FoMD_optbd(n,d,bc,ch,chp,cini=None,a0ini=None,imprecision=10**-2,bdlmax=100,alwaysbdlmax=False,lherm=True,bdpsimax=100,alwaysbdpsimax=False):
"""
Iterative optimization of FoM/FoMD over SLD MPO and initial wave function MPS and also check of convergence in bond dimensions. Function for finite size systems.
Parameters:
n: number of sites in TN
d: dimension of local Hilbert space (dimension of physical index)
bc: boundary conditions, 'O' for OBC, 'P' for PBC
ch: MPO for quantum channel, expected list of length n of ndarrays of a shape (bd,bd,d**2,d**2) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d**2,d**2,n) for PBC
chp: MPO for generalized derivative of quantum channel, expected list of length n of ndarrays of a shape (bd,bd,d**2,d**2) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d**2,d**2,n) for PBC
cini: initial MPO for SLD, expected list of length n of ndarrays of a shape (bd,bd,d,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,d,n) for PBC
a0ini: initial MPS for initial wave function, expected list of length n of ndarrays of a shape (bd,bd,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,n) for PBC
imprecision: expected imprecision of the end results, default value is 10**-2
bdlmax: maximal value of bd for SLD MPO, default value is 100
    alwaysbdlmax: boolean value, True if the maximal value of bd for the SLD MPO has to be reached, otherwise False (default value)
lherm: boolean value, True (default value) when Hermitian gauge is imposed on SLD MPO, otherwise False
bdpsimax: maximal value of bd for initial wave function MPS, default value is 100
    alwaysbdpsimax: boolean value, True if the maximal value of bd for the initial wave function MPS has to be reached, otherwise False (default value)
Returns:
result: optimal value of FoM/FoMD
    resultm: matrix describing FoM/FoMD as a function of bd of the SLD MPO [rows] and of the initial wave function MPS [columns]
c: optimal MPO for SLD
a0: optimal MPS for initial wave function
"""
while True:
if a0ini is None:
bdpsi = 1
a0 = np.zeros(d,dtype=complex)
for i in range(d):
a0[i] = np.sqrt(math.comb(d-1,i))*2**(-(d-1)/2) # prod
# a0[i] = np.sqrt(2/(d+1))*np.sin((1+i)*np.pi/(d+1)) # sine
if bc == 'O':
a0 = a0[np.newaxis,np.newaxis,:]
a0 = [a0]*n
elif bc == 'P':
a0 = a0[np.newaxis,np.newaxis,:,np.newaxis]
a0 = np.tile(a0,(1,1,1,n))
else:
a0 = a0ini
if bc == 'O':
bdpsi = max([np.shape(a0[i])[0] for i in range(n)])
a0 = [a0[i].astype(complex) for i in range(n)]
elif bc == 'P':
bdpsi = np.shape(a0)[0]
a0 = a0.astype(complex)
if cini is None:
bdl = 1
rng = np.random.default_rng()
if bc == 'O':
c = [0]*n
c[0] = (rng.random((1,bdl,d,d)) + 1j*rng.random((1,bdl,d,d)))/bdl
c[0] = (c[0] + np.conj(np.moveaxis(c[0],2,3)))/2
for x in range(1,n-1):
c[x] = (rng.random((bdl,bdl,d,d)) + 1j*rng.random((bdl,bdl,d,d)))/bdl
c[x] = (c[x] + np.conj(np.moveaxis(c[x],2,3)))/2
c[n-1] = (rng.random((bdl,1,d,d)) + 1j*rng.random((bdl,1,d,d)))/bdl
c[n-1] = (c[n-1] + np.conj(np.moveaxis(c[n-1],2,3)))/2
elif bc == 'P':
c = (rng.random((bdl,bdl,d,d,n)) + 1j*rng.random((bdl,bdl,d,d,n)))/bdl
c = (c + np.conj(np.moveaxis(c,2,3)))/2
else:
c = cini
if bc == 'O':
bdl = max([np.shape(c[i])[0] for i in range(n)])
c = [c[i].astype(complex) for i in range(n)]
elif bc == 'P':
bdl = np.shape(c)[0]
c = c.astype(complex)
resultm = np.zeros((bdlmax,bdpsimax),dtype=float)
resultm[bdl-1,bdpsi-1],c,a0 = fin_FoM_FoMD_optm(n,d,bc,c,a0,ch,chp,imprecision,lherm)
if bc == 'O' and n == 1:
resultm = resultm[0:bdl,0:bdpsi]
result = resultm[bdl-1,bdpsi-1]
return result,resultm,c,a0
factorv = np.array([0.5,0.25,0.1,1,0.01])
problem = False
while True:
while True:
if bdpsi == bdpsimax:
break
else:
a0old = a0
bdpsi += 1
i = 0
while True:
a0 = fin_enlarge_bdpsi(a0,factorv[i])
resultm[bdl-1,bdpsi-1],cnew,a0new = fin_FoM_FoMD_optm(n,d,bc,c,a0,ch,chp,imprecision,lherm)
if resultm[bdl-1,bdpsi-1] >= resultm[bdl-1,bdpsi-2]:
break
i += 1
if i == np.size(factorv):
problem = True
break
if problem:
break
if not(alwaysbdpsimax) and resultm[bdl-1,bdpsi-1] < (1+imprecision)*resultm[bdl-1,bdpsi-2]:
bdpsi += -1
a0 = a0old
a0copy = a0new
ccopy = cnew
break
else:
a0 = a0new
c = cnew
if problem:
break
if bdl == bdlmax:
if bdpsi == bdpsimax:
resultm = resultm[0:bdl,0:bdpsi]
result = resultm[bdl-1,bdpsi-1]
else:
a0 = a0copy
c = ccopy
resultm = resultm[0:bdl,0:bdpsi+1]
result = resultm[bdl-1,bdpsi]
break
else:
bdl += 1
i = 0
while True:
c = fin_enlarge_bdl(c,factorv[i])
resultm[bdl-1,bdpsi-1],cnew,a0new = fin_FoM_FoMD_optm(n,d,bc,c,a0,ch,chp,imprecision,lherm)
if resultm[bdl-1,bdpsi-1] >= resultm[bdl-2,bdpsi-1]:
a0 = a0new
c = cnew
break
i += 1
if i == np.size(factorv):
problem = True
break
if problem:
break
if not(alwaysbdlmax) and resultm[bdl-1,bdpsi-1] < (1+imprecision)*resultm[bdl-2,bdpsi-1]:
if bdpsi == bdpsimax:
resultm = resultm[0:bdl,0:bdpsi]
result = resultm[bdl-1,bdpsi-1]
else:
if resultm[bdl-1,bdpsi-1] < resultm[bdl-2,bdpsi]:
a0 = a0copy
c = ccopy
resultm = resultm[0:bdl,0:bdpsi+1]
bdl += -1
bdpsi += 1
result = resultm[bdl-1,bdpsi-1]
else:
resultm = resultm[0:bdl,0:bdpsi+1]
result = resultm[bdl-1,bdpsi-1]
break
if not(problem):
break
return result,resultm,c,a0
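# Illustrative call sketch (hedged; shown for the case where the encoding
# unitary reduces to the identity at the working point, so the channel itself
# is just the composition of the before/after layers):
#
#   ch  = fin_create_channel(N, d, 'O', so_before_list + so_after_list)
#   chp = fin_create_channel_derivative(N, d, 'O', so_before_list, h, so_after_list)
#   result, resultm, c, a0 = fin_FoM_FoMD_optbd(N, d, 'O', ch, chp)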
def fin_FoM_optbd(n,d,bc,a,b,cini=None,imprecision=10**-2,bdlmax=100,alwaysbdlmax=False,lherm=True):
"""
Optimization of FoM over SLD MPO and also check of convergence in bond dimension. Function for finite size systems.
Parameters:
n: number of sites in TN
d: dimension of local Hilbert space (dimension of physical index)
bc: boundary conditions, 'O' for OBC, 'P' for PBC
a: MPO for density matrix, expected list of length n of ndarrays of a shape (bd,bd,d,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,d,n) for PBC
b: MPO for generalized derivative of density matrix, expected list of length n of ndarrays of a shape (bd,bd,d,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,d,n) for PBC
cini: initial MPO for SLD, expected list of length n of ndarrays of a shape (bd,bd,d,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,d,n) for PBC
imprecision: expected imprecision of the end results, default value is 10**-2
bdlmax: maximal value of bd for SLD MPO, default value is 100
    alwaysbdlmax: boolean value, True if the maximal value of bd for the SLD MPO has to be reached, otherwise False (default value)
lherm: boolean value, True (default value) when Hermitian gauge is imposed on SLD MPO, otherwise False
Returns:
result: optimal value of FoM
    resultv: vector describing FoM as a function of bd of the SLD MPO
c: optimal MPO for SLD
"""
while True:
if cini is None:
bdl = 1
rng = np.random.default_rng()
if bc == 'O':
c = [0]*n
c[0] = (rng.random((1,bdl,d,d)) + 1j*rng.random((1,bdl,d,d)))/bdl
c[0] = (c[0] + np.conj(np.moveaxis(c[0],2,3)))/2
for x in range(1,n-1):
c[x] = (rng.random((bdl,bdl,d,d)) + 1j*rng.random((bdl,bdl,d,d)))/bdl
c[x] = (c[x] + np.conj(np.moveaxis(c[x],2,3)))/2
c[n-1] = (rng.random((bdl,1,d,d)) + 1j*rng.random((bdl,1,d,d)))/bdl
c[n-1] = (c[n-1] + np.conj(np.moveaxis(c[n-1],2,3)))/2
elif bc == 'P':
c = (rng.random((bdl,bdl,d,d,n)) + 1j*rng.random((bdl,bdl,d,d,n)))/bdl
c = (c + np.conj(np.moveaxis(c,2,3)))/2
else:
c = cini
if bc == 'O':
bdl = max([np.shape(c[i])[0] for i in range(n)])
c = [c[i].astype(complex) for i in range(n)]
elif bc == 'P':
bdl = np.shape(c)[0]
c = c.astype(complex)
resultv = np.zeros(bdlmax,dtype=float)
if bc == 'O':
resultv[bdl-1],c = fin_FoM_OBC_optm(a,b,c,imprecision,lherm)
if n == 1:
resultv = resultv[0:bdl]
result = resultv[bdl-1]
return result,resultv,c
elif bc == 'P':
resultv[bdl-1],c = fin_FoM_PBC_optm(a,b,c,imprecision,lherm)
factorv = np.array([0.5,0.25,0.1,1,0.01])
problem = False
while True:
if bdl == bdlmax:
resultv = resultv[0:bdl]
result = resultv[bdl-1]
break
else:
bdl += 1
i = 0
while True:
c = fin_enlarge_bdl(c,factorv[i])
if bc == 'O':
resultv[bdl-1],cnew = fin_FoM_OBC_optm(a,b,c,imprecision,lherm)
elif bc == 'P':
resultv[bdl-1],cnew = fin_FoM_PBC_optm(a,b,c,imprecision,lherm)
if resultv[bdl-1] >= resultv[bdl-2]:
c = cnew
break
i += 1
if i == np.size(factorv):
problem = True
break
if problem:
break
if not(alwaysbdlmax) and resultv[bdl-1] < (1+imprecision)*resultv[bdl-2]:
resultv = resultv[0:bdl]
result = resultv[bdl-1]
break
if not(problem):
break
return result,resultv,c
def fin_FoMD_optbd(n,d,bc,c2d,cpd,a0ini=None,imprecision=10**-2,bdpsimax=100,alwaysbdpsimax=False):
"""
Optimization of FoMD over initial wave function MPS and also check of convergence in bond dimension. Function for finite size systems.
Parameters:
n: number of sites in TN
d: dimension of local Hilbert space (dimension of physical index)
bc: boundary conditions, 'O' for OBC, 'P' for PBC
c2d: MPO for square of dual of SLD, expected list of length n of ndarrays of a shape (bd,bd,d,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,d,n) for PBC
cpd: MPO for dual of generalized derivative of SLD, expected list of length n of ndarrays of a shape (bd,bd,d,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,d,n) for PBC
a0ini: initial MPS for initial wave function, expected list of length n of ndarrays of a shape (bd,bd,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,n) for PBC
imprecision: expected imprecision of the end results, default value is 10**-2
bdpsimax: maximal value of bd for initial wave function MPS, default value is 100
    alwaysbdpsimax: boolean value, True if the maximal value of bd for the initial wave function MPS has to be reached, otherwise False (default value)
Returns:
result: optimal value of FoMD
    resultv: vector describing FoMD as a function of bd of the initial wave function MPS
a0: optimal MPS for initial wave function
"""
while True:
if a0ini is None:
bdpsi = 1
a0 = np.zeros(d,dtype=complex)
for i in range(d):
a0[i] = np.sqrt(math.comb(d-1,i))*2**(-(d-1)/2) # prod
# a0[i] = np.sqrt(2/(d+1))*np.sin((1+i)*np.pi/(d+1)) # sine
if bc == 'O':
a0 = a0[np.newaxis,np.newaxis,:]
a0 = [a0]*n
elif bc == 'P':
a0 = a0[np.newaxis,np.newaxis,:,np.newaxis]
a0 = np.tile(a0,(1,1,1,n))
else:
a0 = a0ini
if bc == 'O':
bdpsi = max([np.shape(a0[i])[0] for i in range(n)])
a0 = [a0[i].astype(complex) for i in range(n)]
elif bc == 'P':
bdpsi = np.shape(a0)[0]
a0 = a0.astype(complex)
resultv = np.zeros(bdpsimax,dtype=float)
if bc == 'O':
resultv[bdpsi-1],a0 = fin_FoMD_OBC_optm(c2d,cpd,a0,imprecision)
if n == 1:
resultv = resultv[0:bdpsi]
result = resultv[bdpsi-1]
return result,resultv,a0
elif bc == 'P':
resultv[bdpsi-1],a0 = fin_FoMD_PBC_optm(c2d,cpd,a0,imprecision)
factorv = np.array([0.5,0.25,0.1,1,0.01])
problem = False
while True:
if bdpsi == bdpsimax:
resultv = resultv[0:bdpsi]
result = resultv[bdpsi-1]
break
else:
bdpsi += 1
i = 0
while True:
a0 = fin_enlarge_bdpsi(a0,factorv[i])
if bc == 'O':
resultv[bdpsi-1],a0new = fin_FoMD_OBC_optm(c2d,cpd,a0,imprecision)
elif bc == 'P':
resultv[bdpsi-1],a0new = fin_FoMD_PBC_optm(c2d,cpd,a0,imprecision)
if resultv[bdpsi-1] >= resultv[bdpsi-2]:
a0 = a0new
break
i += 1
if i == np.size(factorv):
problem = True
break
if problem:
break
if not(alwaysbdpsimax) and resultv[bdpsi-1] < (1+imprecision)*resultv[bdpsi-2]:
resultv = resultv[0:bdpsi]
result = resultv[bdpsi-1]
break
if not(problem):
break
return result,resultv,a0
def fin_FoM_FoMD_optm(n,d,bc,c,a0,ch,chp,imprecision=10**-2,lherm=True):
"""
Iterative optimization of FoM/FoMD over SLD MPO and initial wave function MPS. Function for finite size systems.
Parameters:
n: number of sites in TN
d: dimension of local Hilbert space (dimension of physical index)
bc: boundary conditions, 'O' for OBC, 'P' for PBC
c: MPO for SLD, expected list of length n of ndarrays of a shape (bd,bd,d,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,d,n) for PBC
a0: MPS for initial wave function, expected list of length n of ndarrays of a shape (bd,bd,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,n) for PBC
ch: MPO for quantum channel, expected list of length n of ndarrays of a shape (bd,bd,d**2,d**2) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d**2,d**2,n) for PBC
chp: MPO for generalized derivative of quantum channel, expected list of length n of ndarrays of a shape (bd,bd,d**2,d**2) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d**2,d**2,n) for PBC
imprecision: expected imprecision of the end results, default value is 10**-2
lherm: boolean value, True (default value) when Hermitian gauge is imposed on SLD MPO, otherwise False
Returns:
fval: optimal value of FoM/FoMD
c: optimal MPO for SLD
a0: optimal MPS for initial wave function
"""
relunc_f = 0.1*imprecision
if bc == 'O':
chd = [0]*n
chpd = [0]*n
for x in range(n):
chd[x] = np.conj(np.moveaxis(ch[x],2,3))
chpd[x] = np.conj(np.moveaxis(chp[x],2,3))
elif bc == 'P':
chd = np.conj(np.moveaxis(ch,2,3))
chpd = np.conj(np.moveaxis(chp,2,3))
f = np.array([])
iter_f = 0
while True:
a0_dm = wave_function_to_density_matrix(a0)
a = channel_acting_on_operator(ch,a0_dm)
b = channel_acting_on_operator(chp,a0_dm)
if bc == 'O':
fom,c = fin_FoM_OBC_optm(a,b,c,imprecision,lherm)
elif bc == 'P':
fom,c = fin_FoM_PBC_optm(a,b,c,imprecision,lherm)
f = np.append(f,fom)
if iter_f >= 2 and np.std(f[-4:])/np.mean(f[-4:]) <= relunc_f:
break
if bc == 'O':
c2 = [0]*n
for x in range(n):
bdl1 = np.shape(c[x])[0]
bdl2 = np.shape(c[x])[1]
c2[x] = np.zeros((bdl1**2,bdl2**2,d,d),dtype=complex)
for nx in range(d):
for nxp in range(d):
for nxpp in range(d):
c2[x][:,:,nx,nxp] = c2[x][:,:,nx,nxp]+np.kron(c[x][:,:,nx,nxpp],c[x][:,:,nxpp,nxp])
elif bc == 'P':
bdl = np.shape(c)[0]
c2 = np.zeros((bdl**2,bdl**2,d,d,n),dtype=complex)
for nx in range(d):
for nxp in range(d):
for nxpp in range(d):
for x in range(n):
c2[:,:,nx,nxp,x] = c2[:,:,nx,nxp,x]+np.kron(c[:,:,nx,nxpp,x],c[:,:,nxpp,nxp,x])
c2d = channel_acting_on_operator(chd,c2)
cpd = channel_acting_on_operator(chpd,c)
if bc == 'O':
fomd,a0 = fin_FoMD_OBC_optm(c2d,cpd,a0,imprecision)
elif bc == 'P':
fomd,a0 = fin_FoMD_PBC_optm(c2d,cpd,a0,imprecision)
f = np.append(f,fomd)
iter_f += 1
fval = f[-1]
return fval,c,a0
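# See-saw structure of the loop above: with a0 fixed, the state
# a = ch(|a0><a0|) and its derivative b are formed and the FoM
# 2*Tr(c*b) - Tr(c*a*c) is maximized over the SLD MPO c; with c fixed, the
# dual maps chd, chpd pull c**2 and c back onto the input state and the FoMD
# 2*<psi|cpd|psi> - <psi|c2d|psi> is maximized over a0. The loop stops once
# the last four recorded values of the figure of merit fluctuate by less than
# relunc_f in relative terms.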
def fin_FoM_OBC_optm(a,b,c,imprecision=10**-2,lherm=True):
"""
Optimization of FoM over MPO for SLD. Function for finite size systems with OBC.
Parameters:
a: MPO for density matrix, expected list of length n of ndarrays of a shape (bd,bd,d,d) (bd can vary between sites)
b: MPO for generalized derivative of density matrix, expected list of length n of ndarrays of a shape (bd,bd,d,d) (bd can vary between sites)
c: MPO for SLD, expected list of length n of ndarrays of a shape (bd,bd,d,d) (bd can vary between sites)
imprecision: expected imprecision of the end results, default value is 10**-2
lherm: boolean value, True (default value) when Hermitian gauge is imposed on SLD MPO, otherwise False
Returns:
fomval: optimal value of FoM
c: optimal MPO for SLD
"""
n = len(c)
tol_fom = 0.1*imprecision/n**2
if n == 1:
if np.shape(a[0])[0] == 1 and np.shape(b[0])[0] == 1 and np.shape(c[0])[0] == 1:
d = np.shape(c[0])[2]
tensors = [b[0][0,0,:,:]]
legs = [[-2,-1]]
l1 = ncon(tensors,legs)
l1 = np.reshape(l1,-1,order='F')
tensors = [a[0][0,0,:,:],np.eye(d)]
legs = [[-2,-3],[-4,-1]]
l2 = ncon(tensors,legs)
l2 = np.reshape(l2,(d*d,d*d),order='F')
dl2 = l2+l2.T
dl1 = 2*l1
dl2pinv = np.linalg.pinv(dl2,tol_fom)
dl2pinv = (dl2pinv+dl2pinv.T)/2
cv = dl2pinv @ dl1
c[0][0,0,:,:] = np.reshape(cv,(d,d),order='F')
if lherm:
c[0] = (c[0]+np.conj(np.moveaxis(c[0],2,3)))/2
cv = np.reshape(c[0],-1,order='F')
fomval = np.real(2*cv @ l1 - cv @ l2 @ cv)
else:
warnings.warn('Tensor networks with OBC and length one have to have bond dimension equal to one.')
else:
relunc_fom = 0.1*imprecision
l1f = [0]*n
l2f = [0]*n
fom = np.array([])
iter_fom = 0
while True:
tensors = [c[n-1],b[n-1]]
legs = [[-1,-3,1,2],[-2,-4,2,1]]
l1f[n-2] = ncon(tensors,legs)
l1f[n-2] = l1f[n-2][:,:,0,0]
tensors = [c[n-1],a[n-1],c[n-1]]
legs = [[-1,-4,1,2],[-2,-5,2,3],[-3,-6,3,1]]
l2f[n-2] = ncon(tensors,legs)
l2f[n-2] = l2f[n-2][:,:,:,0,0,0]
for x in range(n-2,0,-1):
tensors = [c[x],b[x],l1f[x]]
legs = [[-1,3,1,2],[-2,4,2,1],[3,4]]
l1f[x-1] = ncon(tensors,legs)
tensors = [c[x],a[x],c[x],l2f[x]]
legs = [[-1,4,1,2],[-2,5,2,3],[-3,6,3,1],[4,5,6]]
l2f[x-1] = ncon(tensors,legs)
bdl1,bdl2,d,d = np.shape(c[0])
tensors = [b[0],l1f[0]]
legs = [[-5,1,-4,-3],[-2,1]]
l1 = ncon(tensors,legs)
l1 = np.reshape(l1,-1,order='F')
tensors = [a[0],np.eye(d),l2f[0]]
legs = [[-9,1,-4,-7],[-8,-3],[-2,1,-6]]
l2 = ncon(tensors,legs)
l2 = np.reshape(l2,(bdl1*bdl2*d*d,bdl1*bdl2*d*d),order='F')
dl2 = l2+l2.T
dl1 = 2*l1
dl2pinv = np.linalg.pinv(dl2,tol_fom)
dl2pinv = (dl2pinv+dl2pinv.T)/2
cv = dl2pinv @ dl1
c[0] = np.reshape(cv,(bdl1,bdl2,d,d),order='F')
if lherm:
c[0] = (c[0]+np.conj(np.moveaxis(c[0],2,3)))/2
cv = np.reshape(c[0],-1,order='F')
fom = np.append(fom,np.real(2*cv @ l1 - cv @ l2 @ cv))
tensors = [c[0],b[0]]
legs = [[-3,-1,1,2],[-4,-2,2,1]]
l1c = ncon(tensors,legs)
l1c = l1c[:,:,0,0]
tensors = [c[0],a[0],c[0]]
legs = [[-4,-1,1,2],[-5,-2,2,3],[-6,-3,3,1]]
l2c = ncon(tensors,legs)
l2c = l2c[:,:,:,0,0,0]
for x in range(1,n-1):
bdl1,bdl2,d,d = np.shape(c[x])
tensors = [l1c,b[x],l1f[x]]
legs = [[-1,1],[1,2,-4,-3],[-2,2]]
l1 = ncon(tensors,legs)
l1 = np.reshape(l1,-1,order='F')
tensors = [l2c,a[x],np.eye(d),l2f[x]]
legs = [[-1,1,-5],[1,2,-4,-7],[-8,-3],[-2,2,-6]]
l2 = ncon(tensors,legs)
l2 = np.reshape(l2,(bdl1*bdl2*d*d,bdl1*bdl2*d*d),order='F')
dl2 = l2+l2.T
dl1 = 2*l1
dl2pinv = np.linalg.pinv(dl2,tol_fom)
dl2pinv = (dl2pinv+dl2pinv.T)/2
cv = dl2pinv @ dl1
c[x] = np.reshape(cv,(bdl1,bdl2,d,d),order='F')
if lherm:
c[x] = (c[x]+np.conj(np.moveaxis(c[x],2,3)))/2
cv = np.reshape(c[x],-1,order='F')
fom = np.append(fom,np.real(2*cv @ l1 - cv @ l2 @ cv))
tensors = [l1c,c[x],b[x]]
legs = [[3,4],[3,-1,1,2],[4,-2,2,1]]
l1c = ncon(tensors,legs)
tensors = [l2c,c[x],a[x],c[x]]
legs = [[4,5,6],[4,-1,1,2],[5,-2,2,3],[6,-3,3,1]]
l2c = ncon(tensors,legs)
bdl1,bdl2,d,d = np.shape(c[n-1])
tensors = [l1c,b[n-1]]
legs = [[-1,1],[1,-5,-4,-3]]
l1 = ncon(tensors,legs)
l1 = np.reshape(l1,-1,order='F')
tensors = [l2c,a[n-1],np.eye(d)]
legs = [[-1,1,-5],[1,-9,-4,-7],[-8,-3]]
l2 = ncon(tensors,legs)
l2 = np.reshape(l2,(bdl1*bdl2*d*d,bdl1*bdl2*d*d),order='F')
dl2 = l2+l2.T
dl1 = 2*l1
dl2pinv = np.linalg.pinv(dl2,tol_fom)
dl2pinv = (dl2pinv+dl2pinv.T)/2
cv = dl2pinv @ dl1
c[n-1] = np.reshape(cv,(bdl1,bdl2,d,d),order='F')
if lherm:
c[n-1] = (c[n-1]+np.conj(np.moveaxis(c[n-1],2,3)))/2
cv = np.reshape(c[n-1],-1,order='F')
fom = np.append(fom,np.real(2*cv @ l1 - cv @ l2 @ cv))
iter_fom += 1
if iter_fom >= 2 and all(fom[-2*n:] > 0) and np.std(fom[-2*n:])/np.mean(fom[-2*n:]) <= relunc_fom:
break
fomval = fom[-1]
return fomval,c
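# Local update used above (summary): with all other sites contracted into the
# environments l1f/l1c and l2f/l2c, the FoM is the quadratic form
# F(cv) = 2*cv@l1 - cv@l2@cv in the vectorized site tensor cv, so the
# stationarity condition (l2 + l2.T) @ cv = 2*l1 is solved with a
# pseudo-inverse (cutoff tol_fom) to stay stable when the quadratic form is
# singular; sweeping this update over the sites gives the DMRG-style
# optimization.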
def fin_FoM_PBC_optm(a,b,c,imprecision=10**-2,lherm=True):
"""
Optimization of FoM over MPO for SLD. Function for finite size systems with PBC.
Parameters:
a: MPO for density matrix, expected ndarray of a shape (bd,bd,d,d,n)
b: MPO for generalized derivative of density matrix, expected ndarray of a shape (bd,bd,d,d,n)
c: MPO for SLD, expected ndarray of a shape (bd,bd,d,d,n)
imprecision: expected imprecision of the end results, default value is 10**-2
lherm: boolean value, True (default value) when Hermitian gauge is imposed on SLD MPO, otherwise False
Returns:
fomval: optimal value of FoM
c: optimal MPO for SLD
"""
n = np.shape(a)[4]
d = np.shape(a)[2]
bdr = np.shape(a)[0]
bdrp = np.shape(b)[0]
bdl = np.shape(c)[0]
tol_fom = 0.1*imprecision/n**2
if n == 1:
tensors = [b[:,:,:,:,0],np.eye(bdl)]
legs = [[1,1,-4,-3],[-2,-1]]
l1 = ncon(tensors,legs)
l1 = np.reshape(l1,-1,order='F')
tensors = [a[:,:,:,:,0],np.eye(d),np.eye(bdl),np.eye(bdl)]
legs = [[1,1,-4,-7],[-8,-3],[-2,-1],[-6,-5]]
l2 = ncon(tensors,legs)
l2 = np.reshape(l2,(bdl*bdl*d*d,bdl*bdl*d*d),order='F')
dl2 = l2+l2.T
dl1 = 2*l1
dl2pinv = np.linalg.pinv(dl2,tol_fom)
dl2pinv = (dl2pinv+dl2pinv.T)/2
cv = dl2pinv @ dl1
c[:,:,:,:,0] = np.reshape(cv,(bdl,bdl,d,d),order='F')
if lherm:
c[:,:,:,:,0] = (c[:,:,:,:,0]+np.conj(np.moveaxis(c[:,:,:,:,0],2,3)))/2
cv = np.reshape(c[:,:,:,:,0],-1,order='F')
fomval = np.real(2*cv @ l1 - cv @ l2 @ cv)
else:
relunc_fom = 0.1*imprecision
l1f = np.zeros((bdl,bdrp,bdl,bdrp,n-1),dtype=complex)
l2f = np.zeros((bdl,bdr,bdl,bdl,bdr,bdl,n-1),dtype=complex)
fom = np.array([])
iter_fom = 0
while True:
tensors = [c[:,:,:,:,n-1],b[:,:,:,:,n-1]]
legs = [[-1,-3,1,2],[-2,-4,2,1]]
l1f[:,:,:,:,n-2] = ncon(tensors,legs)
tensors = [c[:,:,:,:,n-1],a[:,:,:,:,n-1],c[:,:,:,:,n-1]]
legs = [[-1,-4,1,2],[-2,-5,2,3],[-3,-6,3,1]]
l2f[:,:,:,:,:,:,n-2] = ncon(tensors,legs)
for x in range(n-2,0,-1):
tensors = [c[:,:,:,:,x],b[:,:,:,:,x],l1f[:,:,:,:,x]]
legs = [[-1,3,1,2],[-2,4,2,1],[3,4,-3,-4]]
l1f[:,:,:,:,x-1] = ncon(tensors,legs)
tensors = [c[:,:,:,:,x],a[:,:,:,:,x],c[:,:,:,:,x],l2f[:,:,:,:,:,:,x]]
legs = [[-1,4,1,2],[-2,5,2,3],[-3,6,3,1],[4,5,6,-4,-5,-6]]
l2f[:,:,:,:,:,:,x-1] = ncon(tensors,legs)
tensors = [b[:,:,:,:,0],l1f[:,:,:,:,0]]
legs = [[2,1,-4,-3],[-2,1,-1,2]]
l1 = ncon(tensors,legs)
l1 = np.reshape(l1,-1,order='F')
tensors = [a[:,:,:,:,0],np.eye(d),l2f[:,:,:,:,:,:,0]]
legs = [[2,1,-4,-7],[-8,-3],[-2,1,-6,-1,2,-5]]
l2 = ncon(tensors,legs)
l2 = np.reshape(l2,(bdl*bdl*d*d,bdl*bdl*d*d),order='F')
dl2 = l2+l2.T
dl1 = 2*l1
dl2pinv = np.linalg.pinv(dl2,tol_fom)
dl2pinv = (dl2pinv+dl2pinv.T)/2
cv = dl2pinv @ dl1
c[:,:,:,:,0] = np.reshape(cv,(bdl,bdl,d,d),order='F')
if lherm:
c[:,:,:,:,0] = (c[:,:,:,:,0]+np.conj(np.moveaxis(c[:,:,:,:,0],2,3)))/2
cv = np.reshape(c[:,:,:,:,0],-1,order='F')
fom = np.append(fom,np.real(2*cv @ l1 - cv @ l2 @ cv))
tensors = [c[:,:,:,:,0],b[:,:,:,:,0]]
legs = [[-1,-3,1,2],[-2,-4,2,1]]
l1c = ncon(tensors,legs)
tensors = [c[:,:,:,:,0],a[:,:,:,:,0],c[:,:,:,:,0]]
legs = [[-1,-4,1,2],[-2,-5,2,3],[-3,-6,3,1]]
l2c = ncon(tensors,legs)
for x in range(1,n-1):
tensors = [l1c,b[:,:,:,:,x],l1f[:,:,:,:,x]]
legs = [[3,4,-1,1],[1,2,-4,-3],[-2,2,3,4]]
l1 = ncon(tensors,legs)
l1 = np.reshape(l1,-1,order='F')
tensors = [l2c,a[:,:,:,:,x],np.eye(d),l2f[:,:,:,:,:,:,x]]
legs = [[3,4,5,-1,1,-5],[1,2,-4,-7],[-8,-3],[-2,2,-6,3,4,5]]
l2 = ncon(tensors,legs)
l2 = np.reshape(l2,(bdl*bdl*d*d,bdl*bdl*d*d),order='F')
dl2 = l2+l2.T
dl1 = 2*l1
dl2pinv = np.linalg.pinv(dl2,tol_fom)
dl2pinv = (dl2pinv+dl2pinv.T)/2
cv = dl2pinv @ dl1
c[:,:,:,:,x] = np.reshape(cv,(bdl,bdl,d,d),order='F')
if lherm:
c[:,:,:,:,x] = (c[:,:,:,:,x]+np.conj(np.moveaxis(c[:,:,:,:,x],2,3)))/2
cv = np.reshape(c[:,:,:,:,x],-1,order='F')
fom = np.append(fom,np.real(2*cv @ l1 - cv @ l2 @ cv))
tensors = [l1c,c[:,:,:,:,x],b[:,:,:,:,x]]
legs = [[-1,-2,3,4],[3,-3,1,2],[4,-4,2,1]]
l1c = ncon(tensors,legs)
tensors = [l2c,c[:,:,:,:,x],a[:,:,:,:,x],c[:,:,:,:,x]]
legs = [[-1,-2,-3,4,5,6],[4,-4,1,2],[5,-5,2,3],[6,-6,3,1]]
l2c = ncon(tensors,legs)
tensors = [l1c,b[:,:,:,:,n-1]]
legs = [[-2,2,-1,1],[1,2,-4,-3]]
l1 = ncon(tensors,legs)
l1 = np.reshape(l1,-1,order='F')
tensors = [l2c,a[:,:,:,:,n-1],np.eye(d)]
legs = [[-2,2,-6,-1,1,-5],[1,2,-4,-7],[-8,-3]]
l2 = ncon(tensors,legs)
l2 = np.reshape(l2,(bdl*bdl*d*d,bdl*bdl*d*d),order='F')
dl2 = l2+l2.T
dl1 = 2*l1
dl2pinv = np.linalg.pinv(dl2,tol_fom)
dl2pinv = (dl2pinv+dl2pinv.T)/2
cv = dl2pinv @ dl1
c[:,:,:,:,n-1] = np.reshape(cv,(bdl,bdl,d,d),order='F')
if lherm:
c[:,:,:,:,n-1] = (c[:,:,:,:,n-1]+np.conj(np.moveaxis(c[:,:,:,:,n-1],2,3)))/2
cv = np.reshape(c[:,:,:,:,n-1],-1,order='F')
fom = np.append(fom,np.real(2*cv @ l1 - cv @ l2 @ cv))
iter_fom += 1
if iter_fom >= 2 and all(fom[-2*n:] > 0) and np.std(fom[-2*n:])/np.mean(fom[-2*n:]) <= relunc_fom:
break
fomval = fom[-1]
return fomval,c
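# The PBC variant above performs the same local quadratic update as the OBC
# one; the differences are purely bookkeeping: the boundary bond indices stay
# open (hence the six-leg environments l2f and l2c), no canonical form is
# available, and every site is solved with the full pseudo-inverse.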
def fin_FoMD_OBC_optm(c2d,cpd,a0,imprecision=10**-2):
"""
Optimization of FoMD over MPS for initial wave function. Function for finite size systems with OBC.
Parameters:
c2d: MPO for square of dual of SLD, expected list of length n of ndarrays of a shape (bd,bd,d,d) (bd can vary between sites)
cpd: MPO for dual of generalized derivative of SLD, expected list of length n of ndarrays of a shape (bd,bd,d,d) (bd can vary between sites)
    a0: MPS for initial wave function, expected list of length n of ndarrays of a shape (bd,bd,d) (bd can vary between sites)
imprecision: expected imprecision of the end results, default value is 10**-2
Returns:
fomdval: optimal value of FoMD
a0: optimal MPS for initial wave function
"""
n = len(a0)
if n == 1:
if np.shape(c2d[0])[0] == 1 and np.shape(cpd[0])[0] == 1 and np.shape(a0[0])[0] == 1:
d = np.shape(a0[0])[2]
tensors = [c2d[0][0,0,:,:]]
legs = [[-1,-2]]
l2d = ncon(tensors,legs)
l2d = np.reshape(l2d,(d,d),order='F')
tensors = [cpd[0][0,0,:,:]]
legs = [[-1,-2]]
lpd = ncon(tensors,legs)
lpd = np.reshape(lpd,(d,d),order='F')
eiginput = 2*lpd-l2d
eiginput = (eiginput+np.conj(eiginput).T)/2
fomdval,a0v = np.linalg.eig(eiginput)
position = np.argmax(np.real(fomdval))
a0v = np.reshape(a0v[:,position],-1,order='F')
a0v = a0v/np.sqrt(np.abs(np.conj(a0v) @ a0v))
a0[0][0,0,:] = np.reshape(a0v,(d),order='F')
fomdval = np.real(fomdval[position])
else:
warnings.warn('Tensor networks with OBC and length one have to have bond dimension equal to one.')
else:
relunc_fomd = 0.1*imprecision
l2df = [0]*n
lpdf = [0]*n
fomd = np.array([])
iter_fomd = 0
while True:
tensors = [np.conj(a0[n-1]),c2d[n-1],a0[n-1]]
legs = [[-1,-4,1],[-2,-5,1,2],[-3,-6,2]]
l2df[n-2] = ncon(tensors,legs)
l2df[n-2] = l2df[n-2][:,:,:,0,0,0]
tensors = [np.conj(a0[n-1]),cpd[n-1],a0[n-1]]
legs = [[-1,-4,1],[-2,-5,1,2],[-3,-6,2]]
lpdf[n-2] = ncon(tensors,legs)
lpdf[n-2] = lpdf[n-2][:,:,:,0,0,0]
for x in range(n-2,0,-1):
tensors = [np.conj(a0[x]),c2d[x],a0[x],l2df[x]]
legs = [[-1,3,1],[-2,4,1,2],[-3,5,2],[3,4,5]]
l2df[x-1] = ncon(tensors,legs)
tensors = [np.conj(a0[x]),cpd[x],a0[x],lpdf[x]]
legs = [[-1,3,1],[-2,4,1,2],[-3,5,2],[3,4,5]]
lpdf[x-1] = ncon(tensors,legs)
bdpsi1,bdpsi2,d = np.shape(a0[0])
tensors = [c2d[0],l2df[0]]
legs = [[-7,1,-3,-6],[-2,1,-5]]
l2d = ncon(tensors,legs)
l2d = np.reshape(l2d,(bdpsi1*bdpsi2*d,bdpsi1*bdpsi2*d),order='F')
tensors = [cpd[0],lpdf[0]]
legs = [[-7,1,-3,-6],[-2,1,-5]]
lpd = ncon(tensors,legs)
lpd = np.reshape(lpd,(bdpsi1*bdpsi2*d,bdpsi1*bdpsi2*d),order='F')
eiginput = 2*lpd-l2d
eiginput = (eiginput+np.conj(eiginput).T)/2
fomdval,a0v = np.linalg.eig(eiginput)
position = np.argmax(np.real(fomdval))
a0v = np.reshape(a0v[:,position],-1,order='F')
a0v = a0v/np.sqrt(np.abs(np.conj(a0v) @ a0v))
a0[0] = np.reshape(a0v,(bdpsi1,bdpsi2,d),order='F')
fomd = np.append(fomd,np.real(fomdval[position]))
a0[0] = np.moveaxis(a0[0],2,0)
a0[0] = np.reshape(a0[0],(d*bdpsi1,bdpsi2),order='F')
u,s,vh = np.linalg.svd(a0[0],full_matrices=False)
a0[0] = np.reshape(u,(d,bdpsi1,np.shape(s)[0]),order='F')
a0[0] = np.moveaxis(a0[0],0,2)
tensors = [np.diag(s) @ vh,a0[1]]
legs = [[-1,1],[1,-2,-3]]
a0[1] = ncon(tensors,legs)
tensors = [np.conj(a0[0]),c2d[0],a0[0]]
legs = [[-4,-1,1],[-5,-2,1,2],[-6,-3,2]]
l2dc = ncon(tensors,legs)
l2dc = l2dc[:,:,:,0,0,0]
tensors = [np.conj(a0[0]),cpd[0],a0[0]]
legs = [[-4,-1,1],[-5,-2,1,2],[-6,-3,2]]
lpdc = ncon(tensors,legs)
lpdc = lpdc[:,:,:,0,0,0]
for x in range(1,n-1):
bdpsi1,bdpsi2,d = np.shape(a0[x])
tensors = [l2dc,c2d[x],l2df[x]]
legs = [[-1,1,-4],[1,2,-3,-6],[-2,2,-5]]
l2d = ncon(tensors,legs)
l2d = np.reshape(l2d,(bdpsi1*bdpsi2*d,bdpsi1*bdpsi2*d),order='F')
tensors = [lpdc,cpd[x],lpdf[x]]
legs = [[-1,1,-4],[1,2,-3,-6],[-2,2,-5]]
lpd = ncon(tensors,legs)
lpd = np.reshape(lpd,(bdpsi1*bdpsi2*d,bdpsi1*bdpsi2*d),order='F')
eiginput = 2*lpd-l2d
eiginput = (eiginput+np.conj(eiginput).T)/2
fomdval,a0v = np.linalg.eig(eiginput)
position = np.argmax(np.real(fomdval))
a0v = np.reshape(a0v[:,position],-1,order='F')
a0v = a0v/np.sqrt(np.abs(np.conj(a0v) @ a0v))
a0[x] = np.reshape(a0v,(bdpsi1,bdpsi2,d),order='F')
fomd = np.append(fomd,np.real(fomdval[position]))
a0[x] = np.moveaxis(a0[x],2,0)
a0[x] = np.reshape(a0[x],(d*bdpsi1,bdpsi2),order='F')
u,s,vh = np.linalg.svd(a0[x],full_matrices=False)
a0[x] = np.reshape(u,(d,bdpsi1,np.shape(s)[0]),order='F')
a0[x] = np.moveaxis(a0[x],0,2)
tensors = [np.diag(s) @ vh,a0[x+1]]
legs = [[-1,1],[1,-2,-3]]
a0[x+1] = ncon(tensors,legs)
tensors = [l2dc,np.conj(a0[x]),c2d[x],a0[x]]
legs = [[3,4,5],[3,-1,1],[4,-2,1,2],[5,-3,2]]
l2dc = ncon(tensors,legs)
tensors = [lpdc,np.conj(a0[x]),cpd[x],a0[x]]
legs = [[3,4,5],[3,-1,1],[4,-2,1,2],[5,-3,2]]
lpdc = ncon(tensors,legs)
bdpsi1,bdpsi2,d = np.shape(a0[n-1])
tensors = [l2dc,c2d[n-1]]
legs = [[-1,1,-4],[1,-7,-3,-6]]
l2d = ncon(tensors,legs)
l2d = np.reshape(l2d,(bdpsi1*bdpsi2*d,bdpsi1*bdpsi2*d),order='F')
tensors = [lpdc,cpd[n-1]]
legs = [[-1,1,-4],[1,-7,-3,-6]]
lpd = ncon(tensors,legs)
lpd = np.reshape(lpd,(bdpsi1*bdpsi2*d,bdpsi1*bdpsi2*d),order='F')
eiginput = 2*lpd-l2d
eiginput = (eiginput+np.conj(eiginput).T)/2
fomdval,a0v = np.linalg.eig(eiginput)
position = np.argmax(np.real(fomdval))
a0v = np.reshape(a0v[:,position],-1,order='F')
a0v = a0v/np.sqrt(np.abs(np.conj(a0v) @ a0v))
a0[n-1] = np.reshape(a0v,(bdpsi1,bdpsi2,d),order='F')
fomd = np.append(fomd,np.real(fomdval[position]))
iter_fomd += 1
for x in range(n-1,0,-1):
bdpsi1,bdpsi2,d = np.shape(a0[x])
a0[x] = np.moveaxis(a0[x],2,1)
a0[x] = np.reshape(a0[x],(bdpsi1,d*bdpsi2),order='F')
u,s,vh = np.linalg.svd(a0[x],full_matrices=False)
a0[x] = np.reshape(vh,(np.shape(s)[0],d,bdpsi2),order='F')
a0[x] = np.moveaxis(a0[x],1,2)
tensors = [a0[x-1],u @ np.diag(s)]
legs = [[-1,1,-3],[1,-2]]
a0[x-1] = ncon(tensors,legs)
if iter_fomd >= 2 and all(fomd[-2*n:] > 0) and np.std(fomd[-2*n:])/np.mean(fomd[-2*n:]) <= relunc_fomd:
break
fomdval = fomd[-1]
return fomdval,a0
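# Local update used above (summary): for fixed environments the FoMD is the
# quadratic form 2*<a0v|lpd|a0v> - <a0v|l2d|a0v> in the normalized site vector
# a0v, maximized by the eigenvector of the hermitized matrix 2*lpd - l2d with
# the largest real eigenvalue; the SVD steps after each update keep the MPS in
# canonical form, so normalizing a0v is equivalent to enforcing <psi|psi> = 1.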
def fin_FoMD_PBC_optm(c2d,cpd,a0,imprecision=10**-2):
"""
Optimization of FoMD over MPS for initial wave function. Function for finite size systems with PBC.
Parameters:
c2d: MPO for square of dual of SLD, expected ndarray of a shape (bd,bd,d,d,n)
cpd: MPO for dual of generalized derivative of SLD, expected ndarray of a shape (bd,bd,d,d,n)
a0: MPS for initial wave function, expected ndarray of a shape (bd,bd,d,n)
imprecision: expected imprecision of the end results, default value is 10**-2
Returns:
fomdval: optimal value of FoMD
a0: optimal MPS for initial wave function
"""
n = np.shape(c2d)[4]
d = np.shape(c2d)[2]
bdl2d = np.shape(c2d)[0]
bdlpd = np.shape(cpd)[0]
bdpsi = np.shape(a0)[0]
tol_fomd = 0.1*imprecision/n**2
if n == 1:
tensors = [c2d[:,:,:,:,0],np.eye(bdpsi),np.eye(bdpsi)]
legs = [[1,1,-3,-6],[-2,-1],[-5,-4]]
l2d = ncon(tensors,legs)
l2d = np.reshape(l2d,(bdpsi*bdpsi*d,bdpsi*bdpsi*d),order='F')
tensors = [cpd[:,:,:,:,0],np.eye(bdpsi),np.eye(bdpsi)]
legs = [[1,1,-3,-6],[-2,-1],[-5,-4]]
lpd = ncon(tensors,legs)
lpd = np.reshape(lpd,(bdpsi*bdpsi*d,bdpsi*bdpsi*d),order='F')
tensors = [np.eye(bdpsi),np.eye(bdpsi)]
legs = [[-2,-1],[-4,-3]]
psinorm = ncon(tensors,legs)
psinorm = np.reshape(psinorm,(bdpsi*bdpsi,bdpsi*bdpsi),order='F')
psinorm = (psinorm+np.conj(psinorm).T)/2
psinormpinv = np.linalg.pinv(psinorm,tol_fomd,hermitian=True)
psinormpinv = (psinormpinv+np.conj(psinormpinv).T)/2
psinormpinv = np.kron(np.eye(d),psinormpinv)
eiginput = 2*lpd-l2d
eiginput = (eiginput+np.conj(eiginput).T)/2
eiginput = psinormpinv @ eiginput
fomdval,a0v = np.linalg.eig(eiginput)
position = np.argmax(np.real(fomdval))
a0v = np.reshape(a0v[:,position],-1,order='F')
a0v = a0v/np.sqrt(np.abs(np.conj(a0v) @ np.kron(np.eye(d),psinorm) @ a0v))
a0[:,:,:,0] = np.reshape(a0v,(bdpsi,bdpsi,d),order='F')
fomdval = np.real(fomdval[position])
else:
relunc_fomd = 0.1*imprecision
l2df = np.zeros((bdpsi,bdl2d,bdpsi,bdpsi,bdl2d,bdpsi,n-1),dtype=complex)
lpdf = np.zeros((bdpsi,bdlpd,bdpsi,bdpsi,bdlpd,bdpsi,n-1),dtype=complex)
psinormf = np.zeros((bdpsi,bdpsi,bdpsi,bdpsi,n-1),dtype=complex)
fomd = np.array([])
iter_fomd = 0
while True:
tensors = [np.conj(a0[:,:,:,n-1]),c2d[:,:,:,:,n-1],a0[:,:,:,n-1]]
legs = [[-1,-4,1],[-2,-5,1,2],[-3,-6,2]]
l2df[:,:,:,:,:,:,n-2] = ncon(tensors,legs)
tensors = [np.conj(a0[:,:,:,n-1]),cpd[:,:,:,:,n-1],a0[:,:,:,n-1]]
legs = [[-1,-4,1],[-2,-5,1,2],[-3,-6,2]]
lpdf[:,:,:,:,:,:,n-2] = ncon(tensors,legs)
tensors = [np.conj(a0[:,:,:,n-1]),a0[:,:,:,n-1]]
legs = [[-1,-3,1],[-2,-4,1]]
psinormf[:,:,:,:,n-2] = ncon(tensors,legs)
for x in range(n-2,0,-1):
tensors = [np.conj(a0[:,:,:,x]),c2d[:,:,:,:,x],a0[:,:,:,x],l2df[:,:,:,:,:,:,x]]
legs = [[-1,3,1],[-2,4,1,2],[-3,5,2],[3,4,5,-4,-5,-6]]
l2df[:,:,:,:,:,:,x-1] = ncon(tensors,legs)
tensors = [np.conj(a0[:,:,:,x]),cpd[:,:,:,:,x],a0[:,:,:,x],lpdf[:,:,:,:,:,:,x]]
legs = [[-1,3,1],[-2,4,1,2],[-3,5,2],[3,4,5,-4,-5,-6]]
lpdf[:,:,:,:,:,:,x-1] = ncon(tensors,legs)
tensors = [np.conj(a0[:,:,:,x]),a0[:,:,:,x],psinormf[:,:,:,:,x]]
legs = [[-1,2,1],[-2,3,1],[2,3,-3,-4]]
psinormf[:,:,:,:,x-1] = ncon(tensors,legs)
tensors = [c2d[:,:,:,:,0],l2df[:,:,:,:,:,:,0]]
legs = [[2,1,-3,-6],[-2,1,-5,-1,2,-4]]
l2d = ncon(tensors,legs)
l2d = np.reshape(l2d,(bdpsi*bdpsi*d,bdpsi*bdpsi*d),order='F')
tensors = [cpd[:,:,:,:,0],lpdf[:,:,:,:,:,:,0]]
legs = [[2,1,-3,-6],[-2,1,-5,-1,2,-4]]
lpd = ncon(tensors,legs)
lpd = np.reshape(lpd,(bdpsi*bdpsi*d,bdpsi*bdpsi*d),order='F')
tensors = [psinormf[:,:,:,:,0]]
legs = [[-2,-4,-1,-3]]
psinorm = ncon(tensors,legs)
psinorm = np.reshape(psinorm,(bdpsi*bdpsi,bdpsi*bdpsi),order='F')
psinorm = (psinorm+np.conj(psinorm).T)/2
psinormpinv = np.linalg.pinv(psinorm,tol_fomd,hermitian=True)
psinormpinv = (psinormpinv+np.conj(psinormpinv).T)/2
psinormpinv = np.kron(np.eye(d),psinormpinv)
eiginput = 2*lpd-l2d
eiginput = (eiginput+np.conj(eiginput).T)/2
eiginput = psinormpinv @ eiginput
fomdval,a0v = np.linalg.eig(eiginput)
position = np.argmax(np.real(fomdval))
a0v = np.reshape(a0v[:,position],-1,order='F')
a0v = a0v/np.sqrt(np.abs(np.conj(a0v) @ np.kron(np.eye(d),psinorm) @ a0v))
a0[:,:,:,0] = np.reshape(a0v,(bdpsi,bdpsi,d),order='F')
fomd = np.append(fomd,np.real(fomdval[position]))
tensors = [np.conj(a0[:,:,:,0]),c2d[:,:,:,:,0],a0[:,:,:,0]]
legs = [[-1,-4,1],[-2,-5,1,2],[-3,-6,2]]
l2dc = ncon(tensors,legs)
tensors = [np.conj(a0[:,:,:,0]),cpd[:,:,:,:,0],a0[:,:,:,0]]
legs = [[-1,-4,1],[-2,-5,1,2],[-3,-6,2]]
lpdc = ncon(tensors,legs)
tensors = [np.conj(a0[:,:,:,0]),a0[:,:,:,0]]
legs = [[-1,-3,1],[-2,-4,1]]
psinormc = ncon(tensors,legs)
for x in range(1,n-1):
tensors = [l2dc,c2d[:,:,:,:,x],l2df[:,:,:,:,:,:,x]]
legs = [[3,4,5,-1,1,-4],[1,2,-3,-6],[-2,2,-5,3,4,5]]
l2d = ncon(tensors,legs)
l2d = np.reshape(l2d,(bdpsi*bdpsi*d,bdpsi*bdpsi*d),order='F')
tensors = [lpdc,cpd[:,:,:,:,x],lpdf[:,:,:,:,:,:,x]]
legs = [[3,4,5,-1,1,-4],[1,2,-3,-6],[-2,2,-5,3,4,5]]
lpd = ncon(tensors,legs)
lpd = np.reshape(lpd,(bdpsi*bdpsi*d,bdpsi*bdpsi*d),order='F')
tensors = [psinormc,psinormf[:,:,:,:,x]]
legs = [[1,2,-1,-3],[-2,-4,1,2]]
psinorm = ncon(tensors,legs)
psinorm = np.reshape(psinorm,(bdpsi*bdpsi,bdpsi*bdpsi),order='F')
psinorm = (psinorm+np.conj(psinorm).T)/2
psinormpinv = np.linalg.pinv(psinorm,tol_fomd,hermitian=True)
psinormpinv = (psinormpinv+np.conj(psinormpinv).T)/2
psinormpinv = np.kron(np.eye(d),psinormpinv)
eiginput = 2*lpd-l2d
eiginput = (eiginput+np.conj(eiginput).T)/2
eiginput = psinormpinv @ eiginput
fomdval,a0v = np.linalg.eig(eiginput)
position = np.argmax(np.real(fomdval))
a0v = np.reshape(a0v[:,position],-1,order='F')
a0v = a0v/np.sqrt(np.abs(np.conj(a0v) @ np.kron(np.eye(d),psinorm) @ a0v))
a0[:,:,:,x] = np.reshape(a0v,(bdpsi,bdpsi,d),order='F')
fomd = np.append(fomd,np.real(fomdval[position]))
tensors = [l2dc,np.conj(a0[:,:,:,x]),c2d[:,:,:,:,x],a0[:,:,:,x]]
legs = [[-1,-2,-3,3,4,5],[3,-4,1],[4,-5,1,2],[5,-6,2]]
l2dc = ncon(tensors,legs)
tensors = [lpdc,np.conj(a0[:,:,:,x]),cpd[:,:,:,:,x],a0[:,:,:,x]]
legs = [[-1,-2,-3,3,4,5],[3,-4,1],[4,-5,1,2],[5,-6,2]]
lpdc = ncon(tensors,legs)
tensors = [psinormc,np.conj(a0[:,:,:,x]),a0[:,:,:,x]]
legs = [[-1,-2,2,3],[2,-3,1],[3,-4,1]]
psinormc = ncon(tensors,legs)
tensors = [l2dc,c2d[:,:,:,:,n-1]]
legs = [[-2,2,-5,-1,1,-4],[1,2,-3,-6]]
l2d = ncon(tensors,legs)
l2d = np.reshape(l2d,(bdpsi*bdpsi*d,bdpsi*bdpsi*d),order='F')
tensors = [lpdc,cpd[:,:,:,:,n-1]]
legs = [[-2,2,-5,-1,1,-4],[1,2,-3,-6]]
lpd = ncon(tensors,legs)
lpd = np.reshape(lpd,(bdpsi*bdpsi*d,bdpsi*bdpsi*d),order='F')
tensors = [psinormc]
legs = [[-2,-4,-1,-3]]
psinorm = ncon(tensors,legs)
psinorm = np.reshape(psinorm,(bdpsi*bdpsi,bdpsi*bdpsi),order='F')
psinorm = (psinorm+np.conj(psinorm).T)/2
psinormpinv = np.linalg.pinv(psinorm,tol_fomd,hermitian=True)
psinormpinv = (psinormpinv+np.conj(psinormpinv).T)/2
psinormpinv = np.kron(np.eye(d),psinormpinv)
eiginput = 2*lpd-l2d
eiginput = (eiginput+np.conj(eiginput).T)/2
eiginput = psinormpinv @ eiginput
fomdval,a0v = np.linalg.eig(eiginput)
position = np.argmax(np.real(fomdval))
a0v = np.reshape(a0v[:,position],-1,order='F')
a0v = a0v/np.sqrt(np.abs(np.conj(a0v) @ np.kron(np.eye(d),psinorm) @ a0v))
a0[:,:,:,n-1] = np.reshape(a0v,(bdpsi,bdpsi,d),order='F')
fomd = np.append(fomd,np.real(fomdval[position]))
iter_fomd += 1
if iter_fomd >= 2 and all(fomd[-2*n:] > 0) and np.std(fomd[-2*n:])/np.mean(fomd[-2*n:]) <= relunc_fomd:
break
fomdval = fomd[-1]
return fomdval,a0
def fin_FoM_OBC_val(a,b,c):
"""
Calculate the value of FoM. Function for finite size systems with OBC.
Parameters:
a: MPO for density matrix, expected list of length n of ndarrays of a shape (bd,bd,d,d) (bd can vary between sites)
b: MPO for generalized derivative of density matrix, expected list of length n of ndarrays of a shape (bd,bd,d,d) (bd can vary between sites)
c: MPO for SLD, expected list of length n of ndarrays of a shape (bd,bd,d,d) (bd can vary between sites)
Returns:
fomval: value of FoM
"""
n = len(c)
if n == 1:
if np.shape(a[0])[0] == 1 and np.shape(b[0])[0] == 1 and np.shape(c[0])[0] == 1:
tensors = [c[0][0,0,:,:],b[0][0,0,:,:]]
legs = [[1,2],[2,1]]
l1 = ncon(tensors,legs)
tensors = [c[0][0,0,:,:],a[0][0,0,:,:],c[0][0,0,:,:]]
legs = [[1,2],[2,3],[3,1]]
l2 = ncon(tensors,legs)
fomval = 2*l1-l2
else:
warnings.warn('Tensor networks with OBC and length one have to have bond dimension equal to one.')
else:
tensors = [c[n-1],b[n-1]]
legs = [[-1,-3,1,2],[-2,-4,2,1]]
l1 = ncon(tensors,legs)
l1 = l1[:,:,0,0]
tensors = [c[n-1],a[n-1],c[n-1]]
legs = [[-1,-4,1,2],[-2,-5,2,3],[-3,-6,3,1]]
l2 = ncon(tensors,legs)
l2 = l2[:,:,:,0,0,0]
for x in range(n-2,0,-1):
tensors = [c[x],b[x],l1]
legs = [[-1,3,1,2],[-2,4,2,1],[3,4]]
l1 = ncon(tensors,legs)
tensors = [c[x],a[x],c[x],l2]
legs = [[-1,4,1,2],[-2,5,2,3],[-3,6,3,1],[4,5,6]]
l2 = ncon(tensors,legs)
tensors = [c[0],b[0],l1]
legs = [[-1,3,1,2],[-2,4,2,1],[3,4]]
l1 = ncon(tensors,legs)
l1 = float(l1)
tensors = [c[0],a[0],c[0],l2]
legs = [[-1,4,1,2],[-2,5,2,3],[-3,6,3,1],[4,5,6]]
l2 = ncon(tensors,legs)
l2 = float(l2)
fomval = 2*l1-l2
return fomval
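# Hedged usage sketch (not part of the package API; the helper name and the
# matrices are illustrative). For a single qubit with trivial bond dimension
# the MPOs are lists holding one (1,1,2,2) tensor, and fin_FoM_OBC_val reduces
# to 2*Tr(L*drho) - Tr(L*rho*L):
def _example_fin_FoM_OBC_val():
    rho = np.diag([0.5, 0.5])[np.newaxis, np.newaxis, :, :]                   # density matrix
    drho = np.array([[0., 0.5j], [-0.5j, 0.]])[np.newaxis, np.newaxis, :, :]  # generalized derivative
    sld = np.array([[0., 1j], [-1j, 0.]])[np.newaxis, np.newaxis, :, :]       # SLD candidate
    return fin_FoM_OBC_val([rho], [drho], [sld])                              # -> 1.0 for these inputs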
def fin_FoM_PBC_val(a,b,c):
"""
Calculate the value of FoM. Function for finite size systems with PBC.
Parameters:
a: MPO for a density matrix, expected ndarray of a shape (bd,bd,d,d,n)
b: MPO for generalized derivative of a density matrix, expected ndarray of a shape (bd,bd,d,d,n)
c: MPO for the SLD, expected ndarray of a shape (bd,bd,d,d,n)
Returns:
fomval: value of FoM
"""
n = np.shape(a)[4]
if n == 1:
tensors = [c[:,:,:,:,0],b[:,:,:,:,0]]
legs = [[3,3,1,2],[4,4,2,1]]
l1 = ncon(tensors,legs)
tensors = [c[:,:,:,:,0],a[:,:,:,:,0],c[:,:,:,:,0]]
legs = [[4,4,1,2],[5,5,2,3],[6,6,3,1]]
l2 = ncon(tensors,legs)
fomval = 2*l1-l2
else:
tensors = [c[:,:,:,:,n-1],b[:,:,:,:,n-1]]
legs = [[-1,-3,1,2],[-2,-4,2,1]]
l1 = ncon(tensors,legs)
tensors = [c[:,:,:,:,n-1],a[:,:,:,:,n-1],c[:,:,:,:,n-1]]
legs = [[-1,-4,1,2],[-2,-5,2,3],[-3,-6,3,1]]
l2 = ncon(tensors,legs)
for x in range(n-2,0,-1):
tensors = [c[:,:,:,:,x],b[:,:,:,:,x],l1]
legs = [[-1,3,1,2],[-2,4,2,1],[3,4,-3,-4]]
l1 = ncon(tensors,legs)
tensors = [c[:,:,:,:,x],a[:,:,:,:,x],c[:,:,:,:,x],l2]
legs = [[-1,4,1,2],[-2,5,2,3],[-3,6,3,1],[4,5,6,-4,-5,-6]]
l2 = ncon(tensors,legs)
tensors = [c[:,:,:,:,0],b[:,:,:,:,0],l1]
legs = [[5,3,1,2],[6,4,2,1],[3,4,5,6]]
l1 = ncon(tensors,legs)
tensors = [c[:,:,:,:,0],a[:,:,:,:,0],c[:,:,:,:,0],l2]
legs = [[7,4,1,2],[8,5,2,3],[9,6,3,1],[4,5,6,7,8,9]]
l2 = ncon(tensors,legs)
fomval = 2*l1-l2
return fomval
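# Hedged sketch: same one-qubit example as above, but with PBC the tensors are
# packed into a single (bd,bd,d,d,n) ndarray instead of a list (values are
# placeholders, the helper name is illustrative):
def _example_fin_FoM_PBC_val():
    rho = np.diag([0.5, 0.5]).astype(complex).reshape(1, 1, 2, 2, 1)
    drho = np.array([[0., 0.5j], [-0.5j, 0.]]).reshape(1, 1, 2, 2, 1)
    sld = np.array([[0., 1j], [-1j, 0.]]).reshape(1, 1, 2, 2, 1)
    return fin_FoM_PBC_val(rho, drho, sld)                                    # -> 1.0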
def fin_FoMD_OBC_val(c2d,cpd,a0):
"""
Calculate value of FoMD. Function for finite size systems with OBC.
Parameters:
c2d: MPO for square of dual of SLD, expected list of length n of ndarrays of a shape (bd,bd,d,d) (bd can vary between sites)
cpd: MPO for dual of generalized derivative of SLD, expected list of length n of ndarrays of a shape (bd,bd,d,d) (bd can vary between sites)
a0: MPS for initial wave function, expected list of length n of ndarrays of a shape (bd,bd,d) (bd can vary between sites)
Returns:
fomdval: value of FoMD
"""
n = len(a0)
if n == 1:
if np.shape(c2d[0])[0] == 1 and np.shape(cpd[0])[0] == 1 and np.shape(a0[0])[0] == 1:
tensors = [np.conj(a0[0][0,0,:]),c2d[0][0,0,:,:],a0[0][0,0,:]]
legs = [[1],[1,2],[2]]
l2d = ncon(tensors,legs)
tensors = [np.conj(a0[0][0,0,:]),cpd[0][0,0,:,:],a0[0][0,0,:]]
legs = [[1],[1,2],[2]]
lpd = ncon(tensors,legs)
fomdval = 2*lpd-l2d
else:
warnings.warn('Tensor networks with OBC and length one have to have bond dimension equal to one.')
else:
tensors = [np.conj(a0[n-1]),c2d[n-1],a0[n-1]]
legs = [[-1,-4,1],[-2,-5,1,2],[-3,-6,2]]
l2d = ncon(tensors,legs)
l2d = l2d[:,:,:,0,0,0]
tensors = [np.conj(a0[n-1]),cpd[n-1],a0[n-1]]
legs = [[-1,-4,1],[-2,-5,1,2],[-3,-6,2]]
lpd = ncon(tensors,legs)
lpd = lpd[:,:,:,0,0,0]
for x in range(n-2,0,-1):
tensors = [np.conj(a0[x]),c2d[x],a0[x],l2d]
legs = [[-1,3,1],[-2,4,1,2],[-3,5,2],[3,4,5]]
l2d = ncon(tensors,legs)
tensors = [np.conj(a0[x]),cpd[x],a0[x],lpd]
legs = [[-1,3,1],[-2,4,1,2],[-3,5,2],[3,4,5]]
lpd = ncon(tensors,legs)
tensors = [np.conj(a0[0]),c2d[0],a0[0],l2d]
legs = [[-1,3,1],[-2,4,1,2],[-3,5,2],[3,4,5]]
l2d = ncon(tensors,legs)
l2d = float(l2d)
tensors = [np.conj(a0[0]),cpd[0],a0[0],lpd]
legs = [[-1,3,1],[-2,4,1,2],[-3,5,2],[3,4,5]]
lpd = ncon(tensors,legs)
lpd = float(lpd)
fomdval = 2*lpd-l2d
return fomdval
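# Hedged sketch (illustrative helper, placeholder operators): with n=1 and
# trivial bond dimension fin_FoMD_OBC_val reduces to 2*<psi|Lp_dual|psi> -
# <psi|L2_dual|psi>:
def _example_fin_FoMD_OBC_val():
    plus = np.array([1., 1.]) / np.sqrt(2)
    a0 = [plus[np.newaxis, np.newaxis, :]]               # product-state MPS, shape (1,1,2)
    c2d = [np.eye(2)[np.newaxis, np.newaxis, :, :]]      # square of dual SLD
    cpd = [np.eye(2)[np.newaxis, np.newaxis, :, :]]      # dual of generalized derivative of SLD
    return fin_FoMD_OBC_val(c2d, cpd, a0)                # 2*1 - 1 = 1.0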
def fin_FoMD_PBC_val(c2d,cpd,a0):
"""
Calculate the value of FoMD. Function for finite size systems with PBC.
Parameters:
c2d: MPO for square of dual of the SLD, expected ndarray of a shape (bd,bd,d,d,n)
cpd: MPO for dual of generalized derivative of the SLD, expected ndarray of a shape (bd,bd,d,d,n)
a0: MPS for the initial wave function, expected ndarray of a shape (bd,bd,d,n)
Returns:
fomdval: value of FoMD
"""
n = np.shape(c2d)[4]
if n == 1:
tensors = [np.conj(a0[:,:,:,0]),c2d[:,:,:,:,0],a0[:,:,:,0]]
legs = [[3,3,1],[4,4,1,2],[5,5,2]]
l2d = ncon(tensors,legs)
tensors = [np.conj(a0[:,:,:,0]),cpd[:,:,:,:,0],a0[:,:,:,0]]
legs = [[3,3,1],[4,4,1,2],[5,5,2]]
lpd = ncon(tensors,legs)
fomdval = 2*lpd-l2d
else:
tensors = [np.conj(a0[:,:,:,n-1]),c2d[:,:,:,:,n-1],a0[:,:,:,n-1]]
legs = [[-1,-4,1],[-2,-5,1,2],[-3,-6,2]]
l2d = ncon(tensors,legs)
tensors = [np.conj(a0[:,:,:,n-1]),cpd[:,:,:,:,n-1],a0[:,:,:,n-1]]
legs = [[-1,-4,1],[-2,-5,1,2],[-3,-6,2]]
lpd = ncon(tensors,legs)
for x in range(n-2,0,-1):
tensors = [np.conj(a0[:,:,:,x]),c2d[:,:,:,:,x],a0[:,:,:,x],l2d]
legs = [[-1,3,1],[-2,4,1,2],[-3,5,2],[3,4,5,-4,-5,-6]]
l2d = ncon(tensors,legs)
tensors = [np.conj(a0[:,:,:,x]),cpd[:,:,:,:,x],a0[:,:,:,x],lpd]
legs = [[-1,3,1],[-2,4,1,2],[-3,5,2],[3,4,5,-4,-5,-6]]
lpd = ncon(tensors,legs)
tensors = [np.conj(a0[:,:,:,0]),c2d[:,:,:,:,0],a0[:,:,:,0],l2d]
legs = [[6,3,1],[7,4,1,2],[8,5,2],[3,4,5,6,7,8]]
l2d = ncon(tensors,legs)
tensors = [np.conj(a0[:,:,:,0]),cpd[:,:,:,:,0],a0[:,:,:,0],lpd]
legs = [[6,3,1],[7,4,1,2],[8,5,2],[3,4,5,6,7,8]]
lpd = ncon(tensors,legs)
fomdval = 2*lpd-l2d
return fomdval
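# Hedged sketch: the PBC variant takes the same data as ndarrays with a
# trailing site index (helper name and operators are illustrative):
def _example_fin_FoMD_PBC_val():
    plus = np.array([1., 1.]) / np.sqrt(2)
    a0 = plus.reshape(1, 1, 2, 1)                        # (bd,bd,d,n) with n=1
    c2d = np.eye(2).reshape(1, 1, 2, 2, 1)
    cpd = np.eye(2).reshape(1, 1, 2, 2, 1)
    return fin_FoMD_PBC_val(c2d, cpd, a0)                # 2*1 - 1 = 1.0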
#################################################################
# 1.2.2 Problems with discrete approximation of the derivative. #
#################################################################
def fin2_FoM_FoMD_optbd(n,d,bc,ch,chp,epsilon,cini=None,a0ini=None,imprecision=10**-2,bdlmax=100,alwaysbdlmax=False,lherm=True,bdpsimax=100,alwaysbdpsimax=False):
"""
Iterative optimization of FoM/FoMD over SLD MPO and initial wave function MPS and also a check of convergence with increasing bond dimensions. Function for finite size systems. Version with two channels separated by epsilon.
Parameters:
n: number of sites in TN
d: dimension of the local Hilbert space (dimension of the physical index)
bc: boundary conditions, 'O' for OBC, 'P' for PBC
ch: MPO for a quantum channel at the value of estimated parameter phi=phi_0, expected list of length n of ndarrays of a shape (bd,bd,d**2,d**2) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d**2,d**2,n) for PBC
chp: MPO for a quantum channel at the value of estimated parameter phi=phi_0+epsilon, expected list of length n of ndarrays of a shape (bd,bd,d**2,d**2) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d**2,d**2,n) for PBC
epsilon: value of a separation between estimated parameters encoded in ch and chp, float
cini: initial MPO for the SLD, expected list of length n of ndarrays of a shape (bd,bd,d,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,d,n) for PBC
a0ini: initial MPS for the initial wave function, expected list of length n of ndarrays of a shape (bd,bd,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,n) for PBC
imprecision: expected imprecision of the end results, default value is 10**-2
bdlmax: maximal value of bd for SLD MPO, default value is 100
alwaysbdlmax: boolean value, True if the maximal value of bd for SLD MPO has to be reached, otherwise False (default value)
lherm: boolean value, True (default value) when Hermitian gauge is imposed on SLD MPO, otherwise False
bdpsimax: maximal value of bd for the initial wave function MPS, default value is 100
alwaysbdpsimax: boolean value, True if the maximal value of bd for initial wave function MPS has to be reached, otherwise False (default value)
Returns:
result: optimal value of FoM/FoMD
resultm: matrix describing FoM/FoMD as a function of bd of the SLD MPO [rows] and of the initial wave function MPS [columns]
c: optimal MPO for SLD
a0: optimal MPS for initial wave function
"""
while True:
if a0ini is None:
bdpsi = 1
a0 = np.zeros(d,dtype=complex)
for i in range(d):
a0[i] = np.sqrt(math.comb(d-1,i))*2**(-(d-1)/2) # prod
# a0[i] = np.sqrt(2/(d+1))*np.sin((1+i)*np.pi/(d+1)) # sine
if bc == 'O':
a0 = a0[np.newaxis,np.newaxis,:]
a0 = [a0]*n
elif bc == 'P':
a0 = a0[np.newaxis,np.newaxis,:,np.newaxis]
a0 = np.tile(a0,(1,1,1,n))
else:
a0 = a0ini
if bc == 'O':
bdpsi = max([np.shape(a0[i])[0] for i in range(n)])
a0 = [a0[i].astype(complex) for i in range(n)]
elif bc == 'P':
bdpsi = np.shape(a0)[0]
a0 = a0.astype(complex)
if cini is None:
bdl = 1
rng = np.random.default_rng()
if bc == 'O':
c = [0]*n
c[0] = (rng.random((1,bdl,d,d)) + 1j*rng.random((1,bdl,d,d)))/bdl
c[0] = (c[0] + np.conj(np.moveaxis(c[0],2,3)))/2
for x in range(1,n-1):
c[x] = (rng.random((bdl,bdl,d,d)) + 1j*rng.random((bdl,bdl,d,d)))/bdl
c[x] = (c[x] + np.conj(np.moveaxis(c[x],2,3)))/2
c[n-1] = (rng.random((bdl,1,d,d)) + 1j*rng.random((bdl,1,d,d)))/bdl
c[n-1] = (c[n-1] + np.conj(np.moveaxis(c[n-1],2,3)))/2
elif bc == 'P':
c = (rng.random((bdl,bdl,d,d,n)) + 1j*rng.random((bdl,bdl,d,d,n)))/bdl
c = (c + np.conj(np.moveaxis(c,2,3)))/2
else:
c = cini
if bc == 'O':
bdl = max([np.shape(c[i])[0] for i in range(n)])
c = [c[i].astype(complex) for i in range(n)]
elif bc == 'P':
bdl = np.shape(c)[0]
c = c.astype(complex)
resultm = np.zeros((bdlmax,bdpsimax),dtype=float)
resultm[bdl-1,bdpsi-1],c,a0 = fin2_FoM_FoMD_optm(n,d,bc,c,a0,ch,chp,epsilon,imprecision,lherm)
if bc == 'O' and n == 1:
resultm = resultm[0:bdl,0:bdpsi]
result = resultm[bdl-1,bdpsi-1]
return result,resultm,c,a0
factorv = np.array([0.5,0.25,0.1,1,0.01])
problem = False
while True:
while True:
if bdpsi == bdpsimax:
break
else:
a0old = a0
bdpsi += 1
i = 0
while True:
a0 = fin_enlarge_bdpsi(a0,factorv[i])
resultm[bdl-1,bdpsi-1],cnew,a0new = fin2_FoM_FoMD_optm(n,d,bc,c,a0,ch,chp,epsilon,imprecision,lherm)
if resultm[bdl-1,bdpsi-1] >= resultm[bdl-1,bdpsi-2]:
break
i += 1
if i == np.size(factorv):
problem = True
break
if problem:
break
if not(alwaysbdpsimax) and resultm[bdl-1,bdpsi-1] < (1+imprecision)*resultm[bdl-1,bdpsi-2]:
bdpsi += -1
a0 = a0old
a0copy = a0new
ccopy = cnew
break
else:
a0 = a0new
c = cnew
if problem:
break
if bdl == bdlmax:
if bdpsi == bdpsimax:
resultm = resultm[0:bdl,0:bdpsi]
result = resultm[bdl-1,bdpsi-1]
else:
a0 = a0copy
c = ccopy
resultm = resultm[0:bdl,0:bdpsi+1]
result = resultm[bdl-1,bdpsi]
break
else:
bdl += 1
i = 0
while True:
c = fin_enlarge_bdl(c,factorv[i])
resultm[bdl-1,bdpsi-1],cnew,a0new = fin2_FoM_FoMD_optm(n,d,bc,c,a0,ch,chp,epsilon,imprecision,lherm)
if resultm[bdl-1,bdpsi-1] >= resultm[bdl-2,bdpsi-1]:
a0 = a0new
c = cnew
break
i += 1
if i == np.size(factorv):
problem = True
break
if problem:
break
if not(alwaysbdlmax) and resultm[bdl-1,bdpsi-1] < (1+imprecision)*resultm[bdl-2,bdpsi-1]:
if bdpsi == bdpsimax:
resultm = resultm[0:bdl,0:bdpsi]
result = resultm[bdl-1,bdpsi-1]
else:
if resultm[bdl-1,bdpsi-1] < resultm[bdl-2,bdpsi]:
a0 = a0copy
c = ccopy
resultm = resultm[0:bdl,0:bdpsi+1]
bdl += -1
bdpsi += 1
result = resultm[bdl-1,bdpsi-1]
else:
resultm = resultm[0:bdl,0:bdpsi+1]
result = resultm[bdl-1,bdpsi-1]
break
if not(problem):
break
return result,resultm,c,a0
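# Hedged end-to-end sketch for the epsilon-separated optimizer above. The
# channel construction below (column-major vec convention, generator
# h = sigma_z/2) is an assumption made only for illustration; in real use the
# channel MPOs would come from the package's own channel helpers.
def _example_fin2_FoM_FoMD_optbd():
    phi0, eps = 0.0, 10**-4
    hvals = np.array([0.5, -0.5])                        # eigenvalues of h
    def phase_channel(phi):
        ph = np.exp(-1j*phi*(hvals[:, None] - hvals[None, :]))  # rho_ij -> e^{-i*phi*(h_i-h_j)} rho_ij
        return np.diag(ph.flatten(order='F')).reshape(1, 1, 4, 4)
    ch = [phase_channel(phi0)]
    chp = [phase_channel(phi0 + eps)]
    return fin2_FoM_FoMD_optbd(1, 2, 'O', ch, chp, eps)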
def fin2_FoM_optbd(n,d,bc,a,b,epsilon,cini=None,imprecision=10**-2,bdlmax=100,alwaysbdlmax=False,lherm=True):
"""
Optimization of FoM over SLD MPO and also check of convergence in bond dimension. Function for finite size systems. Version with two states separated by epsilon.
Parameters:
n: number of sites in TN
d: dimension of local Hilbert space (dimension of physical index)
bc: boundary conditions, 'O' for OBC, 'P' for PBC
a: MPO for the density matrix at the value of estimated parameter phi=phi_0, expected list of length n of ndarrays of a shape (bd,bd,d,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,d,n) for PBC
b: MPO for the density matrix at the value of estimated parameter phi=phi_0+epsilon, expected list of length n of ndarrays of a shape (bd,bd,d,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,d,n) for PBC
epsilon: value of a separation between estimated parameters encoded in a and b, float
cini: initial MPO for SLD, expected list of length n of ndarrays of a shape (bd,bd,d,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,d,n) for PBC
imprecision: expected imprecision of the end results, default value is 10**-2
bdlmax: maximal value of bd for SLD MPO, default value is 100
alwaysbdlmax: boolean value, True if the maximal value of bd for SLD MPO has to be reached, otherwise False (default value)
lherm: boolean value, True (default value) when Hermitian gauge is imposed on SLD MPO, otherwise False
Returns:
result: optimal value of FoM
resultv: vector describing FoM as a function of bd of the SLD MPO
c: optimal MPO for SLD
"""
while True:
if cini is None:
bdl = 1
rng = np.random.default_rng()
if bc == 'O':
c = [0]*n
c[0] = (rng.random((1,bdl,d,d)) + 1j*rng.random((1,bdl,d,d)))/bdl
c[0] = (c[0] + np.conj(np.moveaxis(c[0],2,3)))/2
for x in range(1,n-1):
c[x] = (rng.random((bdl,bdl,d,d)) + 1j*rng.random((bdl,bdl,d,d)))/bdl
c[x] = (c[x] + np.conj(np.moveaxis(c[x],2,3)))/2
c[n-1] = (rng.random((bdl,1,d,d)) + 1j*rng.random((bdl,1,d,d)))/bdl
c[n-1] = (c[n-1] + np.conj(np.moveaxis(c[n-1],2,3)))/2
elif bc == 'P':
c = (rng.random((bdl,bdl,d,d,n)) + 1j*rng.random((bdl,bdl,d,d,n)))/bdl
c = (c + np.conj(np.moveaxis(c,2,3)))/2
else:
c = cini
if bc == 'O':
bdl = max([np.shape(c[i])[0] for i in range(n)])
c = [c[i].astype(complex) for i in range(n)]
elif bc == 'P':
bdl = np.shape(c)[0]
c = c.astype(complex)
resultv = np.zeros(bdlmax,dtype=float)
if bc == 'O':
resultv[bdl-1],c = fin2_FoM_OBC_optm(a,b,epsilon,c,imprecision,lherm)
if n == 1:
resultv = resultv[0:bdl]
result = resultv[bdl-1]
return result,resultv,c
elif bc == 'P':
resultv[bdl-1],c = fin2_FoM_PBC_optm(a,b,epsilon,c,imprecision,lherm)
factorv = np.array([0.5,0.25,0.1,1,0.01])
problem = False
while True:
if bdl == bdlmax:
resultv = resultv[0:bdl]
result = resultv[bdl-1]
break
else:
bdl += 1
i = 0
while True:
c = fin_enlarge_bdl(c,factorv[i])
if bc == 'O':
resultv[bdl-1],cnew = fin2_FoM_OBC_optm(a,b,epsilon,c,imprecision,lherm)
elif bc == 'P':
resultv[bdl-1],cnew = fin2_FoM_PBC_optm(a,b,epsilon,c,imprecision,lherm)
if resultv[bdl-1] >= resultv[bdl-2]:
c = cnew
break
i += 1
if i == np.size(factorv):
problem = True
break
if problem:
break
if not(alwaysbdlmax) and resultv[bdl-1] < (1+imprecision)*resultv[bdl-2]:
resultv = resultv[0:bdl]
result = resultv[bdl-1]
break
if not(problem):
break
return result,resultv,c
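# Hedged sketch for fin2_FoM_optbd: two single-qubit states separated by
# epsilon (state and helper name are illustrative; for this pure state the
# optimal FoM approaches the QFI of 1 as epsilon -> 0):
def _example_fin2_FoM_optbd():
    eps = 10**-4
    def rho(phi):
        psi = np.array([1., np.exp(1j*phi)]) / np.sqrt(2)
        return np.outer(psi, np.conj(psi))[np.newaxis, np.newaxis, :, :]
    return fin2_FoM_optbd(1, 2, 'O', [rho(0.)], [rho(eps)], eps)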
def fin2_FoMD_optbd(n,d,bc,c2d,cd,cpd,epsilon,a0ini=None,imprecision=10**-2,bdpsimax=100,alwaysbdpsimax=False):
"""
Optimization of FoMD over initial wave function MPS and also check of convergence when increasing the bond dimension. Function for finite size systems. Version with two dual SLDs separated by epsilon.
Parameters:
n: number of sites in TN
d: dimension of local Hilbert space (dimension of physical index)
bc: boundary conditions, 'O' for OBC, 'P' for PBC
c2d: MPO for square of dual of SLD at the value of estimated parameter phi=-phi_0, expected list of length n of ndarrays of a shape (bd,bd,d,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,d,n) for PBC
cd: MPO for dual of SLD at the value of estimated parameter phi=-phi_0, expected list of length n of ndarrays of a shape (bd,bd,d,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,d,n) for PBC
cpd: MPO for dual of SLD at the value of estimated parameter phi=-(phi_0+epsilon), expected list of length n of ndarrays of a shape (bd,bd,d,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,d,n) for PBC
epsilon: value of a separation between estimated parameters encoded in cd and cpd, float
a0ini: initial MPS for initial wave function, expected list of length n of ndarrays of a shape (bd,bd,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,n) for PBC
imprecision: expected imprecision of the end results, default value is 10**-2
bdpsimax: maximal value of bd for initial wave function MPS, default value is 100
alwaysbdpsimax: boolean value, True if the maximal value of bd for the initial wave function MPS has to be reached, otherwise False (default value)
Returns:
result: optimal value of FoMD
resultv: vector describing FoMD as a function of bd of the initial wave function MPS
a0: optimal MPS for initial wave function
"""
while True:
if a0ini is None:
bdpsi = 1
a0 = np.zeros(d,dtype=complex)
for i in range(d):
a0[i] = np.sqrt(math.comb(d-1,i))*2**(-(d-1)/2) # prod
# a0[i] = np.sqrt(2/(d+1))*np.sin((1+i)*np.pi/(d+1)) # sine
if bc == 'O':
a0 = a0[np.newaxis,np.newaxis,:]
a0 = [a0]*n
elif bc == 'P':
a0 = a0[np.newaxis,np.newaxis,:,np.newaxis]
a0 = np.tile(a0,(1,1,1,n))
else:
a0 = a0ini
if bc == 'O':
bdpsi = max([np.shape(a0[i])[0] for i in range(n)])
a0 = [a0[i].astype(complex) for i in range(n)]
elif bc == 'P':
bdpsi = np.shape(a0)[0]
a0 = a0.astype(complex)
resultv = np.zeros(bdpsimax,dtype=float)
if bc == 'O':
resultv[bdpsi-1],a0 = fin2_FoMD_OBC_optm(c2d,cd,cpd,epsilon,a0,imprecision)
if n == 1:
resultv = resultv[0:bdpsi]
result = resultv[bdpsi-1]
return result,resultv,a0
elif bc == 'P':
resultv[bdpsi-1],a0 = fin2_FoMD_PBC_optm(c2d,cd,cpd,epsilon,a0,imprecision)
factorv = np.array([0.5,0.25,0.1,1,0.01])
problem = False
while True:
if bdpsi == bdpsimax:
resultv = resultv[0:bdpsi]
result = resultv[bdpsi-1]
break
else:
bdpsi += 1
i = 0
while True:
a0 = fin_enlarge_bdpsi(a0,factorv[i])
if bc == 'O':
resultv[bdpsi-1],a0new = fin2_FoMD_OBC_optm(c2d,cd,cpd,epsilon,a0,imprecision)
elif bc == 'P':
resultv[bdpsi-1],a0new = fin2_FoMD_PBC_optm(c2d,cd,cpd,epsilon,a0,imprecision)
if resultv[bdpsi-1] >= resultv[bdpsi-2]:
a0 = a0new
break
i += 1
if i == np.size(factorv):
problem = True
break
if problem:
break
if not(alwaysbdpsimax) and resultv[bdpsi-1] < (1+imprecision)*resultv[bdpsi-2]:
resultv = resultv[0:bdpsi]
result = resultv[bdpsi-1]
break
if not(problem):
break
return result,resultv,a0
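# Hedged sketch for fin2_FoMD_optbd: placeholder dual-SLD MPOs for one qubit,
# built only to exercise the call signature (the operators carry no physical
# meaning here):
def _example_fin2_FoMD_optbd():
    eps = 10**-4
    sz = np.diag([1., -1.]).astype(complex)
    sx = np.array([[0., 1.], [1., 0.]])
    cd = [sz[np.newaxis, np.newaxis, :, :]]
    cpd = [(sz + eps*sx)[np.newaxis, np.newaxis, :, :]]
    c2d = [(sz @ sz)[np.newaxis, np.newaxis, :, :]]
    return fin2_FoMD_optbd(1, 2, 'O', c2d, cd, cpd, eps)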
def fin2_FoM_FoMD_optm(n,d,bc,c,a0,ch,chp,epsilon,imprecision=10**-2,lherm=True):
"""
Iterative optimization of FoM/FoMD over SLD MPO and initial wave function MPS. Function for finite size systems. Version with two channels separated by epsilon.
Parameters:
n: number of sites in TN
d: dimension of local Hilbert space (dimension of physical index)
bc: boundary conditions, 'O' for OBC, 'P' for PBC
c: MPO for SLD, expected list of length n of ndarrays of a shape (bd,bd,d,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,d,n) for PBC
a0: MPS for initial wave function, expected list of length n of ndarrays of a shape (bd,bd,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,n) for PBC
ch: MPO for quantum channel at the value of estimated parameter phi=phi_0, expected list of length n of ndarrays of a shape (bd,bd,d**2,d**2) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d**2,d**2,n) for PBC
chp: MPO for quantum channel at the value of estimated parameter phi=phi_0+epsilon, expected list of length n of ndarrays of a shape (bd,bd,d**2,d**2) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d**2,d**2,n) for PBC
epsilon: value of a separation between estimated parameters encoded in ch and chp, float
imprecision: expected imprecision of the end results, default value is 10**-2
lherm: boolean value, True (default value) when Hermitian gauge is imposed on SLD MPO, otherwise False
Returns:
fval: optimal value of FoM/FoMD
c: optimal MPO for SLD
a0: optimal MPS for initial wave function
"""
relunc_f = 0.1*imprecision
if bc == 'O':
chd = [0]*n
chpd = [0]*n
for x in range(n):
chd[x] = np.conj(np.moveaxis(ch[x],2,3))
chpd[x] = np.conj(np.moveaxis(chp[x],2,3))
elif bc == 'P':
chd = np.conj(np.moveaxis(ch,2,3))
chpd = np.conj(np.moveaxis(chp,2,3))
f = np.array([])
iter_f = 0
while True:
a0_dm = wave_function_to_density_matrix(a0)
a = channel_acting_on_operator(ch,a0_dm)
b = channel_acting_on_operator(chp,a0_dm)
if bc == 'O':
fom,c = fin2_FoM_OBC_optm(a,b,epsilon,c,imprecision,lherm)
elif bc == 'P':
fom,c = fin2_FoM_PBC_optm(a,b,epsilon,c,imprecision,lherm)
f = np.append(f,fom)
if iter_f >= 2 and np.std(f[-4:])/np.mean(f[-4:]) <= relunc_f:
break
if bc == 'O':
c2 = [0]*n
for x in range(n):
bdl1 = np.shape(c[x])[0]
bdl2 = np.shape(c[x])[1]
c2[x] = np.zeros((bdl1**2,bdl2**2,d,d),dtype=complex)
for nx in range(d):
for nxp in range(d):
for nxpp in range(d):
c2[x][:,:,nx,nxp] = c2[x][:,:,nx,nxp]+np.kron(c[x][:,:,nx,nxpp],c[x][:,:,nxpp,nxp])
elif bc == 'P':
bdl = np.shape(c)[0]
c2 = np.zeros((bdl**2,bdl**2,d,d,n),dtype=complex)
for nx in range(d):
for nxp in range(d):
for nxpp in range(d):
for x in range(n):
c2[:,:,nx,nxp,x] = c2[:,:,nx,nxp,x]+np.kron(c[:,:,nx,nxpp,x],c[:,:,nxpp,nxp,x])
c2d = channel_acting_on_operator(chd,c2)
cd = channel_acting_on_operator(chd,c)
cpd = channel_acting_on_operator(chpd,c)
if bc == 'O':
fomd,a0 = fin2_FoMD_OBC_optm(c2d,cd,cpd,epsilon,a0,imprecision)
elif bc == 'P':
fomd,a0 = fin2_FoMD_PBC_optm(c2d,cd,cpd,epsilon,a0,imprecision)
f = np.append(f,fomd)
iter_f += 1
fval = f[-1]
return fval,c,a0
def fin2_FoM_OBC_optm(a,b,epsilon,c,imprecision=10**-2,lherm=True):
"""
Optimization of FoM over MPO for SLD. Function for finite size systems with OBC. Version with two states separated by epsilon.
Parameters:
a: MPO for the density matrix at the value of estimated parameter phi=phi_0, expected list of length n of ndarrays of a shape (bd,bd,d,d) (bd can vary between sites)
b: MPO for the density matrix at the value of estimated parameter phi=phi_0+epsilon, expected list of length n of ndarrays of a shape (bd,bd,d,d) (bd can vary between sites)
epsilon: value of a separation between estimated parameters encoded in a and b, float
c: MPO for the SLD, expected list of length n of ndarrays of a shape (bd,bd,d,d) (bd can vary between sites)
imprecision: expected imprecision of the end results, default value is 10**-2
lherm: boolean value, True (default value) when Hermitian gauge is imposed on SLD MPO, otherwise False
Returns:
fomval: optimal value of FoM
c: optimal MPO for SLD
"""
n = len(c)
tol_fom = 0.1*imprecision/n**2
if n == 1:
if np.shape(a[0])[0] == 1 and np.shape(b[0])[0] == 1 and np.shape(c[0])[0] == 1:
d = np.shape(c[0])[2]
tensors = [b[0][0,0,:,:]]
legs = [[-2,-1]]
l1 = ncon(tensors,legs)
l1 = np.reshape(l1,-1,order='F')
tensors = [a[0][0,0,:,:]]
legs = [[-2,-1]]
l1_0 = ncon(tensors,legs)
l1_0 = np.reshape(l1_0,-1,order='F')
tensors = [a[0][0,0,:,:],np.eye(d)]
legs = [[-2,-3],[-4,-1]]
l2 = ncon(tensors,legs)
l2 = np.reshape(l2,(d*d,d*d),order='F')
dl2 = l2+l2.T
dl1 = 2*(l1-l1_0)/epsilon
dl2pinv = np.linalg.pinv(dl2,tol_fom)
dl2pinv = (dl2pinv+dl2pinv.T)/2
cv = dl2pinv @ dl1
c[0][0,0,:,:] = np.reshape(cv,(d,d),order='F')
if lherm:
c[0] = (c[0]+np.conj(np.moveaxis(c[0],2,3)))/2
cv = np.reshape(c[0],-1,order='F')
fomval = np.real(2*cv @ (l1-l1_0)/epsilon - cv @ l2 @ cv)
else:
warnings.warn('Tensor networks with OBC and length one have to have bond dimension equal to one.')
else:
relunc_fom = 0.1*imprecision
l1f = [0]*n
l1_0f = [0]*n
l2f = [0]*n
fom = np.array([])
iter_fom = 0
while True:
tensors = [c[n-1],b[n-1]]
legs = [[-1,-3,1,2],[-2,-4,2,1]]
l1f[n-2] = ncon(tensors,legs)
l1f[n-2] = l1f[n-2][:,:,0,0]
tensors = [c[n-1],a[n-1]]
legs = [[-1,-3,1,2],[-2,-4,2,1]]
l1_0f[n-2] = ncon(tensors,legs)
l1_0f[n-2] = l1_0f[n-2][:,:,0,0]
tensors = [c[n-1],a[n-1],c[n-1]]
legs = [[-1,-4,1,2],[-2,-5,2,3],[-3,-6,3,1]]
l2f[n-2] = ncon(tensors,legs)
l2f[n-2] = l2f[n-2][:,:,:,0,0,0]
for x in range(n-2,0,-1):
tensors = [c[x],b[x],l1f[x]]
legs = [[-1,3,1,2],[-2,4,2,1],[3,4]]
l1f[x-1] = ncon(tensors,legs)
tensors = [c[x],a[x],l1_0f[x]]
legs = [[-1,3,1,2],[-2,4,2,1],[3,4]]
l1_0f[x-1] = ncon(tensors,legs)
tensors = [c[x],a[x],c[x],l2f[x]]
legs = [[-1,4,1,2],[-2,5,2,3],[-3,6,3,1],[4,5,6]]
l2f[x-1] = ncon(tensors,legs)
bdl1,bdl2,d,d = np.shape(c[0])
tensors = [b[0],l1f[0]]
legs = [[-5,1,-4,-3],[-2,1]]
l1 = ncon(tensors,legs)
l1 = np.reshape(l1,-1,order='F')
tensors = [a[0],l1_0f[0]]
legs = [[-5,1,-4,-3],[-2,1]]
l1_0 = ncon(tensors,legs)
l1_0 = np.reshape(l1_0,-1,order='F')
tensors = [a[0],np.eye(d),l2f[0]]
legs = [[-9,1,-4,-7],[-8,-3],[-2,1,-6]]
l2 = ncon(tensors,legs)
l2 = np.reshape(l2,(bdl1*bdl2*d*d,bdl1*bdl2*d*d),order='F')
dl2 = l2+l2.T
dl1 = 2*(l1-l1_0)/epsilon
dl2pinv = np.linalg.pinv(dl2,tol_fom)
dl2pinv = (dl2pinv+dl2pinv.T)/2
cv = dl2pinv @ dl1
c[0] = np.reshape(cv,(bdl1,bdl2,d,d),order='F')
if lherm:
c[0] = (c[0]+np.conj(np.moveaxis(c[0],2,3)))/2
cv = np.reshape(c[0],-1,order='F')
fom = np.append(fom,np.real(2*cv @ (l1-l1_0)/epsilon - cv @ l2 @ cv))
tensors = [c[0],b[0]]
legs = [[-3,-1,1,2],[-4,-2,2,1]]
l1c = ncon(tensors,legs)
l1c = l1c[:,:,0,0]
tensors = [c[0],a[0]]
legs = [[-3,-1,1,2],[-4,-2,2,1]]
l1_0c = ncon(tensors,legs)
l1_0c = l1_0c[:,:,0,0]
tensors = [c[0],a[0],c[0]]
legs = [[-4,-1,1,2],[-5,-2,2,3],[-6,-3,3,1]]
l2c = ncon(tensors,legs)
l2c = l2c[:,:,:,0,0,0]
for x in range(1,n-1):
bdl1,bdl2,d,d = np.shape(c[x])
import argparse
import os
import random
import shutil
import time
import numpy as np
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
from torch.utils.data import DataLoader
from torch.autograd import Variable  # needed by ToCudaVariable() defined inside main()
from torchvision import transforms
from data import TrashDataset
import resnet
from utils import indexes_to_one_hot
def get_arguments():
parser = argparse.ArgumentParser(description='RecycleNet')
parser.add_argument('--b', '--batch', type=int, default=16)
parser.add_argument('--gpu', type=str, help='0; 0,1; 0,3; etc', required=True)
parser.add_argument('--root_dir', type=str, default='data/')
parser.add_argument('--save_dir', type=str, default='save/')
parser.add_argument('--resume', type=str, default=None)
parser.add_argument('--lr', type=float, default=2e-4)
parser.add_argument('--momentum', default=0.9, type=float, metavar='M', help='momentum')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float, metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('--arch', type=str, default='resnet18_base', help='resnet18, 34, 50, 101, 152')
# parser.add_argument('--lr_finetune', type=float, default=5e-5)
# parser.add_argument('--save_model_interval', type=int, default=5000)
# parser.add_argument('--save_training_img_interval', type=int, default=5000)
# parser.add_argument('--vis_interval', type=int, default=5)
# parser.add_argument('--max_iter', type=int, default=1000000)
# parser.add_argument('--display_id', type=int, default=10)
parser.add_argument('--att_mode', type=str, default='ours', help='attention module mode: ours, cbam, se')
parser.add_argument('--use_att', action='store_true', help='use attention module')
parser.add_argument('--no_pretrain', action='store_false', help='training from scratch')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true', help='evaluate model on validation set')
parser.add_argument('--epochs', default=100, type=int, metavar='N', help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N', help='manual epoch number (useful on restarts)')
parser.add_argument('--adjust-freq', type=int, default=40, help='learning rate adjustment frequency (default: 40)')
parser.add_argument('--print-freq', '-p', default=10, type=int, metavar='N', help='print frequency (default: 10)')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N', help='number of data loading workers (default: 4)')
parser.add_argument('--seed', default=1234, type=int, help='seed for initializing training. ')
return parser.parse_args()
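# Hedged usage note: a typical invocation of this script (paths and flag
# values are illustrative, not from any documented run) would be
#
#   python train.py --gpu 0 --b 32 --arch resnet50_base --use_att --att_mode ours \
#       --root_dir data/ --save_dir save/
#
# --gpu is required; every other option falls back to the defaults above.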
def main():
args = get_arguments()
if args.seed is not None:
random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
BATCH_SIZE = args.b
GPU = args.gpu
ROOT_DIR = args.root_dir
MEAN = [0.485, 0.456, 0.406]
STD = [0.229, 0.224, 0.225]
os.environ['CUDA_VISIBLE_DEVICES'] = GPU
if torch.cuda.is_available():
print('using Cuda devices, num:', torch.cuda.device_count())
if not args.evaluate:
if not os.path.exists(args.save_dir):
os.makedirs(args.save_dir)
def ToCudaVariable(xs, volatile=False, requires_grad=True):
if torch.cuda.is_available():
return [Variable(x.cuda(), volatile=volatile, requires_grad=requires_grad) for x in xs]
else:
return [Variable(x, volatile=volatile, requires_grad=requires_grad) for x in xs]
if torch.cuda.is_available():
device = torch.device('cuda')
else:
device = torch.device('cpu')
if args.arch == 'resnet18_base':
model = nn.DataParallel( resnet.resnet18(pretrained=True if not args.resume else False, num_classes=6, use_att=args.use_att, att_mode=args.att_mode).to(device) )
elif args.arch == 'resnet34_base':
model = nn.DataParallel( resnet.resnet34(pretrained=not args.no_pretrain if not args.resume else False, num_classes=6, use_att=args.use_att,att_mode=args.att_mode).to(device) )
elif args.arch == 'resnet50_base':
model = nn.DataParallel( resnet.resnet50(pretrained=not args.no_pretrain if not args.resume else False, num_classes=6, use_att=args.use_att, att_mode=args.att_mode).to(device) )
elif args.arch == 'resnet101_base':
model = nn.DataParallel( resnet.resnet101(pretrained=not args.no_pretrain if not args.resume else False, num_classes=6, use_att=args.use_att, att_mode=args.att_mode).to(device) )
elif args.arch == 'resnet152_base':
model = nn.DataParallel( resnet.resnet152(pretrained=not args.no_pretrain if not args.resume else False, num_classes=6, use_att=args.use_att, att_mode=args.att_mode).to(device) )
print(model)
print('Number of model parameters: {}'.format(
sum([p.data.nelement() for p in model.parameters()])))
criterion = nn.CrossEntropyLoss().to(device)
# att_params = [p for n,p in model.named_parameters() if n.startswith('module.att') and p.requires_grad]
# non_att_params = [p for n,p in model.named_parameters() if not n.startswith('module.att') and p.requires_grad]
# params = [{'params': non_att_params, 'lr': args.lr / 10.0}, {'params': att_params}]
optimizer = torch.optim.SGD(model.parameters(), args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
best_acc1 = checkpoint['best_acc1']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
print('=> best accuracy {}'.format(best_acc1))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
cudnn.benchmark = True
train_img_transform = transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=MEAN, std=STD)])
train_dataset = TrashDataset(ROOT_DIR, train_img_transform, 'train')
train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=args.workers, pin_memory=True)
val_img_transform = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=MEAN, std=STD)])
val_dataset = TrashDataset(ROOT_DIR, val_img_transform, 'val')
val_loader = DataLoader(val_dataset, batch_size=BATCH_SIZE, shuffle=False, num_workers=args.workers, pin_memory=True)
test_img_transform = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=MEAN, std=STD)])
test_dataset = TrashDataset(ROOT_DIR, test_img_transform, 'test')
test_loader = DataLoader(test_dataset, batch_size=BATCH_SIZE, shuffle=False, num_workers=args.workers, pin_memory=True)
if args.evaluate:
# validate(args, val_loader, model, criterion, device)
test(args, test_loader, model, criterion, device)
return
best_acc1 = 0
for epoch in range(args.start_epoch, args.epochs):
adjust_learning_rate(args, optimizer, epoch, args.adjust_freq)
train(args, train_loader, model, criterion, optimizer, epoch, device)
acc1 = validate(args, val_loader, model, criterion, device)
is_best = acc1 > best_acc1
best_acc1 = max(acc1, best_acc1)
save_checkpoint({
'epoch' : epoch + 1,
'arch' : args.arch,
'state_dict': model.state_dict(),
'best_acc1': best_acc1,
'optimizer': optimizer.state_dict(),
}, is_best, args.save_dir)
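# train()/validate() below rely on an AverageMeter helper that this excerpt
# does not define (adjust_learning_rate, save_checkpoint, validate and test
# are likewise assumed to be defined elsewhere in the project). A minimal
# sketch of the conventional AverageMeter, as in the PyTorch ImageNet example,
# is given here for completeness; if the project ships its own definition,
# that one takes precedence.
class AverageMeter(object):
    """Tracks the latest value, running sum, count and average of a metric."""
    def __init__(self):
        self.reset()
    def reset(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0
    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count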
def train(args, train_loader, model, criterion, optimizer, epoch, device):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
model.train()
end = time.time()
for i, (input, target) in enumerate(train_loader):
data_time.update(time.time() - end)
if args.gpu is not None:
input = input.to(device)
target = torch.from_numpy(np.asarray(target))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Applies necessary calibration to the cubes and corrects NACO biases
@author: lewis, iain
"""
__author__ = '<NAME>, <NAME>'
__all__ = ['raw_dataset', 'find_nearest', 'find_filtered_max']
import pdb
import numpy as np
import pyprind
import os
import random
import matplotlib as mpl
mpl.use('Agg') #show option for plot is unavailable with this option, set specifically to save plots on m3
from matplotlib import pyplot as plt
from numpy import isclose
from vip_hci.fits import open_fits, write_fits
from vip_hci.preproc import frame_crop, cube_crop_frames, frame_shift,\
cube_subtract_sky_pca, cube_correct_nan, cube_fix_badpix_isolated,cube_fix_badpix_clump,\
cube_recenter_2dfit
from vip_hci.var import frame_center, get_annulus_segments, frame_filter_lowpass,\
mask_circle, dist, fit_2dgaussian, frame_filter_highpass, get_circle, get_square
from vip_hci.metrics import detection, normalize_psf
from vip_hci.conf import time_ini, time_fin, timing
from hciplot import plot_frames
from skimage.feature import register_translation
from photutils import CircularAperture, aperture_photometry
from astropy.stats import sigma_clipped_stats
from scipy.optimize import minimize
def find_shadow_list(self, file_list, threshold = 0, verbose = True, debug = False, plot = None):
"""
In coronagraphic NACO data there is a Lyot stop casting a shadow on the detector;
this method returns the radius and central position of that circular shadow.
"""
cube = open_fits(self.inpath + file_list[0],verbose=debug)
nz, ny, nx = cube.shape
median_frame = np.median(cube, axis = 0)
median_frame = frame_filter_lowpass(median_frame, median_size = 7, mode = 'median')
median_frame = frame_filter_lowpass(median_frame, mode = 'gauss',fwhm_size = 5)
ycom,xcom = np.unravel_index(np.argmax(median_frame), median_frame.shape) #location of AGPM
if debug:
write_fits(self.outpath + 'shadow_median_frame', median_frame,verbose=debug)
shadow = np.where(median_frame >threshold, 1, 0) #lyot shadow
#create similar shadow centred at the origin
area = np.sum(shadow)
r = np.sqrt(area/np.pi)
tmp = np.zeros([ny,nx])
tmp = mask_circle(tmp,radius = r, fillwith = 1)
tmp = frame_shift(tmp, ycom - ny/2 ,xcom - nx/2, imlib='opencv') # no vip_fft because the image isn't square
#measure translation
shift_yx, _, _ = register_translation(tmp, shadow,
upsample_factor= 100)
#express as a coordinate
y, x = shift_yx
cy = np.round(ycom-y)
cx = np.round(xcom-x)
if debug:
pdb.set_trace()
if verbose:
print('The centre of the shadow is','cy = ',cy,'cx = ',cx)
if plot == 'show':
plot_frames((median_frame, shadow, tmp),vmax=(np.percentile(median_frame,99.9),1,1),
vmin=(np.percentile(median_frame,0.1),0,0),label=('Median frame','Shadow',''),title='Shadow')
if plot == 'save':
plot_frames((median_frame, shadow, tmp), vmax=(np.percentile(median_frame,99.9),1,1),
vmin=(np.percentile(median_frame,0.1),0,0),label=('Median frame','Shadow',''),title='Shadow',
dpi=300, save = self.outpath + 'shadow_fit.pdf')
return cy, cx, r
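# Hedged usage note: dark_subtract() below calls this as
#   _, _, self.shadow_r = find_shadow_list(self, sci_list, verbose=verbose, debug=debug, plot=plot)
# i.e. only the fitted shadow radius is kept for building the PCA masks.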
def find_filtered_max(path, verbose = True, debug = False):
"""
This method will find the location of the max after low pass filtering.
It gives a rough approximation of the stars location, reliable in unsaturated frames where the star dominates.
Need to supply the path to the cube.
"""
cube = open_fits(path, verbose = debug)
#nz, ny, nx = cube.shape
#cy,cx = frame_center(cube, verbose = verbose) #find central pixel coordinates
# then the position will be that plus the relative shift in y and x
#rel_shift_x = rel_AGPM_pos_xy[0] # 6.5 is pixels from frame center to AGPM in y in an example data set, thus providing the relative shift
#rel_shift_y = rel_AGPM_pos_xy[1] # 50.5 is pixels from frame center to AGPM in x in an example data set, thus providing the relative shift
#y_tmp = cy + rel_shift_y
#x_tmp = cx + rel_shift_x
median_frame = np.median(cube, axis = 0)
# define a square of 100 x 100 with the center being the approximate AGPM/star position
#median_frame,cornery,cornerx = get_square(median_frame, size = size, y = y_tmp, x = x_tmp, position = True, verbose = True)
# apply low pass filter
#filter for the brightest source
median_frame = frame_filter_lowpass(median_frame, median_size = 7, mode = 'median')
median_frame = frame_filter_lowpass(median_frame, mode = 'gauss',fwhm_size = 5)
#obtain location of the bright source
ycom,xcom = np.unravel_index(np.argmax(median_frame), median_frame.shape)
if verbose:
print('The location of the star is','ycom =',ycom,'xcom =', xcom)
if debug:
pdb.set_trace()
return [ycom, xcom]
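# Hedged usage sketch (the file name is a placeholder): the function only
# needs a path to a FITS cube and returns [y, x] of the brightest
# low-pass-filtered source, e.g.
#   star_yx = find_filtered_max('raw/NACO_science_cube.fits', verbose=True)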
def find_AGPM(path, rel_AGPM_pos_xy = (50.5, 6.5), size = 101, verbose = True, debug = False):
"""
added by Iain to prevent dust grains being picked up as the AGPM
This method will find the location of the AGPM or star (even when sky frames are mixed with science frames), by
using the known relative distance of the AGPM from the frame center in all VLT/NaCO datasets. It then creates a
subset square image around the expected location and applies a low pass filter + max search method and returns
the (y,x) location of the AGPM/star
Parameters
----------
path : str
Path to cube
rel_AGPM_pos_xy : tuple, float
relative location of the AGPM from the frame center in pixels, should be left unchanged. This is used to
calculate how many pixels in x and y the AGPM is from the center and can be applied to almost all datasets
with VLT/NaCO as the AGPM is always in the same approximate position
size : int
pixel dimensions of the square to sample for the AGPM/star (ie size = 100 is 100 x 100 pixels)
verbose : bool
If True extra messages are shown.
debug : bool, False by default
Enters pdb once the location has been found
Returns
----------
[ycom, xcom] : location of AGPM or star
"""
cube = open_fits(path,verbose = debug) # opens first sci/sky cube
cy,cx = frame_center(cube, verbose = verbose) #find central pixel coordinates
# then the position will be that plus the relative shift in y and x
rel_shift_x = rel_AGPM_pos_xy[0] # 50.5 is pixels from frame center to AGPM in x in an example data set, thus providing the relative shift
rel_shift_y = rel_AGPM_pos_xy[1] # 6.5 is pixels from frame center to AGPM in y in an example data set, thus providing the relative shift
#the center of the square to apply the low pass filter to - is the approximate position of the AGPM/star based on previous observations
y_tmp = cy + rel_shift_y
x_tmp = cx + rel_shift_x
median_frame = cube[-1]
# define a square of 100 x 100 with the center being the approximate AGPM/star position
median_frame,cornery,cornerx = get_square(median_frame, size = size, y = y_tmp, x = x_tmp, position = True, verbose = True)
# apply low pass filter
median_frame = frame_filter_lowpass(median_frame, median_size = 7, mode = 'median')
median_frame = frame_filter_lowpass(median_frame, mode = 'gauss',fwhm_size = 5)
# find coordinates of max flux in the square
ycom_tmp, xcom_tmp = np.unravel_index(np.argmax(median_frame), median_frame.shape)
# AGPM/star is the bottom-left corner coordinates plus the location of the max in the square
ycom = cornery+ycom_tmp
xcom = cornerx+xcom_tmp
if verbose:
print('The location of the AGPM/star is','ycom =',ycom,'xcom =', xcom)
if debug:
pdb.set_trace()
return [ycom, xcom]
def find_nearest(array, value, output='index', constraint=None):
"""
Function to find the index, and optionally the value, of an array's closest element to a certain value.
Possible outputs: 'index','value','both'
Possible constraints: 'ceil', 'floor', None ("ceil" will return the closest element with a value greater than 'value', "floor" the opposite)
"""
if type(array) is np.ndarray:
pass
elif type(array) is list:
array = np.array(array)
else:
raise ValueError("Input type for array should be np.ndarray or list.")
idx = (np.abs(array-value)).argmin()
if constraint == 'ceil' and array[idx]-value < 0:
idx+=1
elif constraint == 'floor' and value-array[idx] < 0:
idx-=1
if output=='index': return idx
elif output=='value': return array[idx]
else: return array[idx], idx
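# Hedged worked example for find_nearest (arbitrary values):
#   find_nearest([0.0, 0.5, 1.0], 0.7)            -> 1    (index of 0.5)
#   find_nearest([0.0, 0.5, 1.0], 0.7, 'value')   -> 0.5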
class raw_dataset:
"""
In order to successfully run the pipeline you must run the methods in the following order:
1. dark_subtraction()
2. flat_field_correction()
3. correct_nan()
4. correct_bad_pixels()
5. first_frames_removal()
6. get_stellar_psf()
7. subtract_sky()
Running the methods in this order prevents attributes from being used before they are defined.
"""
def __init__(self, inpath, outpath, dataset_dict,final_sz = None, coro = True):
self.inpath = inpath
self.outpath = outpath
self.final_sz = final_sz
self.coro = coro
sci_list = []
# get the common size (crop size)
with open(self.inpath+"sci_list.txt", "r") as f:
tmp = f.readlines()
for line in tmp:
sci_list.append(line.split('\n')[0])
nx = open_fits(self.inpath + sci_list[0],verbose = False).shape[2]
self.com_sz = np.array([int(nx - 1)])
write_fits(self.outpath + 'common_sz', self.com_sz, verbose = False)
#the size of the shadow in NACO data should be constant.
#will differ for NACO data where the coronagraph has been adjusted
self.shadow_r = 280 # shouldn't change for NaCO data
sci_list_mjd = [] # observation time of each sci cube
sky_list_mjd = [] # observation time of each sky cube
with open(self.inpath+"sci_list_mjd.txt", "r") as f:
tmp = f.readlines()
for line in tmp:
sci_list_mjd.append(float(line.split('\n')[0]))
with open(self.inpath+"sky_list_mjd.txt", "r") as f:
tmp = f.readlines()
for line in tmp:
sky_list_mjd.append(float(line.split('\n')[0]))
self.sci_list_mjd = sci_list_mjd
self.sky_list_mjd = sky_list_mjd
self.dataset_dict = dataset_dict
self.fast_reduction = dataset_dict['fast_reduction']
def get_final_sz(self, final_sz = None, verbose = True, debug = False):
"""
Update the cropping size as you wish
debug: enters Python debugger after finding the size
"""
if final_sz is None:
final_sz_ori = min(2*self.agpm_pos[0]-1,2*self.agpm_pos[1]-1,2*\
(self.com_sz-self.agpm_pos[0])-1,2*\
(self.com_sz-self.agpm_pos[1])-1, int(2*self.shadow_r))
else:
final_sz_ori = min(2*self.agpm_pos[0]-1,2*self.agpm_pos[1]-1,\
2*(self.com_sz-self.agpm_pos[0])-1,\
2*(self.com_sz-self.agpm_pos[1])-1,\
int(2*self.shadow_r), final_sz)
if final_sz_ori%2 == 0:
final_sz_ori -= 1
final_sz = int(final_sz_ori) # iain: added int() around final_sz_ori as cropping requires an integer
if verbose:
print('the final crop size is ', final_sz)
if debug:
pdb.set_trace()
return final_sz
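# Hedged worked example for get_final_sz (illustrative numbers): with
# agpm_pos = (400, 380), com_sz = 700 and shadow_r = 280 the candidate sizes
# are (799, 759, 599, 639, 560); the minimum 560 is even, so it is reduced to
# the final odd crop size 559.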
def dark_subtract(self, bad_quadrant = [3], method = 'pca', npc_dark = 1, verbose = True, debug = False, plot = None, NACO = True):
"""
Dark subtraction of science, sky and flats using principal component analysis or median subtraction.
Unsaturated frames are always median dark subtracted.
All frames are also cropped to a common size.
Parameters:
***********
bad_quadrant : list, optional
list of bad quadrants to ignore. Quadrants are numbered:
2 | 1
3 | 4
Default = [3] (the inherently bad NaCO quadrant).
method : str, default = 'pca'
'pca' for dark subtraction via principal component analysis
'median' for median subtraction of dark
npc_dark : int, optional
number of principal components subtracted during dark subtraction. Default = 1 (most variance in the PCA library)
plot : 'save', 'show' or None, optional
whether to save the plots, show them interactively, or do nothing
"""
self.com_sz = int(open_fits(self.outpath + 'common_sz',verbose=debug)[0])
crop = 0
if NACO:
mask_std = np.zeros([self.com_sz,self.com_sz])
cy,cx = frame_center(mask_std)
# exclude the negative dot if the frame includes it
if self.com_sz <=733:
mask_std[int(cy)-23:int(cy)+23,:] = 1
else:
crop = int((self.com_sz-733)/2)
mask_std[int(cy) - 23:int(cy) + 23, :-crop] = 1
write_fits(self.outpath + 'mask_std.fits',mask_std,verbose=debug)
sci_list = []
with open(self.inpath +"sci_list.txt", "r") as f:
tmp = f.readlines()
for line in tmp:
sci_list.append(line.split('\n')[0])
sky_list = []
with open(self.inpath +"sky_list.txt", "r") as f:
tmp = f.readlines()
for line in tmp:
sky_list.append(line.split('\n')[0])
unsat_list = []
with open(self.inpath +"unsat_list.txt", "r") as f:
tmp = f.readlines()
for line in tmp:
unsat_list.append(line.split('\n')[0])
unsat_dark_list = []
with open(self.inpath +"unsat_dark_list.txt", "r") as f:
tmp = f.readlines()
for line in tmp:
unsat_dark_list.append(line.split('\n')[0])
flat_list = []
with open(self.inpath +"flat_list.txt", "r") as f:
tmp = f.readlines()
for line in tmp:
flat_list.append(line.split('\n')[0])
flat_dark_list = []
with open(self.inpath +"flat_dark_list.txt", "r") as f:
tmp = f.readlines()
for line in tmp:
flat_dark_list.append(line.split('\n')[0])
sci_dark_list = []
with open(self.inpath +"sci_dark_list.txt", "r") as f:
tmp = f.readlines()
for line in tmp:
sci_dark_list.append(line.split('\n')[0])
if not os.path.isfile(self.inpath + sci_list[-1]):
raise FileNotFoundError('Missing .fits files. Double check the contents of the input path')
self.com_sz = int(open_fits(self.outpath + 'common_sz',verbose=debug)[0])
pixel_scale = self.dataset_dict['pixel_scale']
tmp = np.zeros([len(flat_dark_list), self.com_sz, self.com_sz])
master_all_darks = []
#cropping the flat dark cubes to com_sz
for fd, fd_name in enumerate(flat_dark_list):
tmp_tmp = open_fits(self.inpath+fd_name, header=False, verbose=debug)
tmp[fd] = frame_crop(tmp_tmp, self.com_sz, force = True , verbose= debug)
print(tmp[fd].shape)
master_all_darks.append(tmp[fd])
write_fits(self.outpath+'flat_dark_cube.fits', tmp, verbose=debug)
if verbose:
print('Flat dark cubes have been cropped and saved')
tmp = np.zeros([len(sci_dark_list), self.com_sz, self.com_sz])
#cropping the SCI dark cubes to com_sz
for sd, sd_name in enumerate(sci_dark_list):
tmp_tmp = open_fits(self.inpath+sd_name, header=False, verbose=debug)
n_dim = tmp_tmp.ndim
if sd == 0:
if n_dim == 2:
tmp = np.array([frame_crop(tmp_tmp, self.com_sz,
force = True, verbose=debug)])
master_all_darks.append(tmp)
print(tmp.shape)
else:
tmp = cube_crop_frames(tmp_tmp, self.com_sz, force = True, verbose=debug)
master_all_darks.append(tmp[-1])
print(tmp[-1].shape)
else:
if n_dim == 2:
tmp = np.append(tmp,[frame_crop(tmp_tmp, self.com_sz, force = True, verbose=debug)],axis=0)
master_all_darks.append(tmp)
print(tmp.shape)
else:
tmp = np.append(tmp,cube_crop_frames(tmp_tmp, self.com_sz, force = True, verbose=debug),axis=0)
master_all_darks.append(tmp[-1])
print(tmp[-1].shape)
write_fits(self.outpath + 'sci_dark_cube.fits', tmp, verbose=debug)
if verbose:
print('Sci dark cubes have been cropped and saved')
tmp = np.zeros([len(unsat_dark_list), self.com_sz, self.com_sz])
#cropping of UNSAT dark frames to the common size or less
#will only add to the master dark cube if it is the same size as the SKY and SCI darks
for sd, sd_name in enumerate(unsat_dark_list):
tmp_tmp = open_fits(self.inpath+sd_name, header=False, verbose=debug)
n_dim = tmp_tmp.ndim
if sd == 0:
if n_dim ==2:
ny, nx = tmp_tmp.shape
if nx < self.com_sz:
tmp = np.array([frame_crop(tmp_tmp, nx - 1, force = True, verbose = debug)])
print(tmp.shape)
else:
if nx>self.com_sz:
tmp = np.array([frame_crop(tmp_tmp, self.com_sz, force = True, verbose = debug)])
else:
tmp = np.array([tmp_tmp])
master_all_darks.append(tmp)
print(tmp.shape)
else:
nz, ny, nx = tmp_tmp.shape
if nx < self.com_sz:
tmp = cube_crop_frames(tmp_tmp, nx-1, force = True, verbose=debug)
print(tmp[-1].shape)
else:
if nx > self.com_sz:
tmp = cube_crop_frames(tmp_tmp, self.com_sz, force = True, verbose=debug)
else:
tmp = tmp_tmp
master_all_darks.append(np.median(tmp[-nz:],axis=0))
print(tmp[-1].shape)
else:
if n_dim == 2:
ny, nx = tmp_tmp.shape
if nx < self.com_sz:
tmp = np.append(tmp,[frame_crop(tmp_tmp, nx-1, force = True, verbose=debug)],axis=0)
print(tmp[-1].shape)
else:
if nx > self.com_sz:
tmp = np.append(tmp,[frame_crop(tmp_tmp, self.com_sz, force = True, verbose=debug)],axis=0)
else:
tmp = np.append(tmp,[tmp_tmp])
master_all_darks.append(tmp[-1])
print(tmp[-1].shape)
else:
nz, ny, nx = tmp_tmp.shape
if nx < self.com_sz:
tmp = np.append(tmp,cube_crop_frames(tmp_tmp, nx - 1, force = True, verbose=debug),axis=0)
print(tmp[-1].shape)
else:
if nx > self.com_sz:
tmp = np.append(tmp,cube_crop_frames(tmp_tmp, self.com_sz, force = True, verbose=debug),axis=0)
else:
tmp = np.append(tmp,tmp_tmp)
master_all_darks.append(np.median(tmp[-nz:],axis=0))
print(tmp[-1].shape)
write_fits(self.outpath+'unsat_dark_cube.fits', tmp, verbose=debug)
if verbose:
print('Unsat dark cubes have been cropped and saved')
if verbose:
print('Total of {} median dark frames. Saving dark cube to fits file...'.format(len(master_all_darks)))
#convert master all darks to numpy array here
master_all_darks = np.array(master_all_darks)
write_fits(self.outpath + "master_all_darks.fits", master_all_darks,verbose=debug)
#defining the mask for the sky/sci pca dark subtraction
_, _, self.shadow_r = find_shadow_list(self, sci_list,verbose=verbose, debug=debug,plot=plot)
if self.coro:
self.agpm_pos = find_AGPM(self.inpath + sci_list[0],verbose=verbose,debug=debug)
else:
raise ValueError('Pipeline does not handle non-coronagraphic data here yet')
mask_AGPM_com = np.ones([self.com_sz,self.com_sz])
cy,cx = frame_center(mask_AGPM_com)
inner_rad = 3/pixel_scale
outer_rad = self.shadow_r*0.8
if NACO:
mask_sci = np.zeros([self.com_sz,self.com_sz])
mask_sci[int(cy)-23:int(cy)+23,int(cx-outer_rad):int(cx+outer_rad)] = 1
write_fits(self.outpath + 'mask_sci.fits', mask_sci, verbose=debug)
# create mask for sci and sky
mask_AGPM_com = get_annulus_segments(mask_AGPM_com, inner_rad, outer_rad - inner_rad, mode='mask')[0]
mask_AGPM_com = frame_shift(mask_AGPM_com, self.agpm_pos[0]-cy, self.agpm_pos[1]-cx, border_mode='constant',
imlib='opencv')
#create mask for flats
mask_AGPM_flat = np.ones([self.com_sz,self.com_sz])
if verbose:
print('The masks for SCI, SKY and FLAT have been defined')
# will exclude a quadrant if specified by looping over the list of bad quadrants and filling the mask with zeros
if len(bad_quadrant) > 0 :
for quadrant in bad_quadrant:
if quadrant == 1:
mask_AGPM_com[int(cy)+1:,int(cx)+1:] = 0
mask_AGPM_flat[int(cy)+1:,int(cx)+1:] = 0
#mask_std[int(cy)+1:,int(cx)+1:] = 0
#mask_sci[int(cy)+1:,int(cx)+1:] = 0
if quadrant == 2:
mask_AGPM_com[int(cy)+1:,:int(cx)+1] = 0
mask_AGPM_flat[int(cy)+1:,:int(cx)+1] = 0
#mask_std[int(cy)+1:,:int(cx)+1] = 0
#mask_sci[int(cy)+1:,:int(cx)+1] = 0
if quadrant == 3:
mask_AGPM_com[:int(cy)+1,:int(cx)+1] = 0
mask_AGPM_flat[:int(cy)+1,:int(cx)+1] = 0
#mask_std[:int(cy)+1,:int(cx)+1] = 0
#mask_sci[:int(cy)+1,:int(cx)+1] = 0
if quadrant == 4:
mask_AGPM_com[:int(cy)+1,int(cx)+1:] = 0
mask_AGPM_flat[:int(cy)+1,int(cx)+1:] = 0
#mask_std[:int(cy)+1,int(cx)+1:] = 0
#mask_sci[:int(cy)+1,:int(cx)+1] = 0
# save the mask for checking/testing
write_fits(self.outpath + 'mask_AGPM_com.fits',mask_AGPM_com, verbose = debug)
write_fits(self.outpath + 'mask_AGPM_flat.fits',mask_AGPM_flat, verbose = debug)
write_fits(self.outpath + 'mask_std.fits', mask_std, verbose=debug)
write_fits(self.outpath + 'mask_sci.fits', mask_sci, verbose=debug)
if verbose:
print('Masks have been saved as fits file')
if method == 'median':
# median dark subtraction of SCI cubes
tmp_tmp_tmp = open_fits(self.outpath + 'sci_dark_cube.fits',verbose=debug)
tmp_tmp_tmp_median = np.median(tmp_tmp_tmp, axis=0)
tmp_tmp_tmp_median = np.median(tmp_tmp_tmp_median[np.where(mask_AGPM_com)]) # consider the median within the mask
for sc, fits_name in enumerate(sci_list):
tmp = open_fits(self.inpath + fits_name, header=False, verbose=debug)
tmp = cube_crop_frames(tmp, self.com_sz, force=True, verbose=debug)
tmp_tmp = tmp - tmp_tmp_tmp_median
write_fits(self.outpath + '1_crop_' + fits_name, tmp_tmp)
if verbose:
print('Dark has been median subtracted from SCI cubes')
if plot:
tmp_tmp_med = np.median(tmp, axis=0) # sci before subtraction
tmp_tmp_med_after = np.median(tmp_tmp, axis=0) # sci after dark subtract
if plot == 'show':
plot_frames((tmp_tmp_med, tmp_tmp_med_after, mask_AGPM_com), vmax=(np.percentile(tmp_tmp_med,99.9),
np.percentile(tmp_tmp_med_after,99.9), 1), vmin=(np.percentile(tmp_tmp_med,0.1),
np.percentile(tmp_tmp_med_after,0.1), 0), label=('Raw Sci', 'Sci Median Dark Subtracted',
'Pixel Mask'), title='Sci Median Dark Subtraction')
if plot == 'save':
plot_frames((tmp_tmp_med, tmp_tmp_med_after, mask_AGPM_com), vmax=(np.percentile(tmp_tmp_med,99.9),
np.percentile(tmp_tmp_med_after,99.9), 1), vmin=(np.percentile(tmp_tmp_med,0.1),
np.percentile(tmp_tmp_med_after,0.1), 0), label=('Raw Sci', 'Sci Median Dark Subtracted',
'Pixel Mask'), title='Sci Median Dark Subtraction',
dpi=300, save=self.outpath + 'SCI_median_dark_subtract.pdf')
# median dark subtract of sky cubes
tmp_tmp_tmp = open_fits(self.outpath + 'sci_dark_cube.fits',verbose=debug)
tmp_tmp_tmp_median = np.median(tmp_tmp_tmp, axis=0)
tmp_tmp_tmp_median = np.median(tmp_tmp_tmp_median[np.where(mask_AGPM_com)])
for sc, fits_name in enumerate(sky_list):
tmp = open_fits(self.inpath + fits_name, header=False, verbose=debug)
tmp = cube_crop_frames(tmp, self.com_sz, force=True, verbose=debug)
tmp_tmp = tmp - tmp_tmp_tmp_median
write_fits(self.outpath + '1_crop_' + fits_name, tmp_tmp)
if verbose:
print('Dark has been median subtracted from SKY cubes')
if plot:
tmp_tmp_med = np.median(tmp, axis=0) # sky before subtraction
tmp_tmp_med_after = np.median(tmp_tmp, axis=0) # sky after dark subtract
if plot == 'show':
plot_frames((tmp_tmp_med, tmp_tmp_med_after, mask_AGPM_com), vmax=(np.percentile(tmp_tmp_med,99.9),
np.percentile(tmp_tmp_med_after,99.9), 1), vmin=(np.percentile(tmp_tmp_med,0.1),
np.percentile(tmp_tmp_med_after,0.1), 0), label=('Raw Sky', 'Sky Median Dark Subtracted',
'Pixel Mask'), title='Sky Median Dark Subtraction')
if plot == 'save':
plot_frames((tmp_tmp_med, tmp_tmp_med_after, mask_AGPM_com), vmax=(np.percentile(tmp_tmp_med,99.9),
np.percentile(tmp_tmp_med_after,99.9), 1), vmin=(np.percentile(tmp_tmp_med,0.1),
np.percentile(tmp_tmp_med_after,0.1), 0), label=('Raw Sky', 'Sky Median Dark Subtracted',
'Pixel Mask'), title='Sky Median Dark Subtraction',
dpi=300, save=self.outpath + 'SKY_median_dark_subtract.pdf')
# median dark subtract of flat cubes
tmp_tmp = np.zeros([len(flat_list), self.com_sz, self.com_sz])
tmp_tmp_tmp = open_fits(self.outpath + 'flat_dark_cube.fits',verbose=debug)
tmp_tmp_tmp_median = np.median(tmp_tmp_tmp, axis=0)
tmp_tmp_tmp_median = np.median(tmp_tmp_tmp_median[np.where(mask_AGPM_flat)])
for sc, fits_name in enumerate(flat_list):
tmp = open_fits(self.inpath + fits_name, header=False, verbose=debug)
if tmp.ndim == 2:
tmp = frame_crop(tmp, self.com_sz, force=True, verbose=debug)
else:
tmp = cube_crop_frames(tmp, self.com_sz, force=True, verbose=debug)
tmp_tmp[sc] = tmp - tmp_tmp_tmp_median
write_fits(self.outpath + '1_crop_flat_cube.fits', tmp_tmp,verbose=debug)
if verbose:
print('Dark has been median subtracted from FLAT frames')
if plot:
tmp_tmp_med = np.median(tmp, axis=0) # flat cube before subtraction
tmp_tmp_med_after = np.median(tmp_tmp, axis=0) # flat cube after dark subtract
if plot == 'show':
plot_frames((tmp_tmp_med, tmp_tmp_med_after, mask_AGPM_flat), vmax=(np.percentile(tmp_tmp_med,99.9),
np.percentile(tmp_tmp_med_after,99.9), 1), vmin=(np.percentile(tmp_tmp_med,0.1),
np.percentile(tmp_tmp_med_after,0.1), 0), label=('Raw Flat', 'Flat Median Dark Subtracted',
'Pixel Mask'), title='Flat Median Dark Subtraction')
if plot == 'save':
plot_frames((tmp_tmp_med, tmp_tmp_med_after, mask_AGPM_flat), vmax=(np.percentile(tmp_tmp_med,99.9),
np.percentile(tmp_tmp_med_after,99.9), 1), vmin=(np.percentile(tmp_tmp_med,0.1),
np.percentile(tmp_tmp_med_after,0.1), 0), label=('Raw Flat', 'Flat Median Dark Subtracted',
'Pixel Mask'), title='Flat Median Dark Subtraction',
dpi=300, save=self.outpath + 'FLAT_median_dark_subtract.pdf')
if method == 'pca':
tmp_tmp_tmp = open_fits(self.outpath + 'master_all_darks.fits', verbose = debug) # the cube of all darks - PCA works better with a larger library of DARKs
tmp_tmp = np.zeros([len(flat_list), self.com_sz, self.com_sz])
diff = np.zeros([len(flat_list)])
bar = pyprind.ProgBar(len(flat_list), stream=1, title='Finding difference between DARKS and FLATS')
for fl, flat_name in enumerate(flat_list):
tmp = open_fits(self.inpath+flat_name, header=False, verbose=False)
tmp_tmp[fl] = frame_crop(tmp, self.com_sz, force=True, verbose=False) # added force = True
diff[fl] = np.median(tmp_tmp_tmp) - np.median(tmp_tmp[fl]) # median of all dark pixels minus median of this flat frame
tmp_tmp[fl] += diff[fl] # shift the flat to the median flux level of the darks
bar.update()
#write_fits(self.outpath + 'TMP_cropped_flat.fits', tmp_tmp, verbose=verbose) # to check if the flats are aligned with the darks
def _get_test_diff_flat(guess, verbose=False):
# 1-D cost function: repeat the PCA dark subtraction with a trial additive
# offset 'guess' and return the standard deviation of the residuals inside
# the mask, so the optimizer can find the offset that flattens the background
tmp_tmp_pca = np.median(cube_subtract_sky_pca(tmp_tmp+guess, tmp_tmp_tmp,
mask_AGPM_flat, ref_cube=None, ncomp=npc_dark), axis=0)
tmp_tmp_pca -= np.median(diff)+guess # remove the offset that was added before PCA
subframe = tmp_tmp_pca[np.where(mask_std)] # mask_std is an optional argument
subframe = subframe.reshape((-1, self.com_sz-crop))
stddev = np.std(subframe)
write_fits(self.outpath + 'dark_flat_subframe.fits', subframe, verbose=debug)
if verbose:
print('Guess = {}'.format(guess))
print('Stddev = {}'.format(stddev))
return stddev
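# Illustrative sketch (not part of the pipeline): the nested function above is
# a scalar cost function, and scipy's Nelder-Mead (used below) searches for the
# additive offset that minimizes the residual noise, e.g.
# from scipy.optimize import minimize
# cost = lambda offset: np.std(subtract_model(data + offset)) # hypothetical cost
# best_offset = minimize(cost, x0=0, method='Nelder-Mead').x[0]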
if verbose:
print('FLATS difference w.r.t. DARKS:', diff)
print('Calculating optimal PCA dark subtraction for FLATS...')
guess = 0
solu = minimize(_get_test_diff_flat, x0=guess, args=(debug,), method='Nelder-Mead', tol=2e-4, options={'maxiter': 100, 'disp': verbose})
best_test_diff = solu.x # x is the solution (ndarray)
best_test_diff = best_test_diff[0] # take out of array
if verbose:
print('Best difference (value) to add to FLATS is {} found in {} iterations'.format(best_test_diff,solu.nit))
tmp_tmp_pca = cube_subtract_sky_pca(tmp_tmp + best_test_diff, tmp_tmp_tmp,
mask_AGPM_flat, ref_cube=None, ncomp=npc_dark)
bar = pyprind.ProgBar(len(flat_list), stream=1, title='Correcting FLATS via PCA dark subtraction')
for fl, flat_name in enumerate(flat_list):
tmp_tmp_pca[fl] = tmp_tmp_pca[fl] - diff[fl] - best_test_diff # add back the constant
bar.update()
write_fits(self.outpath + '1_crop_flat_cube.fits', tmp_tmp_pca, verbose=debug)
if plot:
tmp_tmp_med = np.median(tmp_tmp, axis=0) # flat before subtraction
tmp_tmp_pca = np.median(tmp_tmp_pca, axis=0) # flat after dark subtract
if plot == 'show':
plot_frames((tmp_tmp_med, tmp_tmp_pca, mask_AGPM_flat), vmax=(np.percentile(tmp_tmp_med,99.9),
np.percentile(tmp_tmp_pca,99.9), 1),
vmin=(np.percentile(tmp_tmp_med,0.1), np.percentile(tmp_tmp_pca,0.1), 0),
title='Flat PCA Dark Subtraction')
if plot == 'save':
plot_frames((tmp_tmp_med, tmp_tmp_pca, mask_AGPM_flat), vmax=(np.percentile(tmp_tmp_med,99.9),
np.percentile(tmp_tmp_pca,99.9), 1),
vmin=(np.percentile(tmp_tmp_med,0.1), np.percentile(tmp_tmp_pca,0.1), 0),
title='Flat PCA Dark Subtraction', dpi=300, save=self.outpath + 'FLAT_PCA_dark_subtract.pdf')
if verbose:
print('Flats have been dark corrected')
# PCA dark subtraction of SCI cubes
tmp_tmp_tmp = open_fits(self.outpath + 'master_all_darks.fits', verbose =debug)
tmp_tmp_tmp_median = np.median(tmp_tmp_tmp,axis = 0) # median frame of all darks
tmp_tmp_tmp_median = np.median(tmp_tmp_tmp_median[np.where(mask_AGPM_com)]) # scalar median of the pixels within the mask
tmp_tmp = np.zeros([len(sci_list), self.com_sz, self.com_sz])
diff = np.zeros([len(sci_list)])
bar = pyprind.ProgBar(len(sci_list), stream=1, title='Finding difference between DARKS and SCI cubes. This may take some time.')
for sc, fits_name in enumerate(sci_list):
tmp = open_fits(self.inpath+fits_name, header=False, verbose=debug) # open science
tmp = cube_crop_frames(tmp, self.com_sz, force = True, verbose=debug) # crop science to common size
# PCA works best when the frames are on a similar flux scale, so the median difference is added before PCA and removed after
tmp_median = np.median(tmp,axis = 0) # make median frame from all frames in cube
#tmp_median = tmp_median[np.where(mask_AGPM_com)]
diff[sc] = tmp_tmp_tmp_median - np.median(tmp_median) # median pixel value of all darks minus median pixel value of sci cube
tmp_tmp[sc] = tmp_median + diff[sc]
if debug:
print('difference w.r.t dark =', diff[sc])
bar.update()
write_fits(self.outpath + 'dark_sci_diff.fits',diff,verbose=debug)
write_fits(self.outpath + 'sci_plus_diff.fits',tmp_tmp,verbose=debug)
if verbose:
print('SCI difference w.r.t. DARKS has been saved to fits file.')
print('SCI difference w.r.t. DARKS:', diff)
def _get_test_diff_sci(guess, verbose=False):
# 1-D cost function, as for the flats: returns the residual standard
# deviation inside the mask after PCA dark subtraction with a trial offset
tmp_tmp_pca = np.median(cube_subtract_sky_pca(tmp_tmp + guess, tmp_tmp_tmp,
mask_AGPM_com, ref_cube=None, ncomp=npc_dark), axis=0)
tmp_tmp_pca -= np.median(diff) + guess # remove the offset that was added before PCA
subframe = tmp_tmp_pca[np.where(mask_sci)]
stddev = np.std(subframe)
if verbose:
print('Guess = {}'.format(guess))
print('Standard deviation = {}'.format(stddev))
subframe = subframe.reshape(46, -1) # 46 because the subframe is hard coded to the center pixel +/- 23
write_fits(self.outpath + 'dark_sci_subframe.fits', subframe, verbose=debug)
return stddev
guess = 0
if verbose:
print('Calculating optimal PCA dark subtraction for SCI cubes. This may take some time.')
solu = minimize(_get_test_diff_sci, x0=guess, args=(verbose,), method='Nelder-Mead', tol=2e-4, options={'maxiter': 100, 'disp': verbose})
best_test_diff = solu.x # x is the solution (ndarray)
best_test_diff = best_test_diff[0] # take out of array
#best_diff.append(best_test_diff)
if verbose:
print('Best difference (value) to add to SCI cubes is {} found in {} iterations'.format(best_test_diff,solu.nit))
if verbose:
print('Optimal constant to apply to each science cube: {}'.format(best_test_diff))
bar = pyprind.ProgBar(len(sci_list), stream=1, title='Correcting SCI cubes via PCA dark subtraction')
for sc,fits_name in enumerate(sci_list):
tmp = open_fits(self.inpath+fits_name, header=False, verbose=debug)
tmp = cube_crop_frames(tmp, self.com_sz, force = True, verbose=debug)
tmp_tmp_pca = cube_subtract_sky_pca(tmp +diff[sc] +best_test_diff, tmp_tmp_tmp,
mask_AGPM_com, ref_cube=None, ncomp=npc_dark)
tmp_tmp_pca = tmp_tmp_pca - diff[sc] - best_test_diff # add back the constant
write_fits(self.outpath+'1_crop_'+fits_name, tmp_tmp_pca, verbose = debug)
bar.update()
if verbose:
print('Dark has been subtracted from SCI cubes')
if plot:
tmp = np.median(tmp, axis = 0)
tmp_tmp_pca = np.median(tmp_tmp_pca,axis = 0)
if plot == 'show':
plot_frames((tmp, tmp_tmp_pca, mask_AGPM_com), vmax=(np.percentile(tmp, 99.9),
np.percentile(tmp_tmp_pca, 99.9), 1),
vmin=(np.percentile(tmp, 0.1), np.percentile(tmp_tmp_pca, 0.1), 0),
label=('Raw Science', 'Science PCA Dark Subtracted', 'Pixel Mask'),
title='Science PCA Dark Subtraction')
if plot == 'save':
plot_frames((tmp, tmp_tmp_pca, mask_AGPM_com), vmax=(np.percentile(tmp, 99.9),
np.percentile(tmp_tmp_pca, 99.9), 1),
vmin=(np.percentile(tmp, 0.1), np.percentile(tmp_tmp_pca, 0.1), 0),
label=('Raw Science', 'Science PCA Dark Subtracted', 'Pixel Mask'),
title='Science PCA Dark Subtraction',
dpi=300,save = self.outpath + 'SCI_PCA_dark_subtract.pdf')
# PCA dark subtraction of SKY cubes
tmp_tmp_tmp = open_fits(self.outpath + 'master_all_darks.fits', verbose = debug)
tmp_tmp_tmp_median = np.median(tmp_tmp_tmp,axis = 0) # median frame of all darks
tmp_tmp_tmp_median = np.median(tmp_tmp_tmp_median[np.where(mask_AGPM_com)]) # scalar median of the pixels within the mask
tmp_tmp = np.zeros([len(sky_list), self.com_sz, self.com_sz])
cy,cx = frame_center(tmp_tmp)
diff = np.zeros([len(sky_list)])
bar = pyprind.ProgBar(len(sky_list), stream=1, title='Finding difference between darks and sky cubes')
for sc, fits_name in enumerate(sky_list):
tmp = open_fits(self.inpath+fits_name, header=False, verbose=debug) # open sky
tmp = cube_crop_frames(tmp, self.com_sz, force = True, verbose=debug) # crop sky to common size
# PCA works best when the frames are on a similar flux scale, so the median difference is added before PCA and removed after
tmp_median = np.median(tmp,axis = 0) # make median frame from all frames in cube
#tmp_median = tmp_median[np.where(mask_AGPM_com)]
diff[sc] = tmp_tmp_tmp_median - np.median(tmp_median) # median pixel value of all darks minus median pixel value of sky cube
tmp_tmp[sc] = tmp_median + diff[sc]
if debug:
print('difference w.r.t dark =', diff[sc])
bar.update()
write_fits(self.outpath + 'dark_sky_diff.fits', diff, verbose=debug) # saved under its own name so the SCI diff file is not overwritten
if verbose:
print('SKY difference w.r.t. DARKS has been saved to fits file.')
print('SKY difference w.r.t. DARKS:', diff)
def _get_test_diff_sky(guess, verbose=False):
# 1-D cost function, as for the science cubes: returns the residual standard
# deviation inside the mask after PCA dark subtraction with a trial offset
tmp_tmp_pca = np.median(cube_subtract_sky_pca(tmp_tmp + guess, tmp_tmp_tmp,
mask_AGPM_com, ref_cube=None, ncomp=npc_dark), axis=0)
tmp_tmp_pca -= np.median(diff) + guess # remove the offset that was added before PCA
subframe = tmp_tmp_pca[np.where(mask_sci)]
stddev = np.std(subframe)
if verbose:
print('Guess = {}'.format(guess))
print('Standard deviation = {}'.format(stddev))
subframe = subframe.reshape(46, -1) # 46 because the subframe is hard coded to the center pixel +/- 23
write_fits(self.outpath + 'dark_sky_subframe.fits', subframe, verbose=debug)
return stddev
guess = 0
if verbose:
print('Calculating optimal PCA dark subtraction for SKY cubes. This may take some time.')
solu = minimize(_get_test_diff_sky, x0=guess, args=(verbose,), method='Nelder-Mead', tol=2e-4, options={'maxiter': 100, 'disp': verbose})
best_test_diff = solu.x # x is the solution (ndarray)
best_test_diff = best_test_diff[0] # take out of array
if verbose:
print('Optimal constant to apply to each sky cube: {}'.format(best_test_diff))
bar = pyprind.ProgBar(len(sky_list), stream=1, title='Correcting SKY cubes via PCA dark subtraction')
for sc,fits_name in enumerate(sky_list):
tmp = open_fits(self.inpath+fits_name, header=False, verbose=debug)
tmp = cube_crop_frames(tmp, self.com_sz, force = True, verbose=debug)
tmp_tmp_pca = cube_subtract_sky_pca(tmp +diff[sc] +best_test_diff, tmp_tmp_tmp,
mask_AGPM_com, ref_cube=None, ncomp=npc_dark)
tmp_tmp_pca = tmp_tmp_pca - diff[sc] - best_test_diff # add back the constant
write_fits(self.outpath+'1_crop_'+fits_name, tmp_tmp_pca, verbose = debug)
if verbose:
print('Dark has been subtracted from SKY cubes')
if plot:
tmp = np.median(tmp, axis = 0)
tmp_tmp_pca = np.median(tmp_tmp_pca,axis = 0)
if plot == 'show':
plot_frames((tmp,tmp_tmp_pca,mask_AGPM_com), vmax = (np.percentile(tmp,99.9),
np.percentile(tmp_tmp_pca,99.9),1), vmin = (np.percentile(tmp,0.1),np.percentile(tmp_tmp_pca,0.1),0),
label=('Raw Sky','Sky PCA Dark Subtracted','Pixel Mask'),title='Sky PCA Dark Subtraction')
if plot == 'save':
plot_frames((tmp,tmp_tmp_pca,mask_AGPM_com), vmax = (np.percentile(tmp,99.9),
np.percentile(tmp_tmp_pca,99.9),1), vmin = (np.percentile(tmp,0.1),np.percentile(tmp_tmp_pca,0.1),0),
label=('Raw Sky','Sky PCA Dark Subtracted','Pixel Mask'),title='Sky PCA Dark Subtraction', dpi=300,
save = self.outpath + 'SKY_PCA_dark_subtract.pdf')
#median dark subtract of UNSAT cubes
tmp_tmp_tmp = open_fits(self.outpath+'unsat_dark_cube.fits',verbose=debug)
tmp_tmp_tmp = np.median(tmp_tmp_tmp,axis = 0)
# no need to crop the unsat frames to the same size as the sci images if they are smaller
bar = pyprind.ProgBar(len(unsat_list), stream=1, title='Correcting dark current in unsaturated cubes')
for un, fits_name in enumerate(unsat_list):
tmp = open_fits(self.inpath+fits_name, header=False, verbose = debug)
if tmp.shape[2] > self.com_sz:
nx_unsat_crop = self.com_sz
tmp = cube_crop_frames(tmp, nx_unsat_crop, force = True, verbose = debug)
tmp_tmp = tmp-tmp_tmp_tmp
elif tmp.shape[2]%2 == 0:
nx_unsat_crop = tmp.shape[2]-1
tmp = cube_crop_frames(tmp, nx_unsat_crop, force = True, verbose = debug)
tmp_tmp = tmp-tmp_tmp_tmp
else:
nx_unsat_crop = tmp.shape[2]
tmp_tmp = tmp-tmp_tmp_tmp
write_fits(self.outpath+'1_crop_unsat_'+fits_name, tmp_tmp, verbose = debug)
bar.update()
if verbose:
print('Dark has been subtracted from UNSAT cubes')
if plot:
tmp = np.median(tmp, axis = 0) # unsat before subtraction
tmp_tmp = np.median(tmp_tmp,axis = 0) # unsat after dark subtract
# plots unsat dark, raw unsat, dark subtracted unsat
if plot == 'show':
plot_frames((tmp_tmp_tmp,tmp,tmp_tmp),vmax=(np.percentile(tmp_tmp_tmp,99.9),
np.percentile(tmp,99.9),np.percentile(tmp_tmp,99.9)),vmin=(np.percentile(tmp_tmp_tmp,0.1),
np.percentile(tmp,0.1),np.percentile(tmp_tmp,0.1)), label= ('Raw Unsat Dark','Raw Unsat',
'Unsat Dark Subtracted'),title='Unsat Dark Subtraction')
if plot == 'save':
plot_frames((tmp_tmp_tmp,tmp,tmp_tmp),vmax=(np.percentile(tmp_tmp_tmp,99.9),
np.percentile(tmp,99.9),np.percentile(tmp_tmp,99.9)),vmin=( | np.percentile(tmp_tmp_tmp,0.1) | numpy.percentile |
import numpy as np
import pickle as pkl
import networkx as nx
import scipy.sparse as sp
from scipy.sparse.linalg import eigsh
import sys
import tensorflow as tf
def parse_index_file(filename):
"""Parse index file."""
index = []
for line in open(filename):
index.append(int(line.strip()))
return index
def sample_mask(idx, l):
"""Create mask."""
mask = np.zeros(l)
mask[idx] = 1
return np.array(mask, dtype=bool)
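# Example (illustrative): sample_mask([0, 2], 4) marks rows 0 and 2,
# giving array([ True, False, True, False]).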
def sample_mask_sigmoid(idx, h, w):
"""Create mask."""
mask = np.zeros((h, w))
matrix_one = np.ones((h, w))
mask[idx, :] = matrix_one[idx, :]
return np.array(mask, dtype=bool)
def load_data_vis_multi(dataset_str, use_trainval, feat_suffix, label_suffix='ally_multi'):
"""Load data."""
names = [feat_suffix, label_suffix, 'graph']
objects = []
for i in range(len(names)):
with open("{}/ind.NELL.{}".format(dataset_str, names[i]), 'rb') as f:
print("{}/ind.NELL.{}".format(dataset_str, names[i]))
if sys.version_info > (3, 0):
objects.append(pkl.load(f, encoding='latin1'))
else:
objects.append(pkl.load(f))
allx, ally, graph = tuple(objects)
train_test_mask = []
with open("{}/ind.NELL.index".format(dataset_str), 'rb') as f:
train_test_mask = pkl.load(f)
features = allx # .tolil()
adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))
labels = np.array(ally)
idx_test = []
idx_train = []
idx_trainval = []
if use_trainval:
for i in range(len(train_test_mask)):
if train_test_mask[i] == 0:
idx_train.append(i)
if train_test_mask[i] == 1:
idx_test.append(i)
if train_test_mask[i] >= 0:
idx_trainval.append(i)
else:
for i in range(len(train_test_mask)):
if train_test_mask[i] >= 0:
idx_train.append(i)
if train_test_mask[i] == 1:
idx_test.append(i)
if train_test_mask[i] >= 0:
idx_trainval.append(i)
idx_val = idx_test
train_mask = sample_mask_sigmoid(idx_train, labels.shape[0], labels.shape[1])
val_mask = sample_mask_sigmoid(idx_val, labels.shape[0], labels.shape[1])
trainval_mask = sample_mask_sigmoid(idx_trainval, labels.shape[0], labels.shape[1])
y_train = np.zeros(labels.shape)
y_val = np.zeros(labels.shape)
y_trainval = np.zeros(labels.shape)
y_train[train_mask] = labels[train_mask]
y_val[val_mask] = labels[val_mask]
y_trainval[trainval_mask] = labels[trainval_mask]
return adj, features, y_train, y_val, y_trainval, train_mask, val_mask, trainval_mask
def sparse_to_tuple(sparse_mx):
"""Convert sparse matrix to tuple representation."""
def to_tuple(mx):
if not sp.isspmatrix_coo(mx):
mx = mx.tocoo()
coords = np.vstack((mx.row, mx.col)).transpose()
values = mx.data
shape = mx.shape
return coords, values, shape
if isinstance(sparse_mx, list):
for i in range(len(sparse_mx)):
sparse_mx[i] = to_tuple(sparse_mx[i])
else:
sparse_mx = to_tuple(sparse_mx)
return sparse_mx
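# Example (illustrative): for a 2x2 sparse matrix with a single nonzero,
# sp.coo_matrix(np.array([[0., 5.], [0., 0.]])) is converted to
# (coords=[[0, 1]], values=[5.], shape=(2, 2)).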
def preprocess_features(features):
"""Row-normalize feature matrix and convert to tuple representation"""
rowsum = np.array(features.sum(1))
r_inv = np.power(rowsum, -1).flatten()
r_inv[np.isinf(r_inv)] = 0.
r_mat_inv = sp.diags(r_inv)
features = r_mat_inv.dot(features)
return sparse_to_tuple(features)
def preprocess_features_dense(features):
"""Row-normalize feature matrix and convert to tuple representation"""
rowsum = np.array(features.sum(1))
r_inv = np.power(rowsum, -1).flatten()
r_inv[np.isinf(r_inv)] = 0.
r_mat_inv = sp.diags(r_inv)
features = r_mat_inv.dot(features)
return features
def preprocess_features_dense2(features):
rowsum = np.array(features.sum(1))
r_inv = np.power(rowsum, -1).flatten()
r_inv[np.isinf(r_inv)] = 0.
r_mat_inv = sp.diags(r_inv)
features = r_mat_inv.dot(features)
div_mat = sp.diags(rowsum)
return features, div_mat
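# Worked example (illustrative) of the row normalization shared by the three
# functions above: features = [[1., 3.], [2., 2.]] has rowsum = [4., 4.],
# so r_mat_inv = diag([0.25, 0.25]) and the normalized rows are
# [[0.25, 0.75], [0.5, 0.5]] -- each row now sums to 1.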
def normalize_adj(adj):
"""Symmetrically normalize adjacency matrix."""
adj = sp.coo_matrix(adj)
rowsum = np.array(adj.sum(1))
d_inv_sqrt = | np.power(rowsum, -0.5) | numpy.power |
import os
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
from microfilm import microplot, colorify
image = 100*np.ones((3,3), dtype=np.uint8)
image[0,0] = 200
image2 = 100*np.ones((3,3), dtype=np.uint8)
image2[0,1] = 180
more_than_3d = np.zeros((5,3,3), dtype=np.uint8)
more_than_3d[0,0,0] = 1
more_than_3d[1,1,0] = 1
more_than_3d[2,2,0] = 1
more_than_3d[3,1,1] = 1
more_than_3d[4,2,1] = 1
def verify_image(microim):
# check image
assert np.any(microim.ax.get_images()[0].get_array()[:,:,0] > 0) == False, "Red should not be present"
np.testing.assert_array_equal(microim.ax.get_images()[0].get_array()[:,:,1], np.array([[0,1,0], [0,0,0], [0,0,0]]), "Green channel not correct")
np.testing.assert_array_equal(microim.ax.get_images()[0].get_array()[:,:,2], np.array([[1,0,0], [0,0,0], [0,0,0]]), "Blue channel not correct")
def verify_label(microim):
assert microim.ax.texts[0].get_text() == 'b', "Wrong channel label"
assert microim.ax.texts[1].get_text() == 'a', "Wrong channel label"
def test_microshow():
microim = microplot.microshow(
images=[image, image2], cmaps=['pure_blue', 'pure_green'], channel_names=['a', 'b'], channel_label_show=True, unit='mm',
scalebar_unit_per_pix=0.5, scalebar_size_in_units=1, scalebar_thickness=0.1, scalebar_color='red',
label_text='A', label_color='pink')
assert isinstance(microim, microplot.Microimage)
# check image
verify_image(microim)
# check channel labels
verify_label(microim)
# check scalebar
assert microim.ax.texts[2].get_text() == '1 mm', "Wrong scalebar legend"
assert microim.ax.patches[0].get_facecolor() == (1, 0, 0, 1), "Wrong scalebar color"
assert microim.ax.patches[0].get_width() == 2/3, "Wrong scalebar size"
# check label
assert microim.ax.texts[3].get_text() == 'A', "Wrong label"
assert microim.ax.texts[3].get_color() == 'pink', "Wrong label color"
def test_default_random_gradient():
# test that images with > 3 channels use random gradient by default
microim = microplot.microshow(more_than_3d)
assert microim.cmaps[4] == "ran_gradient", "Random gradient not select for d>3"
def test_mixed_cmaps():
# test that "name" cmaps and "object" cmaps can be mixed
summer_cmap = colorify.cmaps_def(cmap_name='summer')
microim = microplot.microshow(
images=[image, image2], cmaps=[summer_cmap, 'pure_blue'])
assert isinstance(microim.cmap_objects[0], matplotlib.colors.LinearSegmentedColormap), "Wrong colormap for summer cmap"
assert isinstance(microim.cmap_objects[1], matplotlib.colors.ListedColormap), "Wrong colormap for pure_blue cmap"
def test_add_scalebar():
microim = microplot.microshow(
images=[image, image2], cmaps=['pure_blue', 'pure_green'])
microim.add_scalebar(unit='mm', scalebar_unit_per_pix=0.5, scalebar_size_in_units=1, scalebar_thickness=0.1, scalebar_color='red')
# check scalebar
assert microim.ax.texts[0].get_text() == '1 mm', "Wrong scalebar legend"
assert microim.ax.patches[0].get_facecolor() == (1, 0, 0, 1), "Wrong scalebar color"
assert microim.ax.patches[0].get_width() == 2/3, "Wrong scalebar size"
def test_add_label():
microim = microplot.microshow(
images=[image, image2], cmaps=['pure_blue', 'pure_green'])
microim.add_label(label_text='A', label_color='pink')
# check label
assert microim.ax.texts[0].get_text() == 'A', "Wrong label"
assert microim.ax.texts[0].get_color() == 'pink', "Wrong label color"
def test_add_channel_labels():
microim = microplot.microshow(
images=[image, image2], cmaps=['pure_blue', 'pure_green'])
# check channel labels
microim.add_channel_labels(channel_names=['a', 'b'])
verify_label(microim)
assert microim.ax.texts[1].get_color() == (0.0, 0.0, 1.0, 1.0), "Wrong label color"
assert microim.ax.texts[0].get_color() == (0.0, 1.0, 0.0, 1.0), "Wrong label color"
def test_update():
microimage = microplot.Microimage(images=[image, image2], cmaps=['pure_blue', 'pure_green'])
assert microimage.ax is None
fig, ax = plt.subplots(1, 2)
microimage.update(ax[1])
verify_image(microimage)
def test_save():
microimage = microplot.microshow(images=[image, image2], cmaps=['pure_blue', 'pure_green'])
microimage.savefig('test_saveimage.png')
os.path.isfile('test_saveimage.png')
os.remove('test_saveimage.png')
def test_micropanel():
microimage1 = microplot.Microimage(images=[image, image2], cmaps=['pure_blue', 'pure_green'])
microimage2 = microplot.Microimage(images=[image, image2], cmaps=['pure_cyan', 'pure_magenta'])
micropanel = microplot.Micropanel(1, 2)
assert isinstance(micropanel, microplot.Micropanel)
micropanel.add_element([0,0], microimage1)
micropanel.add_element([0,1], microimage2)
# check grid shape
assert micropanel.microplots.shape == (1, 2), "Wrong panel grid shape"
# Check that plots are in the correct place
np.testing.assert_array_equal(micropanel.microplots[0,0].ax.get_images()[0].get_array()[:,:,0], np.array([[0,0,0], [0,0,0], [0,0,0]]),
"Red channel in first panel not correct")
np.testing.assert_array_equal(micropanel.microplots[0,0].ax.get_images()[0].get_array()[:,:,1], np.array([[0,1,0], [0,0,0], [0,0,0]]),
"Green channel in first panel not correct")
np.testing.assert_array_equal(micropanel.microplots[0,0].ax.get_images()[0].get_array()[:,:,2], | np.array([[1,0,0], [0,0,0], [0,0,0]]) | numpy.array |
#===============================================================================
# This file is part of TEMPy.
#
# TEMPy is a software designed to help the user in the manipulation
# and analyses of macromolecular assemblies using 3D electron microscopy maps.
#
# Copyright 2015 Birkbeck College University of London.
#
# Authors: <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>
#
# This software is made available under GPL V3 license
# http://www.gnu.org/licenses/gpl-3.0.html
#
#
# Please cite your use of TEMPy in published work:
#
# <NAME>., <NAME>., <NAME>., <NAME>., <NAME>. & <NAME>. (2015). J. Appl. Cryst. 48.
#
#===============================================================================
import numpy
from numpy import array, fromfile, flipud,isnan, transpose
import struct as binary
import string
from TEMPy.EMMap import Map
class MapParser:
"""
A class to read various EM map file types into a Map object instance.
"""
def __init__(self):
## mapping of numpy type to MRC mode
self.numpy2mrc = {
## convert these to int8
numpy.uint8: 0,
numpy.bool_: 0,
bool: 0,
## convert these to int16
numpy.int16: 1,
numpy.int8: 1,
## convert these to float32
numpy.float32: 2,
numpy.float64: 2,
numpy.int32: 2,
int: 2,
## convert these to complex64
complex: 4,
numpy.complex64: 4,
numpy.complex128: 4,
## convert these to uint16
numpy.uint16: 6,
}
@staticmethod
def readMRCHeader(filename, endian = '<'):
"""
Gets the header information from the MRC map file.
Argument
*filename*
input MRC map file name
*endian*
Endianness: Little or big
Return:
A string containing the MRC header information.
"""
f = open(filename,'rb')
fm_string = endian+(10*'l')+(6*'f')+(3*'l')+(3*'f')+(27*'l')+(3*'f')+(4*'c')+'lfl'
header = list(binary.unpack(fm_string, f.read(224)))
notes = f.read(800)
notes = notes.replace(b'\x00', b'')
header.append(notes)
header = tuple(header)
f.close()
return header
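# Layout check (illustrative): the format string above describes exactly the
# 224 bytes read -- (10+3+27) 'l' ints and (6+3+3) 'f' floats are 52 x 4 =
# 208 bytes, the 4 'c' chars add 4, and the trailing 'lfl' adds 12:
# >>> binary.calcsize('<' + 10*'l' + 6*'f' + 3*'l' + 3*'f' + 27*'l' + 3*'f' + 4*'c' + 'lfl')
# 224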
@staticmethod
def get_endian(filename):
"""
Read an MRC map file
Arguments:
*filename*
input MRC map file name.
Return:
Endianness: Little or big
"""
h = MapParser.readMRCHeader(filename)
if 0 <= h[3] <= 6:
endian = '<'
else:
endian = '>'
return endian
@staticmethod
def readMRC(filename):
"""
Read an MRC map file
Arguments:
*filename*
input MRC map file name.
Return:
A Map instance containing the data read from MRC map file.
"""
print("READING MRC")
mrc2numpy = {
0: numpy.uint8,
1: numpy.int16,
2: numpy.float32,
# 3: complex made of two int16. No such thing in numpy
# however, we could manually build a complex array by reading two
# int16 arrays somehow.
4: numpy.complex64,
6: numpy.uint16, # according to UCSF
}
endian = MapParser.get_endian(filename)
header = MapParser.readMRCHeader(filename, endian)
box_size = tuple(flipud(header[0:3]))
origin = header[49:52] #ctrl UCSF
# READ ORIGIN BASED ON MRC2000/CCP4 format
nstart_index = header[4:7]
apix = header[10]/header[0]
nstart = (header[4]*float(apix),header[5]*float(apix),header[6]*float(apix))
crs_index = header[16:19]
if sorted(crs_index) != [1, 2, 3]:
crs_index = (1,2,3)
#print 'Axis order: ', crs_index
#print 'Nstart', nstart_index[crs_index[0]-1],nstart_index[crs_index[1]-1],nstart_index[crs_index[2]-1]
flag_orig = 0
list_orig = [0.0, 0.0, 0.0]
try:
if header[52:56] == (b'M',b'A',b'P',b' '):
#print 'MAP flag found (MRC2000)'
origin = header[49:52]
#print 'Origin record: ', origin
if (isnan(origin[0]) or isnan(origin[1]) or isnan(origin[2])) or (origin[0] == 0.0 and origin[1] == 0.0 and origin[2] == 0.0):
origin = (0.0, 0.0, 0.0)
#print 'ORIGIN record empty, Checking NSTART records'
flag_orig = 1
else:
flag_orig = 1
except IndexError:
origin = (0.0, 0.0, 0.0)
pass
if flag_orig == 1:
if (nstart[0] == 0 and nstart[1] == 0 and nstart[2] == 0) or (isnan(nstart[0]) or isnan(nstart[1]) or | isnan(nstart[2]) | numpy.isnan |
from typing import List, Tuple, Dict, Optional
import matplotlib.pyplot as plt
import numpy as np
from dataclasses import dataclass
from kino.geometry import Vector
from kino.geometry.point import Point
from kino.geometry.interpolation import lerp
from myterial import salmon_dark, blue_light
from slam.geometry import Line, distance, segments_intersection
from slam.obstacle import Obstacle
class Ray:
"""
A lidar ray.
"""
def __init__(
self,
agent,
angle: float, # relative to agent orientation
length: int,
):
self.agent = agent
self.angle_shift = angle
self.length = length
self.events_count = 0 # count every time an object is detected
self.sampled_distance: List[float] = [
self.length * p for p in np.linspace(0, 1, 5)
] # distance values from start to end
@property
def angle(self) -> float:
return self.agent.angle + self.angle_shift
@property
def p0(self) -> Point:
""" position of origin of ray """
return Point(*self.agent.head_position)
@property
def p1(self) -> Point:
"""
Position of point at end of ray
"""
x = self.length * np.cos(np.radians(self.angle)) + self.p0.x
y = self.length * np.sin(np.radians(self.angle)) + self.p0.y
return Point(x, y)
@property
def line(self) -> Line:
""" Line going through ray points
"""
return Line.from_points(self.p0, self.p1, color=salmon_dark)
def sample(self, n: int = 5) -> List[Point]:
"""
Sample n points along the ray from p0 to p1, included.
Returns points carrying information of their distance along the ray as well.
"""
pts: List[Point] = []
for p in np.linspace(0, 1, n):
pt = Point(
lerp(self.p0.x, self.p1.x, p), lerp(self.p0.y, self.p1.y, p)
)
pt.distance = lerp(0, self.length, p)
pts.append(pt)
return pts
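# Example (illustrative): for a ray of length 10, sample(n=3) returns points
# at fractions 0, 0.5 and 1 along p0 -> p1, carrying distances 0, 5 and 10.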
def sample_at_distance(self, distance: float) -> Optional[Point]:
"""
Returns a point at a sampled distance along the ray
"""
if distance == 0:
factor: float = 0
elif distance > self.length:
return None
else:
factor = distance / self.length
pt = Point(
lerp(self.p0.x, self.p1.x, factor),
lerp(self.p0.y, self.p1.y, factor),
)
pt.distance = distance
return pt
def scan(self, obstacles: List[Obstacle]):
"""
Scans through a list of objects to find intersections
"""
intersection_points: List[Tuple[Point, float, Obstacle]] = []
for obj in obstacles:
# check if the object is closer than the ray length
dist = distance(self.p0, obj.COM)
if dist - (1.5 * obj.size) > self.length:
continue
# get the interesection between the ray line and
# each edge-line of the obstacle
obj_intersections: Dict[str, Tuple[Point, float, Obstacle]] = {}
for name, line in obj.lines.items():
# get vertice points
q0, q1 = obj.points[name[0]], obj.points[name[1]]
intersection = segments_intersection(self.p0, self.p1, q0, q1)
if intersection is None:
continue
# get distance from intersection
dist = distance(self.p0, intersection)
# all checks passed, keep point
obj_intersections[name] = (intersection, dist, obj)
if len(obj_intersections):
closest = np.argmin([v[1] for v in obj_intersections.values()])
intersection_points.append(
list(obj_intersections.values())[closest]
)
# keep the closest intersection points
if intersection_points:
closest = | np.argmin([v[1] for v in intersection_points]) | numpy.argmin |
import numpy as np
import random
from sklearn.neighbors import KNeighborsClassifier
import timeit
from . import heuristics
from ..io import portie
import sys
def baco(x_data, y_data, t_percent=40, heu_meth="method_1", ml_alg="knn1", iter_num=10):
(my_bool, msg_err) = check_baco_args(t_percent, heu_meth, ml_alg, iter_num)
if(not my_bool):
print("problem with arguments for abaco()!!!")
print(msg_err)
exit() #############
check = portie.CheckDataset(x_data, y_data)
(state, msg) = check.get_state()
if(not state): # data had problems
print("+++ " + msg + " +++")
exit() #############
train_percentage = 100 - int(t_percent)
time_temp = 0
start = timeit.default_timer()
(best_fitnesses_each_iter, average_fitnesses_each_iter, num_of_features_selected_by_best_ant_each_iter, best_fit_so_far, best_ant_road) = run_feature_selection(generations = iter_num, alpha = 1, beta = 0.5, T0 = 0.1, Min_T = 0.1, Max_T = 6, q = 0.95, Q = 0.3, heu_meth = heu_meth, ant_num = 50, feature_num = len(x_data[1]), dataset=x_data, targets=y_data, train_percentage=train_percentage)
end = timeit.default_timer()
time_temp = time_temp + (end - start)
# making new dataset :
new_dataset = make_new_dataset(best_ant_road, x_data)
acc_before_run = get_single_fit(x_data, y_data, train_percentage)
total_feature_num = len(x_data[1])
sample_num = len(x_data[:,1])
best_selected_features_num = np.sum(best_ant_road)
return (new_dataset, best_ant_road, acc_before_run, best_fit_so_far, total_feature_num, best_selected_features_num, best_fitnesses_each_iter, average_fitnesses_each_iter ,num_of_features_selected_by_best_ant_each_iter, time_temp, sample_num)
def check_baco_args(t_percent, heu_meth, ml_alg, iter_num):
msg_err = ""
try:
int(t_percent)
except Exception as e:
msg_err = "t_percent should be integer!"
return (False, msg_err)
try:
int(iter_num)
except Exception as e:
msg_err = "iter_num should be integer!"
return (False, msg_err)
if(iter_num > 100):
msg_err = "iter_num should be less than 100!"
return (False, msg_err)
if(iter_num < 5):
msg_err = "iter_num should be more than 5!"
return (False, msg_err)
# if(type(heu_meth) != "str" or "str" != type(ml_alg)):
if(heu_meth != "method_1" and heu_meth != "method_2" and heu_meth != "method_3" and heu_meth != "method_4" and heu_meth != "method_5"):
msg_err = "heu_meth isn't write, please check the docs!"
return (False, msg_err)
# should check the ml_alg tooooooooo
return (True, msg_err)
def run_feature_selection(generations, alpha, beta , T0, Min_T, Max_T, q, Q, heu_meth, ant_num, feature_num, dataset, targets, train_percentage):
best_fitnesses_each_iter = []
average_fitnesses_each_iter = []
num_of_features_selected_by_best_ant_each_iter = []
road_map = np.random.randint(2, size=ant_num*feature_num).reshape((ant_num, feature_num))
road_maps = np.zeros(ant_num*feature_num*generations, dtype="int64").reshape(generations, ant_num, feature_num)
best_roads_list = []
best_fit_so_far = 0
best_road_so_far = np.zeros(feature_num, dtype="int64")
np.set_printoptions(suppress=True, threshold=1000)
roads_T = np.zeros(feature_num*feature_num*4, dtype="float64").reshape(4, feature_num, feature_num) + T0
for i in range(0, generations):
# print("+++++++++ run : ("+ str(heu_meth) +") Iteration : (" + str(i+1) + ")+++++++++")
if(heu_meth == "method_1"):
roads_E = heuristics.hueristic_value_fscore(feature_num, dataset, targets)
elif(heu_meth == "method_2"):
roads_E = heuristics.heuristic_value_min_redundency(feature_num, dataset)
elif(heu_meth == "method_3"):
roads_E = heuristics.heuristic_value_min_redundency_max_relevence(feature_num, dataset)
elif(heu_meth == "method_4"):
roads_E = heuristics.heuristic_value_method_4(feature_num, dataset)
elif(heu_meth == "method_5"):
roads_E = heuristics.heuristic_value_mutual_info(feature_num, dataset)
(road_map, pointer) = baco_road_selection(roads_T, roads_E, alpha, beta, ant_num, feature_num)
(iter_best_fit, best_road_so_far, best_fit_so_far, iter_best_road, fitnesses, iter_average_fit, ants_num_of_features_selected) = do_calculations(road_map, dataset, targets, best_fit_so_far, best_road_so_far, train_percentage)
roads_T = trial_update(fitnesses, roads_T, Min_T, Max_T, Q, q, iter_best_road, feature_num)
road_maps[i] = road_map
best_fitnesses_each_iter.append(iter_best_fit)
average_fitnesses_each_iter.append(iter_average_fit)
num_of_features_selected_by_best_ant_each_iter.append(sum(best_road_so_far))
best_roads_list.append(best_road_so_far)
# index of the last iteration that achieved the best fitness
maxx = max(best_fitnesses_each_iter)
my_indx = max(i for i, f in enumerate(best_fitnesses_each_iter) if f == maxx)
return (best_fitnesses_each_iter, average_fitnesses_each_iter, num_of_features_selected_by_best_ant_each_iter, best_fit_so_far, best_roads_list[my_indx])
def get_accuracy_for_this_solution(train_dataset, train_targets, test_dataset, test_targets):
K = 1
knn = KNeighborsClassifier(n_neighbors=K)
knn.fit(train_dataset, train_targets) # X, Y
# evaluating our trained model
predicted_targets = knn.predict(test_dataset)
l = len(test_targets)
num_of_correct = 0
for i in range(l):
if(test_targets[i] == predicted_targets[i]):
num_of_correct = num_of_correct + 1
return num_of_correct/l
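# Note (illustrative): the manual loop above computes the same value as
# sklearn.metrics.accuracy_score(test_targets, predicted_targets), or
# equivalently knn.score(test_dataset, test_targets).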
def separate_datasets(dataset, targets, train_percentage):
# in case you wanted the data to be random every single time you wanted get fitnesses
leng = len(dataset[:, 0])
s = int(leng*(train_percentage/100))
samples_list = random.sample(range(0, leng), s)
mask = np.zeros((leng), dtype=bool)
mask[samples_list] = True
train_dataset = dataset[mask, :]
test_dataset = dataset[~mask, :]
train_targets = targets[mask]
test_targets = targets[~mask]
return (train_dataset, test_dataset, train_targets, test_targets)
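# Sketch (illustrative) of an equivalent split with scikit-learn, assuming the
# same train fraction; like the boolean mask above, rows are shuffled:
# from sklearn.model_selection import train_test_split
# train_dataset, test_dataset, train_targets, test_targets = train_test_split(
# dataset, targets, train_size=train_percentage / 100)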
def get_fitnesses(road_map, dataset, targets, train_percentage):
total_feature_num = len(road_map[1])
total_sample_num = len(dataset[:,0])
num_of_features_selected = list()
fitnesses = list()
count = 0
for ant_solution in road_map:
count = count + 1
if np.sum(ant_solution) == 0:
print("all of row " + str(count) + " was 0!")
num_of_features_selected.append(0) # keep this list aligned with fitnesses
fitnesses.append(0)
else:
new_dataset = np.zeros(total_sample_num, dtype="float64").reshape(total_sample_num, 1)
for i in range(0, len(ant_solution)):
if(ant_solution[i] == 1):
new_dataset = np.append(new_dataset, dataset[:, i].reshape(total_sample_num, 1), axis=1)
new_dataset = np.delete(new_dataset, 0, axis=1) # removing first column
num_of_features_selected.append(new_dataset.shape[1])
(train_dataset, test_dataset, train_targets, test_targets) = separate_datasets(new_dataset, targets, train_percentage)
fitnesses.append(get_accuracy_for_this_solution(train_dataset, train_targets, test_dataset, test_targets))
return num_of_features_selected, fitnesses
def make_new_dataset(solution_road, dataset):
total_sample_num = len(dataset[:,0])
new_dataset = np.zeros(total_sample_num, dtype="float64").reshape(total_sample_num, 1)
if np.sum(solution_road) == 0:
print("allll of it was 0!!!!")
return new_dataset
else:
for i in range(0, len(solution_road)):
if(solution_road[i] == 1):
new_dataset = np.append(new_dataset, dataset[:, i].reshape(total_sample_num, 1), axis=1)
new_dataset = np.delete(new_dataset, 0, axis=1) # removing first column
return new_dataset
def get_single_fit(dataset, targets, train_percentage):
(train_dataset, test_dataset, train_targets, test_targets) = separate_datasets(dataset, targets, train_percentage)
return get_accuracy_for_this_solution(train_dataset, train_targets, test_dataset, test_targets)
def roulette_wheel(probs, feature_num): # pick one index at random, weighted by the probabilities
total = 0
zero_or_one = 1
index = len(probs) - 1 # fallback in case floating-point round-off leaves r uncovered
r = np.random.random_sample()
for x in range(len(probs)):
total = total + probs[x]
if r < total:
index = x
break # stop at the first slot whose cumulative probability covers r
# because it is now (feature_num + feature_num) long, we should correct it:
if index >= feature_num:
index = index - feature_num
zero_or_one = 1
else:
zero_or_one = 0
return (index, zero_or_one)
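# Worked example (illustrative): with feature_num = 2 the wheel spins over
# probs laid out as [p(f0=0), p(f1=0), p(f0=1), p(f1=1)]. For
# probs = [0.1, 0.2, 0.3, 0.4] and r = 0.55 the cumulative sums 0.1, 0.3, 0.6
# first exceed r at raw index 2, which folds back to feature index 0 with
# zero_or_one = 1.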
def baco_road_selection(roads_T, roads_E, alpha, beta, ant_num, feature_num):
road_map = np.zeros(ant_num*feature_num, dtype="int64").reshape(ant_num, feature_num)
pointer = | np.zeros(ant_num*feature_num, dtype="int64") | numpy.zeros |
"""Calculate reflected line profiles in the galactic center."""
import math
import matplotlib.pyplot as plt
import numpy as np
from astropy import constants as c
from astropy import units as u
from astropy.table import Table
from matplotlib import cm
from matplotlib.gridspec import GridSpec
from PyAstronomy.modelSuite import KeplerEllipseModel
from pylab import (arccos, axis, clf, copy, cos, exp, figure, hist, plot, rand,
savefig, scatter, show, sin, sqrt, subplot, transpose,
xlabel, ylabel)
from scipy.integrate import quad
plt.rcParams['text.usetex'] = True
# note: the 'text.latex.unicode' rcParam was removed in Matplotlib 3.0
# unit conversion: (use astropy for this?)
radians = np.pi / 180. # deg to radians
meters = 1.0 / (((1.0 * u.meter).si.value / (
1.0 * u.lyr / 365.25).si.value)) # light days to meters
kg = c.M_sun.si.value # kg/solar mass
grav = c.G.si.value # m^3/kg/s^2 gravitational constant
eV = (1.0 * u.eV).si.value # electron volt
h = c.h.si.value # Planck's constant
kb = c.k_B.si.value # Boltzmann's constant
cc = c.c.si.value # Speed of light
day = 86400. # seconds in a day
year = 3.154 * 10**7. # seconds in a year
m_to_km = 1.0e-3 # meters to km
pc = (1.0 * u.pc).si.value / (1.0 * u.meter).si.value # pc to meters
gc_dist = 8.0e3 * pc # in light days
def ionizing_luminosity_fraction(temp, cutoff=13.6):
"""Calculate the total ionizing luminosity given a temp/cutoff energey."""
nulow = cutoff * eV / h
nuhi = 100 * nulow
value = (2.0 * h / cc ** 2) * quad(
lambda nu: nu ** 3 / (np.expm1(
h * nu / (kb * temp))), nulow, nuhi)[0] / (
2 * (np.pi * kb * temp) ** 4 / (15 * h ** 3 * cc ** 2))
return value
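# In the notation of the code above (illustrative summary), this returns
# f(T) = int_{nu0}^{100 nu0} B_nu(T) dnu / int_0^inf B_nu(T) dnu,
# where h*nu0 = cutoff (13.6 eV, the hydrogen ionization edge, by default)
# and the closed-form denominator 2 pi^4 (k T)^4 / (15 h^3 c^2) is the
# frequency-integrated Planck function.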
def find_nearest_idx(array, value):
"""Find nearest value."""
idx = (np.abs(array - value)).argmin()
return idx
def lcm(a, b):
"""Lowest common multiple."""
# fractions.gcd was removed in Python 3.9; math.gcd (integer-only) replaces it
return int(np.round(abs(a * b) / math.gcd(a, b))) if a and b else 0
def get_cmap(n, name='hsv'):
"""Return a function that maps 0, 1, ..., n-1 to a distinct RGB color.
The keyword argument name must be a standard mpl colormap name.
"""
return plt.cm.get_cmap(name, n)
def star_position(kems, times):
"""Return the star positions for a given time."""
# load in the star position file here:
# calculate the position in cartesian coords:
# random positions in a 20x20x20 box = x, y, z
"""
num_stars = 10
box_size = 20.
positions = np.transpose(np.array([
rand(num_stars) * box_size - box_size / 2.,
rand(num_stars) * box_size - box_size / 2.,
rand(num_stars) * box_size - box_size / 2.])) * meters
"""
if not isinstance(times, list):
time = [times]
else:
time = times
time = np.array(time)
positions = np.array([
k.evaluate(time).tolist() for k in kems]) * 1.9e14
rot = [[0, -1], [1, 0]]
positions = np.array([np.matmul(rot, pos[:2]).tolist() +
[pos[2]] for pos in positions])
# rotated_positions = [rotation_transform(x) for x in positions]
# print positions
# print positions[:,0] # all the x-coords
# print positions[0,:] # x,y,z of the first star
return positions
def star_luminosity(star_data):
"""Return the star luminosities."""
# load in the star luminosity file here:
# Find the dimmest star in Habibi:
min_l = np.inf
for xi, x in enumerate(star_data):
if x[2] is not None and x[2]['log_l'] < min_l:
min_l = x[2]['log_l']
min_k = x[2]['k_magnitude']
min_t = x[2]['temperature']
min_l = 10.0 ** min_l
# luminosities in solar luminosities.
luminosities = [
(min_l * 10.0 ** ((float(x[1]['kmag']) - float(min_k)) / 2.5))
if x[2] is None else (10.0 ** x[2]['log_l']) for x in star_data]
temps = [min_t if x[2] is None else x[2]['temperature'] for x in star_data]
luminosities *= np.array(list(map(
lambda x: ionizing_luminosity_fraction(x), temps)))
return luminosities
def rotate(x, y, co, si):
"""Rotate x, y position given cos/sin of angle."""
xx = co * x + si * y
yy = -si * x + co * y
return [xx, yy]
def rotate_vecs(u, aa, bb, transpose=False):
"""Rotate vector u to the direction of vector v."""
rots = []
if isinstance(u[0], (int, float)):
ui = [u]
else:
ui = u
if transpose:
ui = list(map(list, zip(*ui)))
a = np.array(aa) / np.linalg.norm(aa)
b = np.array(bb) / np.linalg.norm(bb)
v = np.cross(a, b)
vx = np.array([
[0, -v[2], v[1]],
[v[2], 0, -v[0]],
[-v[1], v[0], 0]
])
c = np.dot(a, b)
s2 = np.dot(v, v) # |a x b|^2 = sin^2(theta)
if s2 == 0: # a and b are (anti)parallel: no unique rotation axis, keep identity
rot = np.eye(3)
else:
# Rodrigues' rotation formula with the unnormalized axis v = a x b:
# R = I + [v]_x + [v]_x^2 * (1 - c) / sin^2(theta)
rot = np.eye(3) + vx + np.matmul(vx, vx) * ((1.0 - c) / s2)
for uu in ui:
rots.append(np.matmul(rot, np.array(uu)))
if transpose:
rots = list(map(list, zip(*rots)))
return tuple(rots)
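# Quick check (illustrative): rotating the x unit vector onto the y axis,
# rotate_vecs([1, 0, 0], [1, 0, 0], [0, 1, 0])[0], should give ~[0, 1, 0].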
def gas_model(num_clouds, params, other_params, lambdaCen, plot_flag=True):
"""Retrieve the gas positions and velocities."""
[mu, F, beta, theta_o,
kappa, mbh, f_ellip, f_flow, theta_e] = params
[angular_sd_orbiting, radial_sd_orbiting,
angular_sd_flowing, radial_sd_flowing] = other_params
# Schwarzschild radius
Rs = 2. * grav * mbh / cc**2.
# First calculate the geometry of the emission:
r = mu * F + (1. - F) * mu * beta**2. * \
np.random.gamma(beta**(-2.), 1, num_clouds)
phi = 2. * np.pi * rand(num_clouds)
x = r * cos(phi)
y = r * sin(phi)
z = r * 0.
# *pow(u3[i], openingBendPower));
angle = arccos(cos(theta_o) + (1. - cos(theta_o)) * rand(num_clouds))
cos1 = cos(angle)
sin1 = sin(angle)
u1 = rand(num_clouds)
cos2 = cos(2. * np.pi * u1)
sin2 = sin(2. * np.pi * u1)
# rotate to puff up:
[x, z] = rotate(x, z, cos1, sin1)
# rotate to restore axisymmetry:
[x, y] = rotate(x, y, cos2, sin2)
# rotate to observer plane:
x, y, z = rotate_vecs([x, y, z], [0, 0, 1], disk_ang_mom, transpose=True)
# weights for the different points
# w = 0.5 + kappa * x / sqrt(x * x + y * y + z * z)
# w /= sum(w)
# if plot_flag:
# # larger points correspond to more emission from the point
# ptsize = 5
# shade = 0.5
# clf()
# subplot(2, 2, 1) # edge-on view 1, observer at +infinity of x-axis
# scatter(x / meters, y / meters, ptsize, alpha=shade)
# xlabel('x')
# ylabel('y')
# subplot(2, 2, 2) # edge-on view 2, observer at +infinity of x-axis
# scatter(x / meters, z / meters, ptsize, alpha=shade)
# xlabel('x')
# ylabel('z')
# subplot(2, 2, 3) # view of observer looking at plane of sky
# scatter(y / meters, z / meters, ptsize, alpha=shade)
# xlabel('y')
# ylabel('z')
# subplot(2, 2, 4) # plot the radial distribution of emission
# hist(r / meters, 100)
# xlabel("r")
# ylabel("p(r)")
# show()
# Now calculate velocities of the emitting gas:
radius1 = sqrt(2. * grav * mbh / r)  # local escape speed, v_esc = sqrt(2GM/r)
radius2 = sqrt(grav * mbh / r)  # local circular speed, v_circ = sqrt(GM/r)
vr = np.zeros_like(x)
vphi = np.zeros_like(x)
u5 = rand(num_clouds)
n1 = np.random.normal(size=num_clouds)
n2 = np.random.normal(size=num_clouds)
for i in range(0, num_clouds):
if u5[i] < f_ellip:
# we give this point particle a near-circular orbit
theta = 0.5 * np.pi + angular_sd_orbiting * n1[i]
vr[i] = radius1[i] * cos(theta) * exp(radial_sd_orbiting * n2[i])
vphi[i] = radius2[i] * sin(theta) * exp(radial_sd_orbiting * n2[i])
else:
if f_flow < 0.5:
# we give this point particle an inflowing orbit
theta = np.pi - theta_e + angular_sd_flowing * n1[i]
vr[i] = radius1[i] * cos(theta) * \
exp(radial_sd_flowing * n2[i])
vphi[i] = radius2[i] * \
sin(theta) * exp(radial_sd_flowing * n2[i])
else:
# we give this point particle an outflowing orbit
theta = 0. + theta_e + angular_sd_flowing * n1[i]
vr[i] = radius1[i] * cos(theta) * \
exp(radial_sd_flowing * n2[i])
vphi[i] = radius2[i] * \
sin(theta) * exp(radial_sd_flowing * n2[i])
# Convert vr, vphi to Cartesians:
vx = vr * cos(phi) - vphi * sin(phi)
vy = vr * sin(phi) + vphi * cos(phi)
vz = vr * 0.
# apply rotations
vx, vy, vz = rotate_vecs(
[vx, vy, vz], [0, 0, 1], disk_ang_mom, transpose=True)
vx = np.array(vx)
vy = np.array(vy)
vz = np.array(vz)
# Sign of vz depends on whether disk is co- or counter-rotating.
# Positive vz corresponds to counter-clockwise rotation about the +y axis.
wavelength_values = relativity(vz, r, Rs, lambdaCen)
return [x, y, z, vx, vy, vz, wavelength_values]
def compute_gas_flux(gas_coords, star_data, times, params, bins, fig_name,
plot_flag=True):
"""Calculate the flux contribution from each point particle.
Assumptions: light travel time from stars to gas plus the
recombination time is shorter than the time it takes the
stars to move in their orbits.
"""
[stellar_wind_radius, kappa] = params
# load in the star luminosities (if they are constant)
star_luminosities = star_luminosity(star_data)
# loop over times we want spectra
star_pos_models = [x[0] for x in star_data]
num_stars = len(star_position(star_pos_models, times[0]))
gas_flux = np.zeros((np.size(gas_coords[0]), np.size(times)))
star_gas_flux = np.zeros(
(np.size(gas_coords[0]), np.size(times), num_stars))
for i in range(np.size(times)):
star_positions = star_position(star_pos_models, times[i])
gas_flux_values = np.zeros(
(np.size(gas_coords[0]), np.size(star_positions)))
# loop over the stars
for j in range(len(star_positions)):
r = sqrt((star_positions[j, 0] - gas_coords[0])**2. +
(star_positions[j, 1] - gas_coords[1])**2. +
(star_positions[j, 2] - gas_coords[2])**2.)
exclude = np.zeros(len(gas_coords[0]))
exclude[r >= stellar_wind_radius * meters] = 1.0
# weights for the different points
w = 0.5 + kappa * (gas_coords[0] - star_positions[j, 0]) / r
# w /= sum(w)
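# Each cloud's flux from star j follows an inverse-square law in the
# cloud-star distance r, modulated by the anisotropy weight w and zeroed
# for clouds inside the stellar-wind exclusion radius.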
gas_flux_values[:, j] = w * exclude * \
star_luminosities[j] / (r * r)
star_gas_flux[:, i, j] = gas_flux_values[:, j]
gas_flux[:, i] = np.sum(gas_flux_values, axis=1)
[spectra, wavelength_bins] = make_spectrum(gas_coords, gas_flux, times,
bins, plot_flag=False)
current_star_positions = star_position(star_pos_models, 2018.0)
current_star_distances = [
np.linalg.norm(x) for x in current_star_positions]
csd_js = np.argsort(current_star_distances)
star_colors = np.array([
cm.plasma(2.0 * float(j) / (len(star_data) - 1))
for j in range(len(star_data))])
# make a spectrum for each star
star_spectra = make_star_spectrum(gas_coords, star_gas_flux, times,
bins, num_stars, plot_flag=False)
# make a light curve (integrate over wavelength) for each star
# sorted by color scheme!
star_lightcurve = np.sum(star_spectra[:, :, csd_js], axis=1)
full_lightcurve = np.sum(spectra, axis=1)
# normalize
star_lightcurve /= np.max(full_lightcurve)
full_lightcurve /= np.max(full_lightcurve)
###################################################################
# set up the plot first
if plot_flag:
shade = 0.5
min_ptsize = 1.0
max_ptsize = 8.0
ssize = 5.0
boxsize = 2.0
fig = figure(figsize=(9, 9), facecolor='white')
grid_width = lcm(2, len(selected_times))
h_width = int(np.round(grid_width / len(selected_times)))
gs = GridSpec(4, grid_width)
# edge-on view 1, observer at +infinity of x-axis
axy = subplot(gs[:2, :3], autoscale_on=False, aspect='equal')
for si, star in enumerate(np.array(star_data)[csd_js]):
elpts = [star_position([
np.array(star_pos_models)[csd_js][si]], t)[
0] / meters for t in np.linspace(0, star[1]['period'],
400)]
plot([x[0] for x in elpts], [x[1] for x in elpts],
lw=0.5, c=star_colors[si])
sxy = scatter([0.0], [0.0], alpha=shade,
edgecolors='black', linewidths=0.5)
pxy = scatter([0.0], [0.0], c='r', s=ssize ** 2,
edgecolors='black', linewidths=0.5)
axis('equal')
xlabel('$x$')
ylabel('$y$')
# edge-on view 2, observer at +infinity of x-axis
axz = subplot(gs[:2, 3:], autoscale_on=False, aspect='equal')
for si, star in enumerate(np.array(star_data)[csd_js]):
elpts = [star_position([
np.array(star_pos_models)[csd_js][si]], t)[
0] / meters for t in np.linspace(0, star[1]['period'],
150)]
plot([x[0] for x in elpts], [x[2] for x in elpts],
lw=0.5, c=star_colors[si])
sxz = scatter([0.0], [0.0], alpha=shade,
edgecolors='black', linewidths=0.5)
pxz = scatter([0.0], [0.0], c='r', s=ssize ** 2,
edgecolors='black', linewidths=0.5)
axis('equal')
xlabel('$x$')
ylabel('$z$')
# view of observer looking at plane of sky
# ayz = subplot(2, 3, 3, autoscale_on=False, aspect='equal')
# for si, star in enumerate(np.array(star_data)[csd_js]):
# elpts = [star_position([
# np.array(star_pos_models)[csd_js][si]], t)[
# 0] / meters for t in np.linspace(0, star[1]['period'],
# 150)]
# plot([x[1] for x in elpts], [x[2] for x in elpts],
# lw=0.5, c=star_colors[si])
# syz = scatter([0.0], [0.0], alpha=shade,
# edgecolors='black', linewidths=0.5)
# pyz = scatter([0.0], [0.0], c='r', s=ssize ** 2,
# edgecolors='black', linewidths=0.5)
# axis('equal')
# xlabel('$y$')
# ylabel('$z$')
# avpl = subplot(2, 3, 4) # plot the vx vs. gas flux
# vpl = scatter([0.0], [0.0], alpha=shade, s=min_ptsize,
# edgecolors='black', linewidths=0.5)
# xlabel("$v_x \\,\\,\\, {\\rm (10,000 km/s)}$")
# ylabel("$\\rm Gas \\,\\,\\, Flux \\,\\,\\, (normalized)$")
ahpl = subplot(gs[2, :]) # light curve of star fluxes
plot(times, np.log10(full_lightcurve))
"""
VizWiz data loader for OpenVQA
Written by <NAME>
"""
import os
import numpy as np
import glob, json, re, en_vectors_web_lg
from openvqa.core.base_dataset import BaseDataSet
from openvqa.utils.ans_punct import prep_ans
class DataSet(BaseDataSet):
def __init__(self, __C):
super(DataSet, self).__init__()
self.__C = __C
# --------------------------
# ---- Raw data loading ----
# --------------------------
# Loading all image paths
frcn_feat_path_list = \
glob.glob(__C.FEATS_PATH[__C.DATASET]['train'] + '/*.npz') + \
glob.glob(__C.FEATS_PATH[__C.DATASET]['val'] + '/*.npz') + \
glob.glob(__C.FEATS_PATH[__C.DATASET]['test'] + '/*.npz')
# Loading question word list
# For VizWiz, assume that annotation files are located under raw_path
# folders for train, val, and test sets, respectively.
stat_ques_list = \
json.load(open(__C.RAW_PATH[__C.DATASET]['train'], 'r')) + \
json.load(open(__C.RAW_PATH[__C.DATASET]['val'], 'r')) + \
json.load(open(__C.RAW_PATH[__C.DATASET]['test'], 'r'))
# Loading answer word list
# Each VizWiz annotation file includes both questions and answers.
stat_ans_list = \
json.load(open(__C.RAW_PATH[__C.DATASET]['train'], 'r')) + \
json.load(open(__C.RAW_PATH[__C.DATASET]['val'], 'r'))
# Loading question and answer list
self.ques_list = []
self.ans_list = []
split_list = __C.SPLIT[__C.RUN_MODE].split('+')
for split in split_list:
self.ques_list += json.load(open(__C.RAW_PATH[__C.DATASET][split], 'r'))
if __C.RUN_MODE in ['train']:
self.ans_list += json.load(open(__C.RAW_PATH[__C.DATASET][split], 'r'))
# Define run data size
if __C.RUN_MODE in ['train']:
self.data_size = self.ans_list.__len__()
else:
self.data_size = self.ques_list.__len__()
print(' ========== Dataset size:', self.data_size)
# ------------------------
# ---- Data statistic ----
# ------------------------
# {image id} -> {image feature absolute path}
# In VizWiz, image id = image filename (without extension)
self.iid_to_frcn_feat_path = self.img_feat_path_load(frcn_feat_path_list)
# {question id} -> {question}
# In VizWiz, question id = image id
self.qid_to_ques = self.ques_load(self.ques_list)
# Tokenize
self.token_to_ix, self.pretrained_emb = self.tokenize(stat_ques_list, __C.USE_GLOVE)
self.token_size = self.token_to_ix.__len__()
print(' ========== Question token vocab size:', self.token_size)
# Answers statistic
# self.ans_to_ix, self.ix_to_ans = self.ans_stat('openvqa/datasets/vqa/answer_dict.json')
# TODO: what value should we use for "ans_freq" in the function below?
self.ans_to_ix, self.ix_to_ans = self.ans_stat(stat_ans_list, ans_freq=8)
self.ans_size = self.ans_to_ix.__len__()
print(' ========== Answer token vocab size (occur more than {} times):'.format(8), self.ans_size)
print('Finished!')
print('')
def img_feat_path_load(self, path_list):
iid_to_path = {}
for ix, path in enumerate(path_list):
# filename without extension is iid in VizWiz
iid = os.path.basename(path).split('.')[0]
# iid = str(int(path.split('/')[-1].split('_')[-1].split('.')[0]))
# print(iid)
iid_to_path[iid] = path
return iid_to_path
def ques_load(self, ques_list):
qid_to_ques = {}
for each in ques_list:
# filename without extension is qid in VizWiz
qid = each['image'].split('.')[0]
# ques = each['question']
# print(qid, ques)
# assign 'each' here to each qid for compatibility: some functions
# later look up the question text from this dict via the 'question' key.
qid_to_ques[qid] = each
return qid_to_ques
def tokenize(self, stat_ques_list, use_glove):
token_to_ix = {
'PAD': 0,
'UNK': 1,
'CLS': 2,
}
spacy_tool = None
pretrained_emb = []
if use_glove:
spacy_tool = en_vectors_web_lg.load()
pretrained_emb.append(spacy_tool('PAD').vector)
pretrained_emb.append(spacy_tool('UNK').vector)
pretrained_emb.append(spacy_tool('CLS').vector)
for ques in stat_ques_list:
words = re.sub(
r"([.,'!?\"()*#:;])",
'',
ques['question'].lower()
).replace('-', ' ').replace('/', ' ').split()
for word in words:
if word not in token_to_ix:
token_to_ix[word] = len(token_to_ix)
if use_glove:
pretrained_emb.append(spacy_tool(word).vector)
pretrained_emb = np.array(pretrained_emb)
return token_to_ix, pretrained_emb
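# Example (hypothetical input): tokenize([{'question': "What's this?"}], False)
# returns token_to_ix == {'PAD': 0, 'UNK': 1, 'CLS': 2, 'whats': 3, 'this': 4}
# and an empty pretrained_emb array, since the GloVe vectors are skipped.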
def ans_stat(self, stat_ans_list, ans_freq):
ans_to_ix = {}
ix_to_ans = {}
ans_freq_dict = {}
for ans in stat_ans_list:
# VizWiz does not have "multiple_choice_answer" annotation.
# TODO: what would be the right behavior for VizWiz then?
# ans_proc = prep_ans(ans['multiple_choice_answer'])
for each in ans['answers']:
ans_proc = prep_ans(each['answer'])
if ans_proc not in ans_freq_dict:
ans_freq_dict[ans_proc] = 1
else:
ans_freq_dict[ans_proc] += 1
ans_freq_filter = ans_freq_dict.copy()
for ans in ans_freq_dict:
if ans_freq_dict[ans] <= ans_freq:
ans_freq_filter.pop(ans)
for ans in ans_freq_filter:
ix_to_ans[ans_to_ix.__len__()] = ans
ans_to_ix[ans] = ans_to_ix.__len__()
return ans_to_ix, ix_to_ans
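# Example (hypothetical input): with ans_freq=1 and answer lists containing
# 'yes' twice and 'no' once, only 'yes' survives the frequency filter,
# giving ans_to_ix == {'yes': 0} and ix_to_ans == {0: 'yes'}.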
# def ans_stat(self, json_file):
# ans_to_ix, ix_to_ans = json.load(open(json_file, 'r'))
# return ans_to_ix, ix_to_ans
# ----------------------------------------------
# ---- Real-Time Processing Implementations ----
# ----------------------------------------------
def load_ques_ans(self, idx):
if self.__C.RUN_MODE in ['train']:
ans = self.ans_list[idx]
# ans is a dictionary following the structure below:
# {
# "image": (str),
# "question": (str),
# "answers": [{
# "answer": (str),
# "answer_confidence": (str)
# }, ...],
# "answer_type": (str),
# "answerable": (int)
# }
iid = ans["image"].split(".")[0]
# qid is iid in VizWiz
ques = self.qid_to_ques[iid]
# Process question
ques_ix_iter = self.proc_ques(ques, self.token_to_ix, max_token=14)
# Process answer
ans_iter = self.proc_ans(ans, self.ans_to_ix)
return ques_ix_iter, ans_iter, iid
else:
ques = self.ques_list[idx]
# ques is a dictionary following the structure below:
# {
# "image": (str),
# "question": (str),
# "answers": [{
# "answer": (str),
# "answer_confidence": (str)
# }, ...],
# "answer_type": (str),
# "answerable": (int)
# }
iid = ques["image"].split(".")[0]
ques_ix_iter = self.proc_ques(ques, self.token_to_ix, max_token=14)
return ques_ix_iter, np.zeros(1), iid
def load_img_feats(self, idx, iid):
frcn_feat = np.load(self.iid_to_frcn_feat_path[iid])
frcn_feat_x = frcn_feat['x'].transpose((1, 0))
frcn_feat_iter = self.proc_img_feat(frcn_feat_x, img_feat_pad_size=self.__C.FEAT_SIZE['vqa']['FRCN_FEAT_SIZE'][0])
bbox_feat_iter = self.proc_img_feat(
self.proc_bbox_feat(
frcn_feat['bbox'],
(frcn_feat['image_h'], frcn_feat['image_w'])
),
img_feat_pad_size=self.__C.FEAT_SIZE['vqa']['BBOX_FEAT_SIZE'][0]
)
grid_feat_iter = np.zeros(1)
return frcn_feat_iter, grid_feat_iter, bbox_feat_iter
# ------------------------------------
# ---- Real-Time Processing Utils ----
# ------------------------------------
def proc_img_feat(self, img_feat, img_feat_pad_size):
if img_feat.shape[0] > img_feat_pad_size:
img_feat = img_feat[:img_feat_pad_size]
img_feat = np.pad(
img_feat,
((0, img_feat_pad_size - img_feat.shape[0]), (0, 0)),
mode='constant',
constant_values=0
)
return img_feat
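# Example (illustrative sizes): a (3, 2048) feature padded with
# img_feat_pad_size=5 becomes (5, 2048) with zero rows appended; a (7, 2048)
# feature is first truncated to its leading 5 rows.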
def proc_bbox_feat(self, bbox, img_shape):
if self.__C.BBOX_NORMALIZE:
bbox_nm = np.zeros((bbox.shape[0], 4), dtype=np.float32)
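# Assumed completion (the source file is truncated here): a common
# convention normalizes the (x1, y1, x2, y2) pixel coordinates by the
# image width and height, falling back to the raw bbox otherwise.
bbox_nm[:, 0] = bbox[:, 0] / float(img_shape[1])
bbox_nm[:, 1] = bbox[:, 1] / float(img_shape[0])
bbox_nm[:, 2] = bbox[:, 2] / float(img_shape[1])
bbox_nm[:, 3] = bbox[:, 3] / float(img_shape[0])
return bbox_nm
return bbox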
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 5 18:56:30 2018
@author: <NAME>
"""
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import numpy as np
from sklearn.decomposition import PCA
from random import seed
from random import random
from random import gauss
import random
import copy
from scipy import stats
def data_rand(N,M,sigma,Groups=2):
# create the data container
data_rand = []
Labels = []
# draw one random mean per variable and add Gaussian noise to each sample
for _ in range(M):
mean_random = random.randint(50, 150)  # create one mean value
v = []  # create the sample points for each variable
for k in range(N):
v.append(gauss(mean_random, random.randint(sigma, 2 * sigma)))
data_rand.append(v)
for _ in range(N):
Labels.append(random.randint(0,Groups-1))
return data_rand,Labels
def add_signifficance(data,Labels,Groups,averageSig,sigma,sigvars):
sig = []
for j in Groups:
if j>0:
for v in sigvars:
k = random.randint(averageSig-2*sigma,averageSig+2*sigma) + gauss(0, random.randint(sigma,2*sigma))
sig.append(k)
data[Labels==j,v] = data[Labels==j,v] + k
return data,sig
def JSDe(X,Y,w,k):
# project the data onto k dimensions
N, M = np.shape(X)
import numpy as np
def to_column_matrix(arr_or_mat):
if len(arr_or_mat.shape) == 1:
arr_or_mat.shape = [len(arr_or_mat), 1]
elif np.shape(arr_or_mat)[0] == 1:
# Assumed completion (the source is truncated here): transpose a
# row vector into a column vector.
arr_or_mat = arr_or_mat.T
return arr_or_mat
import numpy as np
import matplotlib.pyplot as plt
from scipy.spatial.distance import cdist
import sys
from plot_area import plot_area
COLOR = ['tab:blue', 'tab:orange', 'tab:green']
class BaseClassifier:
d = -1
c = -1
def __init__(self, d):
super().__init__()
if (d <= 0):
raise RuntimeError('Classifier.D/C cannot be negative or zero')
self.d = d
def evaluate(self, feature):
if (np.shape(feature)[1] != self.d):
raise RuntimeError('Size of feature does not fit')
if (self.c < 0):
raise RuntimeError('Evaluate with an untrained classifier')
return 0
def train(self, data):
return self
class AugmentedSpaceClassifier(BaseClassifier):
training_features = np.array([])
training_classifications = np.array([])
classifications = np.array([])
weights_group = np.array([])
#!/usr/bin/python
import os, sys
import json
import numpy as np
import re
from collections import Counter
import itertools
import collections
from sklearn.metrics import accuracy_score
'''
Student Names: <NAME>, <NAME>
Student IDs: 21239691, 21240858
GitHub repositories : https://github.com/vishwasmj/ARC_2
https://github.com/AngithaM/ARC-1
*******************************************************************
Summary/reflection
Similarities:
1. Most of the tasks have input and output arrays of the same shape, or an easily determinable shape. This means one can be transformed into the other, or we can make modifications to a shape to get the final output.
2. There were a lot of common tasks such as identifying shapes, copying parts of a pattern, flipping, finding the position of the non-background squares.
3. Matching and extracting patterns are also typical, and so is identifying the background when it is non-black.
4. Group the colors and their corresponding positions to manipulate them as per the task.
5. Some cases involved finding boundaries of various shapes, including asymmetric shapes, which are often more challenging than others.
6. Also, we look for specific shapes like squares and transform them as per the task
7. All the tasks were present in the form of Numpy arrays as inputs. Most patterns involve symmetrical elements like quadrilaterals, diagonals, crosses, and other designs
Differences:
1. While the input was a NumPy array, not all were squares, and the shape changes within the same task at times.
2. The output and the input were not always of the same dimensions, and hence not all results can be obtained by manipulating the input.
3. Within the same task, different patterns need to be considered separately to solve it. While some patterns can be solved by locating their centers and sizes, others within the same pattern might require additional manipulation.
4. In some rare cases, backtracking might also be required when the next move is ambiguous.
Libraries used: For most of the problems, array manipulation was the most important and common task, and the numpy library was extremely helpful. Various numpy methods were used to match patterns, identify locations of points and so on. Aside from this, some of the standard Python libraries were used, like collections, itertools and Counter.
************************************************************************************************************
Summary/reflection
All the similarities and differences are indications of “general fluid intelligence” that the ARC is trying to determine using several geometrical and topological aspects that humans can solve easily. <NAME>, in his paper on ARC, defines intelligence as the rate at which a learner turns its experience and priors into new skills at valuable tasks that involve uncertainty and adaptation. Here, intelligent machines should identify the intent behind the patterns and start to move towards the solution in a generalized manner with a small number of training cases.
Manually coding these helps us identify what can be generalized and how we can approach each of the problems in a similar way. This is the first step to actually solving it.
Of course, this is not in scope at the moment. But using a simple neural network in the last example, we show that it is possible to approach these tasks in a general way even with so little training data. Neural networks are data-hungry and,
as expected, do not give good outputs.
In theory, one could even use genetic algorithms to find the solution. This might be better suited to problems where inputs are transformed into outputs, as the neighbour function would take a small step in the right direction from the input until it reaches the output. At the very least, we might need the output shape so as to determine our decision variables. The objective function in this case would be to minimize the difference between the input array and the output array. Of course, while this might seem like a good idea, it might not necessarily work on the test data.
"""
### YOUR CODE HERE: write at least three functions which solve
### specific tasks by transforming the input x and returning the
### result. Name them according to the task ID as in the three
### examples below. Delete the three examples. The tasks you choose
### must be in the data/training directory, not data/evaluation.
'''
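# A minimal sketch (hypothetical helper, not part of the assignment
# solutions) of the greedy genetic/hill-climbing idea described in the
# reflection above: mutate one cell at a time and keep the candidate when it
# reduces the number of cells differing from the target. As noted there, it
# assumes the output shape is already known.
def _hill_climb_sketch(x_in, x_out, iters=1000, seed=0):
    rng = np.random.default_rng(seed)
    cand = x_in.copy()
    best = np.sum(cand != x_out)  # objective: count of mismatched cells
    for _ in range(iters):
        trial = cand.copy()
        i = rng.integers(trial.shape[0])
        j = rng.integers(trial.shape[1])
        trial[i, j] = rng.integers(10)  # ARC colours are the integers 0..9
        score = np.sum(trial != x_out)
        if score < best:  # greedy neighbour step toward the target
            cand, best = trial, score
    return cand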
# ---------------------------------------------------solve_83302e8f start-----------------------------------------
def solve_83302e8f(x):
"""
Difficulty: High
The Problem: There is a square grid containing squares. Within these squares there are smaller,
equally sized squares. These squares are surrounded by single-square boundaries of various colours.
The task is to colour the squares which are completely surrounded by boundaries in green and the
others in yellow.
Assumptions: The Grid is always square. The smaller ones are also squares.
The Approach: Identify the size of the grid
Identify the size of the inner square
Identify the boundary colour
Then for each inner square along with boundary, check if the number of black coloured or
non-yellow coloured squares are equal to (size of the inner square)^2.
If so, we colour it green, else yellow.
Note: All test cases passed
Arguments: x, the nd array representing the square grid
return: x, the resultant array with the transformations applied.
"""
X_1 = x.copy() # just duplicating x
grid_len, grid_width = X_1.shape # Getting the grid size
# make sure its a square
assert grid_len == grid_width, "Grid not square, can't proceed any further"
# look for the size of the square inside
inner_sq_found = -1
# length of the square is found by traversing diagonally until a non-black square is encountered
# Here, it is observed that the pattern never has a missing corner. This logic will fail if that is not true.
i = j = 0
while (inner_sq_found == -1 and i < grid_len):
if X_1[i][j] == 0:
i = j = i + 1
else:
size_square = i
bound_colour = X_1[i][j]
inner_sq_found = 0
assert inner_sq_found != -1, "Pattern not square, can't proceed any further"
# checking each subgrid square to see if there are exactly size_square**2 black squares in it
# +1 is added to include the boundaries as well when you consider square size.
for i in range(0, grid_len, size_square + 1):
for j in range(0, grid_len, size_square + 1):
X_1 = colour_row(X_1, i, j, size_square, grid_len, bound_colour)
x = X_1
return x
def colour_row(x, r_i, r_j, size_square, grid_len, bound_colour):
"""
This function takes in the below arguments and takes each subset of the grid at a time including the
boundaries in all directions. It then evaluates how many black squares are present and if there are any
yellow-filled squares in the boundary, indicating a break in the square. If the number of squares is
as expected, then it colours the square green, else yellow.
Arguments:
x : The grid modified so far
r_i : starting point of the row
r_j : starting point of the column
size_square : The size of the smaller square to be coloured
grid_len : total Grid length
bound_colour : Boundary colour
Return: x, the modified array
"""
# counting black squares in a subset. initialize to 0
count_black = 0
# expected square count for a fully filled case.
count_square = size_square * size_square
# to hold all the black within the range
dict_black = {}
# flag: set if any non-black, non-boundary colour is found (a broken square)
yellow_filled = 0
# variables set to control the range
# the last column and row will have no boundary on the right or botton respectively
# so handling the boundary conditions
if r_i + size_square + 1 < grid_len:
r_i_x = r_i + size_square + 1
# to include the already considered boundary for the next square
if r_i != 0:
r_i = r_i - 1
# if last row, then change it to length of the grid
else:
r_i_x = grid_len
# to include the already considered boundary for the next square
r_i = r_i - 1
if r_j + size_square < grid_len:
r_j_x = r_j + size_square + 1
if r_j != 0:
r_j = r_j - 1
else:
r_j_x = grid_len
r_j = r_j - 1
# consider each subgrid of rows and columns, including all the boundaries
for i in range(r_i, r_i_x):
for j in range(r_j, r_j_x):
# if black, count it
if x[i][j] == 0:
count_black = count_black + 1
# so that we can change the colour of it later
if x[i][j] in dict_black:
dict_black[x[i][j]].append((i, j))
else:
dict_black[x[i][j]] = [(i, j)]
# if there is any non black non-background(boundary) colour, then it's part of the broken square
# hence, filled yellow
elif x[i][j] != bound_colour:
yellow_filled = 1
# if there are exactly length of square*2 squares and no boundary is filled already, then
# it's a perfect square and is hence coloured green
if count_black == count_square and yellow_filled == 0:
for i, j in dict_black[0]:
x[i][j] = 3
# else it is coloured yellow
else:
for i, j in dict_black[0]:
x[i][j] = 4
return x
# ---------------------------------------------------solve_83302e8f end-----------------------------------------
# ---------------------------------------------------solve_c8cbb738 start---------------------------------------
def solve_c8cbb738(x):
"""
Difficulty: High
The problem: We have a space with a background colour and several squares of different colours arranged in
various patterns. The patterns can be a square, a cross or a rectangle. All of them have to be arranged
into a square in such a way that all their centres are aligned.
Note: Rectangle 2 is longer in the vertical direction and rectangle 1 is longer in the horizontal direction.
Assumptions: Only the three shapes and the rectangle shape in two different forms will be present in the
input pattern.
Testing: All test cases passed
Approach: Step 1: Identify the background colour.
Step 2: Create a dictionary with non-background colours and their positions.
Step 3: Using this dictionary, identify the shapes and their sizes.
Step 4: Create a new matrix based on the shape, and position the various shapes in it.
Argument: x, the n-d array representing the space
return: x, after the above transformation is done.
"""
# Step 1: Find the background colour or the most common colour
list = x.tolist()
# looks for a row that contains only one value and assigns that as the background value.
# This is used to avoid ambiguity in case too many shapes are present and we can't tell the background colour
# from any one row.
for i in range(len(list)):
# checks if the row has only one colour present
if (len(Counter(list[i]).most_common()) == 1):
# if only one colour, that is assumed to be the background colour and
c = Counter(list[i]).most_common()
# assigned values
background_colour = c[0][0]
# once the background value is found, no further iteration is required.
break
# Step 2: find other colours and their shapes
a_dict = {}
# find the location of all the non-background colour values and add them to a dictionary
# with it's position and colour
for i in range(len(list)):
for j in range(len(list[i])):
if list[i][j] != background_colour:
if list[i][j] in a_dict:
a_dict[list[i][j]].append((i, j))
else:
a_dict[list[i][j]] = [(i, j)]
# Step3: create new square using this dictionary value.
square = get_outputMatrix(a_dict, background_colour)
# changing the float values to int
x = square.astype(int)
return x
def get_outputMatrix(a_dict, background_colour):
"""
The major task here is to find the shapes of each of the items with different colours.
We assume that it can be any one of three shapes: square, cross and rectangle (two variations).
First differentiate the squares and rectangles from the cross by checking for 2 values present on the
same row and same column. Then we check the length and breadth to tell apart the squares and rectangles.
Once we separate everything, we plot it.
Arguments: a_dict - the dictionary with the positions of the different shapes, keyed by their colour
background_colour - background colour of the initial matrix
Return: The new matrix with the centres aligned
"""
# initially we take that none of the shapes are present.
square_colour = rect_1_col = rect_2_col = cross_col = 0
# num of shapes present in the fig
size_dict = len(a_dict.keys())
for key in a_dict:
# all the points for a particular shape
list1 = a_dict[key]
row1 = 0
col1 = 0
# -1 as we're accessing elements using i+1
for i in range(len(list1) - 1):
# checking for squares on the same row
if (list1[i][0] == list1[i + 1][0]):
row1 = row1 + 1
# checking for squares on the same column
# comparing 1st and 3rd and 2nd and 4th elements
if (i == 0 or i == 1):
if (list1[i][1] == list1[i + 2][1]):
col1 = col1 + 1
# two pair of items on the same row and along the same column
if (row1 == col1) and (col1 == 2):
# finding length and breadth
length = list1[1][1] - list1[0][1]
breadth = list1[2][0] - list1[0][0]
# condition for square
if length == breadth:
square_colour = key
size_sq = abs(length)
# rectangle that's longer than broader
elif length > breadth:
rect_1_col = key
# rectangle that's broader than longer
else:
rect_2_col = key
# if it doesn't have 2 pairs in the same row and column, it's a cross
else:
cross_col = key
# create a new matrix that can hold the aligned centers
square = np.zeros((size_sq + 1, size_sq + 1))
# change the background colour
square = np.where(square == 0, background_colour, square)
i = 0
j = 0
# if square present, plot the square positions
if square_colour > 0:
square[i][j] = square_colour
square[i + size_sq][j] = square_colour
square[i][j + size_sq] = square_colour
square[i + size_sq][j + size_sq] = square_colour
# if cross present, plot the cross positions
if cross_col > 0:
square[int(size_sq / 2)][0] = cross_col
square[0][int(size_sq / 2)] = cross_col
square[size_sq][int(size_sq / 2)] = cross_col
square[int(size_sq / 2)][size_sq] = cross_col
# if rectangle 2 present, plot it's positions
if rect_2_col > 0:
square[size_sq][int(size_sq / 2) + 1] = rect_2_col
square[size_sq][int(size_sq / 2) - 1] = rect_2_col
square[0][int(size_sq / 2) + 1] = rect_2_col
square[0][int(size_sq / 2) - 1] = rect_2_col
# if rectangle 1 present, plot it's positions
if rect_1_col > 0:
square[int(size_sq / 2) - 1][0] = rect_1_col
square[int(size_sq / 2) + 1][0] = rect_1_col
square[int(size_sq / 2) - 1][size_sq] = rect_1_col
square[int(size_sq / 2) + 1][size_sq] = rect_1_col
# return the transformed square
return square
# ---------------------------------------------------solve_c8cbb738 end---------------------------------------
# ---------------------------------------------------solve_f35d900a start-------------------------------------
def solve_f35d900a(x):
"""
Difficulty: High
The Problem: The input grid is a rectangular (list of lists) matrix of variable shape, with numbers ranging
from 0 to 9 (inclusive). Different colors of the color spectrum are represented by the integers.
The task is to identify the different colours and their respective positions in the input grid,
create a square matrix around each coloured element such that the colour of the square matrix is
the colour of the element present in its sequential position. Then create horizontal and
vertical connections amongst all the square matrices; each connection is a step-by-step increment
from the middle element of each square matrix towards the other, at the same time.
Assumptions: The colour for connecting two square matrices is always 5 (silver).
The Approach: Identify the lists where there is a coloured element.
Identify each coloured element's index, row and column values.
Create a square matrix around each coloured element.
Identify the indices of zero elements between two non-zero elements.
Create horizontal connections: colour code the zero elements connecting two coloured matrices on the same row.
Create vertical connections: colour code the zero elements connecting two coloured matrices on the same column.
Note: All test cases passed
Arguments: x, the nd array representing the input grid
return: x, the resultant array with the transformations applied.
"""
# Create a copy of input array
ip_1 = x.copy()
# Convert the array to a rectangular grid (list of lists)
ip_1 = ip_1.tolist()
# Find the positions of coloured elements in the input grid
pos_input = list(position_of_ele_in_ip(sublist) for sublist in ip_1)
# Identify the element positions, the columns holding the elements, and the rows where the elements are present
pos_index, ele_present_index, ele_present_col_num = identifying_col_pos_ele(ip_1)
# Colours of the elements
colour_codes = get_colour_codes(ip_1)
# New grid having square matrix around the coloured elements
ip_1 = create_shape_for_ip_ele(pos_index, colour_codes, ip_1)
# Remove duplicate rows where element is present in the grid
ele_present_index = list(dict.fromkeys(ele_present_index))
# Creating horizontal connections
ip_1 = create_horizontal_connections(ip_1, ele_present_index)
# Remove duplicate columns where element is present in the grid
ele_present_col_num = list(dict.fromkeys(ele_present_col_num))
# Create horizontal connections
x = create_vertical_connections(ele_present_col_num, ip_1)
return np.array(x)
# Identify the positions of the different colours present in the input grid
def position_of_ele_in_ip(sublist_seq):
"""
The major task here is to iterate through each input list and store the position index of different colours.
Arguments: sublist_seq- A sublist of input grid.
Return:Position of coloured elements present in each sublist
"""
# list to store the index of the element
pos_of_ele = []
# Iterate through sublists of the input
for index, val in enumerate(sublist_seq):
# If the value present in the index is greater than 0
if val > 0:
# Store the index
pos_of_ele.append(index)
return pos_of_ele
def identifying_col_pos_ele(ip_1):
"""
The task of this function is to identify the coloured elements' positions, the columns holding the elements, and the rows where the elements are present.
Arguments: ip_1 - the input grid (list of lists).
Return: (row, column) positions of the coloured elements, the rows where elements are present, and the column indices of the elements.
"""
# Row and column in which the colours are present
pos_index = []
# Rows where the colours are present
ele_present_index = []
# Column index where the colours are present
ele_present_columns = []
# Iterate over each sublist to find the elements
for index, sublist in enumerate(ip_1):
for n, k in enumerate(sublist):
if k > 0:
a = [index, n]
pos_index.append(a)
ele_present_index.append(index)
ele_present_columns.append(n)
return pos_index, ele_present_index, ele_present_columns
# Idenitfy different colours present in the input grip
def get_colour_codes(ip_1):
"""
The task of this function is to identify the different colours of the elements present in the input grid.
Arguments: ip_1 - the input grid (list of lists).
Return: Colour code values of the coloured elements.
"""
# Store colour codes
colour_codes = []
for i in range(len(ip_1)):
# check for any value in the row that has value other than 0
if any(list(ip_1[i])) != 0:
# index of the first colour present in the sublist
first_colour_indx = np.min(np.nonzero(ip_1[i]))
# get the colour code of first colour based on its index
first_colour = ip_1[i][first_colour_indx]
# index of the first colour present in the sublist
second_colour_indx = np.max(np.nonzero(ip_1[i]))
# get the colour code of second colour based on its index
second_colour = ip_1[i][second_colour_indx]
colour_codes.append(second_colour)
colour_codes.append(first_colour)
return colour_codes
def create_shape_for_ip_ele(pos_index, colour_codes, ip_1):
"""
The task of this function is to identify the boundaries of a square matrix around the given position of the coloured element
and find the position values of boundaries and fill them with a colour code of the next sequential coloured element of the list.
Fill the square matrix position for all the coloured elements.
Arguments: ip_1- Entire manipulated input grid.
pos_index - Which gives us the row and position where the coloured element is present
colour_codes - Colour code of the next coloured element
Return: Manipulated input grid containing coloured square matrix with current coloured element in its centre position
"""
# Iterate over element present index and colour codes
for i, j in zip(pos_index, colour_codes):
# Row of the coloured element
a = i[0]
# Column of the coloured element
b = i[1]
# Previous row of the coloured element
a1 = a - 1
# succeeding row of the coloured element
a2 = a + 1
# previous column position index
b1 = b - 1
# succeeding column position index
b2 = b + 1
# Manipulating positions
# Filling the identified positions of the square matrix with colour codes
# Fill horizontal position
ip_1[a][b1] = j
ip_1[a][b2] = j
# Fill the previous row positions
ip_1[a1][b] = j
ip_1[a1][b1] = j
ip_1[a1][b2] = j
# Fill next row positions
ip_1[a2][b] = j
ip_1[a2][b1] = j
ip_1[a2][b2] = j
return ip_1
#
def find_enclosed_zeroes(lst):
"""
The task of this function is to find the enclosed zeros between two non-zero elements of a list.
We first identify the first non-zero element of the list and then find the position of the last non-zero element.
Once we have the zero values present between two non-zero values, we identify the index values of those elements, which
helps us determine the link between two lists in the grid.
Arguments: lst - sublist of the input grid.
Return: Indices of the zero elements present between two non-zero values, and the position of the first non-zero element.
"""
# Identify First non zero and last non zero element in the list
try:
first_nonzero = next(
i
for (i, e) in enumerate(lst)
if e != 0
)
last_nonzero = next(
len(lst) - i - 1
for (i, e) in enumerate(reversed(lst))
if e != 0
)
except StopIteration:
return lst[:]
# Include the element present in the last non-zero position
last_nonzero_pos = last_nonzero + 1
first_nonzero_pos = first_nonzero
# Find the index of the elements holding 0 values between two non-zero elements
idx_list = [idx for idx, val in enumerate(lst[:first_nonzero_pos] + lst[first_nonzero_pos:last_nonzero_pos]) if
val == 0]
return idx_list, first_nonzero_pos
# Create a horizontal connection between the created square matrices
def create_horizontal_connections(ip_1, ele_present_index):
"""
The task of this function is to create horizontal connections between two square matrices present in the same row.
We first identify the positions of the zero values between two non-zero values, then divide the list containing the positions
of the zero elements into two parts. We then reverse the second list and find the alternate position indices of zeros which need
to be filled in order to make connections. We then take the positions which need to be colour coded and fill those index values
of the grid with the silver colour code (5) to make the connections.
Arguments: ip_1 - Manipulated input grid.
ele_present_index - Rows where coloured elements are present
Return: Manipulated input grid containing horizontal connections between the sequential square matrices
"""
for i in ele_present_index:
# Get the index values of the zeros present between two non zero elements
lst_containing_zero_ele_index, first_non_zero_pos = find_enclosed_zeroes(ip_1[i])
# remove/ keep only zeros present after the first non zero element
pos_list_clean = []
for k in lst_containing_zero_ele_index:
if k > first_non_zero_pos:
pos_list_clean.append(k)
# Breaking the zero element positions into two seperate lists
first_half_lst = pos_list_clean[:len(pos_list_clean) // 2]
second_half_lst = pos_list_clean[len(pos_list_clean) // 2:]
## Identifying the alternate positions of zero element that has to be coloured from first list
ele_to_colour_from_first_lst = first_half_lst[::2]
# Reversing the second half of the list
# As we need to increment from two ends of the matrix
new_second_half_lst = second_half_lst[::-1]
# Identifying the alternate positions of zero element that has to be coloured from second list
ele_to_colour_from_second_list = new_second_half_lst[::2]
# Colour code the connecting elements
for j in ele_to_colour_from_first_lst:
ip_1[i][j] = 5
for k in ele_to_colour_from_second_list:
ip_1[i][k] = 5
return ip_1
# Create a vertical connection between the created square matrices
def create_vertical_connections(ele_present_columns, ip_1):
"""
The task of this function is to create vertical connections between two square matrices present in the same column.
We first identify the positions of the zero values between two non-zero values, then divide the list containing the positions
of the zero elements into two parts. We then reverse the second list and find the alternate position indices of zeros which need
to be filled in order to make connections. We then take the positions which need to be colour coded and fill those index values
of the grid with the silver colour code (5) to make the connections.
Arguments: ip_1 - Manipulated input grid.
ele_present_columns - Columns where coloured elements are present
Return: Manipulated input grid containing vertical connections between the square matrices.
"""
# iterate over columns of the coloured elements
for j in ele_present_columns:
# Get column values of coloured element
col_list = []
for i in range(len(ip_1)):
col_list.append(ip_1[i][j])
# Get the index values of the zeros present between two non zero elements
lst_containing_zero_ele_index, first_non_zero_pos = find_enclosed_zeroes(col_list)
# remove/ keep only zeros present after the first non zero element
pos_list_clean = []
for k in lst_containing_zero_ele_index:
if k > first_non_zero_pos:
pos_list_clean.append(k)
first_half_lst = pos_list_clean[:len(pos_list_clean) // 2]
second_half_lst = pos_list_clean[len(pos_list_clean) // 2:]
# Identifying the alternate positions of zero element that has to be coloured from first list
ele_to_colour_from_first_lst = first_half_lst[::2]
# Reversing the second half of the list
# As we need to increment from two ends of the matrix
new_second_half_lst = second_half_lst[::-1]
# Identifying the alternate positions of zero element that has to be coloured from second list
ele_to_colour_from_second_list = new_second_half_lst[::2]
# Colour code the connecting elements from first element
for n in ele_to_colour_from_first_lst:
ip_1[n][j] = 5
# Colour code the connecting elements from the second element
for m in ele_to_colour_from_second_list:
ip_1[m][j] = 5
return ip_1
# ---------------------------------------------------solve_f35d900a end------------------------------------------------
# ---------------------------------------------------solve_ded97339 start-------------------------------------
def solve_ded97339(x):
"""
Difficulty- medium to high
Problem description: Stars in the night sky! A grid of black squares represents the sky and the tiny blue
squares, the stars. The task is to find the constellations hidden in the sky and connect them.
How do we do this? We need to identify the ones that belong to a constellation and the ones that do not.
On observation, we can see that there is a simple common rule. All stars belonging to a constellation are
on the same row or column.
All the other stars are loners.
Assumptions: There are no other colours on the grid besides black and blue.
Solution: First find the non-black squares. Then find the ones that are on the same row or column. Connect the ones in
the same row, then connect the ones in the same column.
Arguments: x the nd array
return: x the transformed array
"""
start_pos = []
# find all the non-black squares
row, column = np.where(x > 0)
# get a list of their co-ordinates
for r, c in zip(row, column):
start_pos.append((r, c))
if len(row) != len(set(row)) and len(column) != len(set(column)):
# checking for perpendicular elements by checking for items on the same row
b = [item for item, count in collections.Counter(row).items() if count > 1]
# checking for points on the same column and creating a list
c = [item for item, count in collections.Counter(column).items() if count > 1]
row_list = []
col_list = []
# finding all the items perpendicular along the row and columns seperately
for (k, h) in start_pos:
if k in b:
row_list.append((k, h))
if h in c:
col_list.append((k, h))
# find start and end of all the elements in the same row and fill up
for i in range(len(row_list) - 1):
if row_list[i][0] == row_list[i + 1][0]:
a = row_list[i]
b = row_list[i + 1]
for i in range(a[1], b[1]):
x[a[0], i] = 8
# find start and end of all the elements in the same column and fill up
for a, b in itertools.product(col_list, col_list):
if a != b and a[0] < b[0] and a[1] == b[1]:
for i in range(a[0], b[0]):
x[i, a[1]] = 8
return x
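# Example (hypothetical grid): stars at (2, 1) and (2, 5) share row 2, so
# cells (2, 1) through (2, 4) are set to 8; together with the star already
# at (2, 5), this draws the full constellation segment.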
# ------------------------------------------------------solve_ded97339- end------------------------------------------
# ---------------------------------------------------solve_d0f5fe59 start-----------------------------------------------
def solve_d0f5fe59(x):
"""
Difficulty: Medium-to-difficult
The Problem: The input grid is a rectangular (list of lists) matrix of variable shape, containing a single colour value.
The blue colour of the colour spectrum is represented by the integer 8.
The task is to identify the boundaries between different shapes, identify and differentiate the different shapes,
and create a matrix whose diagonal elements represent the shapes. The created matrix should have a size of
(number of identified shapes * number of identified shapes).
Assumptions: The colour inside the input grid is always 8.
The number of different shapes in each given input list is not more than 2.
The Approach: Identify the lists where there is a coloured element.
Identify the different boundaries of all the shapes.
Order the elements by checking their boundaries according to their connection with respect to the next elements.
Check for links between the elements of each list.
Identify the number of shapes from the links between the elements of the grid.
Create a grid whose side length is the number of shapes present inside the input grid.
Fill the created grid with diagonal elements.
Note: All test cases passed
Arguments: x, the nd array representing the input grid
return: x, the resultant array with the transformations applied.
"""
# Identify the lists where there is a coloured element
pos_of_ele = list(position_of_elements(sublist) for sublist in x)
# Remove the null lists from the returned list of list position of elements
clean_pos_of_ele = list(filter(lambda x: x, pos_of_ele))
# Identify the different boundaries of the all the shapes
identified_boundaries_lst = identify_boundaries_of_the_shape(clean_pos_of_ele)
# Order the returned list according to its connection with respect to the elements
ordered_boundaries_lst = ordered_lst_ele(identified_boundaries_lst)
# Check for link between the elements of each list
ele_link = link_between_the_elements(ordered_boundaries_lst)
# Identify number of shapes from the link between elements of the list of list
identified_shapes = identify_diff_shapes(ele_link)
# Create an empty grid having number of shapes present inside the input grid as the shape
empty_op_grid = np.zeros(shape=(len(identified_shapes), len(identified_shapes)))
# Fill the created grid with diagonal elements
x = creating_diagnol_matrix(empty_op_grid)
return np.array(x)
# Check which list of list has elements present inside them
def position_of_elements(sublist_seq):
# list to store the index of the element
pos_of_ele = []
# Iterate through sublists of the input
for index, val in enumerate(sublist_seq):
# If the value present in the index is greater than 0
if val > 0:
# Store the index
pos_of_ele.append(index)
return pos_of_ele
# Identify the different boundaries of the shape
def identify_boundaries_of_the_shape(pos_clean_ip):
"""
The task of this function is to identify the boundaries of the different shapes present in the input grid. Given the list of element
positions with no null lists amongst them, if values in a sublist differ by more than 1, the elements present
in the list may belong to different shapes, or to the same shape connected through some other element. We further investigate these types
of sublists. We check such a sublist against the previous sublist to see whether any element has a connection with the disconnected
element; if not, we separate the elements into their own list, else we keep them in the same list.
Arguments: pos_clean_ip - list of element positions, with no null lists amongst them.
Return: boundary_splits - list of lists, where no sublist belongs to more than one shape.
"""
boundary_splits = []
for index, sublist in enumerate(pos_clean_ip):
# Check for difference between the elements of the sublist
chck_or_diff_bw_ele = np.diff(sublist)
# Store it as a list
ele_diff = list(chck_or_diff_bw_ele)
# Store the elements after classifying that they belong to different shapes
split_ele_lst = []
# Check if the difference between the elements of the sublist is greater than 1
if (all(ele == 1 for ele in ele_diff)) == False:
# Check if the previous index is not 0
if index != 0:
# get the previous sublist
before_sublist = pos_clean_ip[index - 1]
# Find if all the elements present in the current sublist are also present in the previous sublist
result = all(elem in before_sublist for elem in sublist)
# Split the elements in the same row belonging to different shape
if result == False:
num_of_splits = 0
# Number of splits that should take place in the sublist
for j in range(len(sublist) - 1):
difference_of_ele = abs(sublist[j] - sublist[j + 1])
if difference_of_ele > 1:
num_of_splits = num_of_splits + 1
pos_to_split = []
# Find the position where the split should take place
for k, n in enumerate(ele_diff):
if n > 1:
pos_to_split.append(k)
# Split the sublist into further subist based on different identified position boundaries
for pos_split in pos_to_split:
size = len(sublist)
# Find the idex which is matching the position to split
idx_list = [idx + 1 for idx, val in enumerate(sublist) if idx == pos_split]
# Split and merge the values present in the position split
split_based_on_pos = [sublist[i: j] for i, j in zip([0] + idx_list, idx_list
+ ([size] if idx_list[-1] != size else []))]
split_ele_lst.append(split_based_on_pos)
# If there is no elements in sublist to split, then append the sublist
if not split_ele_lst:
boundary_splits.append(sublist)
else:
# Append the "split and merged list" to the sublist
for i in range(len(split_ele_lst)):
for j in range(len(split_ele_lst) + 1):
sub_split_lst = split_ele_lst[i][j]
boundary_splits.append(sub_split_lst)
return boundary_splits
# Identify the link between the elements of the list
def link_between_the_elements(final_list):
"""
The task of this function is to identify the relationship between a current sublist and its succeeding sublist.
Then we store how many elements are matching between the lists.
Arguments: final_list - manipulated input grid
Return: ele_link - a list holding, for each sublist, the number of elements it shares with the successive sublist.
"""
ele_link = []
# Iterate over each row of the boundary list
for index in range(len(final_list) - 1):
# Elements matching between the current sublist and the next sublist
elements_matching = len([x for x in final_list[index] if x in final_list[index + 1]])
ele_link.append(elements_matching)
return ele_link
# Check if the list created after spliting is in the correct order
def ordered_lst_ele(ident_boud_lst):
"""
The task of this function is to check whether the boundary lists created are in the proper order, i.e. whether connected elements
are present next to each other in the list. If the current sublist has a connection with the sublist two positions ahead,
then we swap the positions of the intervening sublists.
Arguments: ident_boud_lst- Identified boundary list
Return: ident_boud_lst - correctly ordered boundary list.
"""
# Iterate over the created list
for index, val in enumerate(ident_boud_lst):
current_sublist = ident_boud_lst[index]
index_1 = index + 1
if index_1 < (len(ident_boud_lst) - 1):
next_sublist = ident_boud_lst[index + 1]
# check if there is any elements matching between current list and next sublist
if len(set(current_sublist) & set(next_sublist)) == 0:
index_2 = index + 2
if index_2 < (len(ident_boud_lst) - 1):
# check if there is any match of elements on the next to next sublist
nxt_to_nxt_sublist = ident_boud_lst[index_2]
if len(set(current_sublist) & set(nxt_to_nxt_sublist)) != 0:
# If there is an element matching the element in our current list then change the
# position of the sublists
ident_boud_lst[index_2], ident_boud_lst[index_1] = ident_boud_lst[index_1], ident_boud_lst[
index_2]
return ident_boud_lst
# Idenitfy different shapes based on the link between the elements
def identify_diff_shapes(store_link):
size = len(store_link)
# If there is no connection between the shapes then the difference between the list is represented by 0
# Find the occourance of the value 0 in the list having the list of elements mapping the of boundaries
boundary_idx_list = [idx + 1 for idx, val in enumerate(store_link) if val == 0]
# Create sublists representing different shapes present in boundary list
shapes_present_in_grid = [store_link[i: j] for i, j in
zip([0] + boundary_idx_list, boundary_idx_list +
([size] if boundary_idx_list[-1] != size else []))]
return shapes_present_in_grid
# Creating a diagnal matrix whose diagnol elements represents different shapes present in the input grid
def creating_diagnol_matrix(empty_op_grid):
len_of_seq = len(empty_op_grid)
# assign i the length of the matrix
i = len_of_seq
pos_counter = [0]
pos_counter_len = len(pos_counter)
puzzle_ele = []
# Colour code the diagonal elements blue
target = [8]
# Iterate over the rows from bottom to top
while (i >= 1):
i = i - 1
# Elements in the row
curr_lst_ele = empty_op_grid[i]
# Assigning the colour value to the diagonal index of the elements
for x, y in zip(pos_counter, target):
if x < len_of_seq:
curr_lst_ele[x] = y
# Storing the assigned values to the list
puzzle_ele.append(curr_lst_ele)
# Increasing the counter to get the diagonal position for that colour in each row
pos_counter = [x + 1 for x in pos_counter]
manipulated_puzzle_op = [arr.tolist() for arr in puzzle_ele]
return manipulated_puzzle_op
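# Example: creating_diagnol_matrix(np.zeros((3, 3))) returns
# [[8.0, 0.0, 0.0], [0.0, 8.0, 0.0], [0.0, 0.0, 8.0]]; the rows are filled
# bottom-up, which for a square grid puts the 8s on the main diagonal.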
# ---------------------------------------------------solve_d0f5fe59 end------------------------------------------------
# ---------------------------------------------------solve_ae3edfdc start-------------------------------------
def solve_ae3edfdc(x):
"""
Difficulty: Medium
The problem description: Gravity - well, not the regular kind. There are two centres of gravity, blue and
red, which have the ability to pull the orange and green squares in their path towards themselves, so that each
attracted square occupies the closest position with respect to its centre. The one condition: the attracted squares must be
perpendicular to the centre of gravity to get attracted to it.
Assumptions: There are no other colours in the space. The non-centres are always perpendicular to the centres.
The approach: Locate all the colourful squares in the 'space'. Then locate the centres of gravity.
Pair them up together as blue to orange and red to green. Check along the perpendicular path.
If there are any squares in it's path, move it to the closest position in the same line.
Testing:All test cases passed
Argument: x, the n-d array representing the space
return: x, after the above transformation is done.
"""
# find all the squares where colour not equal to black
row, column = np.where(x > 0)
colour_dict = {}
# put them all into one dictionary
for r, c in zip(row, column):
if x[r][c] in colour_dict:
colour_dict[x[r][c]].append((r, c))
else:
colour_dict[x[r][c]] = [(r, c)]
# -------------------Hardcoding the colours for the centres and it's pairs
center1 = 2
center2 = 1
pair1 = 3
pair2 = 7
# -----------------
# Creating two dictionaries based on the centre-pair value
keyPair1 = [center1, pair1]
keyPair2 = [center2, pair2]
d1 = {x: colour_dict[x] for x in colour_dict if x in keyPair1}
d2 = {x: colour_dict[x] for x in colour_dict if x in keyPair2}
# moving the position of the first centre-pair pair
half_done = match_pattern(d1, x, keyPair1)
# sending the half transformed to transform the rest
final = match_pattern(d2, half_done, keyPair2)
x = final
return x
def match_pattern(dict_fig, x, keyPair):
# get the row and column of the centre
r = dict_fig[keyPair[0]][0][0]
c = dict_fig[keyPair[0]][0][1]
# for every square belonging to this key-pair
for v in dict_fig[keyPair[1]]:
# if in the same row as the centre of gravity but before it
if v[0] == r and v[1] < c:
# closest point to centre on the same side
x[r][c - 1] = keyPair[1]
# set the old position to 0
x[v[0]][v[1]] = 0
# if in the same row as the centre of gravity but after it
elif v[0] == r and v[1] > c:
# closest point to centre on the same side
x[r][c + 1] = keyPair[1]
x[v[0]][v[1]] = 0
# if in the same column as the centre of gravity but above it
# (note: the row index v[0] is compared with r, the centre's row)
elif v[1] == c and v[0] < r:
x[r - 1][c] = keyPair[1]
x[v[0]][v[1]] = 0
# if in the same column as the centre of gravity but below it
elif v[1] == c and v[0] > r:
x[r + 1][c] = keyPair[1]
x[v[0]][v[1]] = 0
else:
# not per assumption
raise Exception("Pattern not handled")
return x
# ---------------------------------------------------solve_ae3edfdc end------------------------------------------------
# ---------------------------------------------------solve_feca6190 start----------------------------------------------
def solve_feca6190(x):
"""
Difficulty: Medium
The Problem: The input grid is a rectangular (list of lists) matrix with variable shape, with numbers ranging
    from 0 to 9 (inclusive). Different colours of the colour spectrum are represented by the integers.
    The task is to determine the colour schemes in the input grid and generate a matrix whose shape is given
    by multiplying the size of the input matrix by the number of colours present inside the grid. The next
    step is to fill the formed matrix diagonally with colours from the input grid, starting with the index
    value of each colour in the input grid.
Assumptions: The Input Grid is always of shape 1 * 5.
There cannot be more than five colours present in the grid.
No Colour is repeated.
The Approach: Identify the size of the grid.
Identify the colours present in the grid.
Identify the position of the colour in the grid.
Create an empty array whose shape would be of the size (number of colours * grid size,number of
colours * grid size).
Fill the first array with colours as per their index position of the input grid.
    Identify the diagonal positions for the elements in the first array and fill them with the integers
    present in the starting array.
    Flip the matrix to get the matching output.
Note: All test cases passed
Arguments: x, the nd array representing the input grid
return: x, the resultant array with the transformations applied.
"""
# Finding the number of colours and length of the input grid
# Returns list of tuple
colour_count_and_len_of_ip = list(number_of_colours(sublist) for sublist in x)
# Finding the index of the colours and return the positions as a list
colour_pos_in_input = list(position_of_colours(sublist) for sublist in x)
# Colour code present in the input grid and return the list of colours
col_codes = list(colour_code_count(sublist) for sublist in x)
# Create an empty matrix based on the number of colours and length of the input array
manipulation_grid = create_the_empty_grid(colour_count_and_len_of_ip)
# Manipulate the empty array
# Identify the diagonal positions in the grid
# Fill the diagonal positions with its respective colour codes
x = create_output_grid(manipulation_grid, colour_pos_in_input, col_codes)
    return np.array(x)
# occiput
# Martinos Center for Biomedical Imaging, Harvard University/MGH, Boston
# March 2015, Boston
from __future__ import absolute_import
__all__ = ['KSpace', 'MR_Static_Scan', 'MR_Dynamic_Scan']
# Import occiput:
from ...Core import Image3D
from ...Visualization.Colors import *
from ...DataSources.Synthetic.Shapes import uniform_cylinder
from ...Visualization.Visualization import ipy_table, has_ipy_table, svgwrite, has_svgwrite
from ...Core.NiftyPy_wrap import has_NiftyPy, INTERPOLATION_POINT, INTERPOLATION_LINEAR
from ...DataSources.FileSources.vNAV import load_vnav_mprage
#from ...DataSources.FileSources.MR_motion_sensors import load_motion_sensor_data
from ...DataSources.FileSources.Volume import import_nifti
# Import DisplayNode for IPython integration
from ....DisplayNode import DisplayNode
# Import other modules
from PIL import ImageDraw
from PIL import Image as PIL
from numpy import isscalar, linspace, int32, uint32, ones, zeros, pi, float32, where, ndarray, nan, inf, exp, asarray, \
    complex64, complex128, abs, angle, real, imag
from numpy.fft import fftn, ifftn, ifft2, fftshift
from numpy.random import randint
from numpy import asfortranarray
from scipy import optimize
import scipy
import scipy.signal
import scipy.io
import scipy.ndimage
import os
import h5py
# Import ilang (inference language; optimisation)
from .MR_ilang import MR_Static_Gaussian, MR_Dynamic_Gaussian, ProbabilisticGraphicalModel
from ....ilang.Samplers import Sampler
# Set verbose level
from ...global_settings import *
set_verbose_no_printing()
# set_verbose_high()
INTERPOLATION_COMPLEXPLANE = 0
INTERPOLATION_POLAR = 1
class SequenceParameters():
def __init__(self):
self.Tr = 0.020
self.name = 'Flash'
class KSpace:
def __init__(self, filename=None):
self.parameters = SequenceParameters()
self.data = None
self.motion = None
self.motion_events_indexes = None
self.filename = None
if filename is not None:
self.load_from_file(filename)
def load_from_file(self, filename, motion_data_file=None):
self.filename = filename
mat = scipy.io.loadmat(filename)
self.data = mat['data']
# fftshift
for i in range(self.data.shape[0]):
self.data[i, :, :, :] = fftshift(self.data[i, :, :, :])
# self.data = self.data.squeeze()
if motion_data_file is not None:
self.load_motion_from_file(motion_data_file)
def load_motion_from_file(self, filename):
        # NOTE: requires load_motion_sensor_data, whose import is commented
        # out at the top of this module.
        motion = load_motion_sensor_data(filename)
self.motion = motion # FIXME: define motion information
motion_events = self.motion.extract_motion_events()
motion_events_indexes = where(motion_events != 0)[0].tolist()
self.motion_events_indexes = motion_events_indexes
def get_static_data(self):
data = complex64(zeros((self.data.shape[1], self.data.shape[2], self.data.shape[3])))
for i in range(self.data.shape[0]):
data = data + self.data[i, :, :, :]
return data
class MR_Static_Scan:
def __init__(self):
self.kspace = None
def load_kspace(self, filename):
self.kspace = KSpace()
self.kspace.load_from_file(filename)
self._construct_ilang_model()
def reconstruct_iterative(self, method=None, iterations=20, tol=None, dt=0.4, beta=0.05):
kspace = self.kspace.get_static_data()
shape = kspace.shape
image = asarray(complex64(zeros(shape)), order='F')
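        # Gradient descent on the data-fidelity term ||k - F(image)||^2 with
        # an additional smoothness penalty (weight beta) derived from a
        # 3x3x3 Laplacian-like kernel applied to the image magnitude.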
for iter in range(iterations):
diff = kspace - self.project(image)
grad = -self.backproject(diff)
image_abs = abs(image)
kernel = -ones((3, 3, 3))
kernel[1, 1, 1] = 26
# 1) smooth magnitude
eps = 1e-8
            norm = 2 * (scipy.ndimage.filters.convolve(image_abs, kernel) + eps) / (image_abs + eps)
# 2) smooth square magnitude
# norm = 4*(scipy.ndimage.filters.convolve(image_abs,kernel))
g0_real = -real(image) * norm
g0_imag = -imag(image) * norm
g0 = complex64(g0_real + g0_imag * 1j)
image = image - dt * (grad - beta * g0)
return Image3D(abs(image))
def reconstruct_ifft(self):
kspace = self.kspace.get_static_data()
image = asarray(fftshift(ifftn(kspace)), order='F')
return Image3D(abs(image))
def project(self, volume):
return asarray(fftn(fftshift(volume)), order='F')
def backproject(self, kspace):
return asarray(fftshift(ifftn(kspace)), order='F')
def _construct_ilang_model(self):
# define the ilang probabilistic model
self.ilang_model = MR_Static_Gaussian(self)
# construct a basic Directed Acyclical Graph
self.ilang_graph = ProbabilisticGraphicalModel(['x', 'k', 'sigma'])
self.ilang_graph.set_nodes_given(['k', 'sigma'], True)
self.ilang_graph.add_dependence(self.ilang_model, {'x': 'x', 'k': 'k', 'sigma': 'sigma'})
# construct a basic sampler object
# self.sampler = Sampler(self.ilang_graph)
def _repr_html_(self):
if not has_ipy_table:
return "Please install ipy_table."
table_data = [
['bla ', 1],
['blabla', 2], ]
table = ipy_table.make_table(table_data)
table = ipy_table.apply_theme('basic_left')
table = ipy_table.set_global_style(float_format="%3.3f")
return table._repr_html_()
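# Usage sketch for MR_Static_Scan (illustrative helper; the file name is
# hypothetical and must be a .mat file with a 'data' entry laid out as
# expected by KSpace.load_from_file):
def _example_static_recon():
    scan = MR_Static_Scan()
    scan.load_kspace('kspace_static.mat')
    image_direct = scan.reconstruct_ifft()
    image_iter = scan.reconstruct_iterative(iterations=20, dt=0.4, beta=0.05)
    return image_direct, image_iter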
class MR_Dynamic_Scan:
def __init__(self):
self.kspace = None
self.time_bins = None
def load_kspace(self, filename, motion_data_file=None):
self.kspace = KSpace()
self.kspace.load_from_file(filename, motion_data_file)
N_time_bins = len(self.kspace.motion_events_indexes) + 1
self.time_bins = [0] + self.kspace.motion_events_indexes
self._construct_ilang_model()
def reconstruct_iterative(self, method=None, iterations=20, tol=None, dt=0.9, beta=0.05, active_frames=None,
interpolation_space=INTERPOLATION_COMPLEXPLANE, interpolation_mode=INTERPOLATION_POINT):
shape = self.kspace.get_static_data().shape
        image = asarray(complex128(zeros(shape)), order='F')
import numpy as np
a = np.zeros((4, 10, 3))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 26 11:38:14 2021
@author: christian
"""
from astropy import constants as const
from astropy.io import fits
from astropy.convolution import Gaussian1DKernel, convolve
import datetime as dt
import math
import matplotlib.backends.backend_pdf
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator, AutoMinorLocator
import numpy as np
from scipy.optimize import curve_fit
import scipy.stats as stats
from spectres import spectres
from tqdm import tqdm
import unyt as u
import warnings
def add_weight(line_pos, line_wid, w, err, pix_wid):
"""Lines up the two spectra by the amount of light absorpted in the area
around the line.
Parameters
----------
line_pos : float
The position of the absorption line.
line_wid : float
The width of the absorption line.
w : Array like
A subarray with wavelength values around the line.
err : Array like
The corresponding error array.
pix_wid : float
The width of a pixel in wavelength.
Returns
-------
Two variable:
weight : Array like
An array that weights the corresponding flux values for the
wavelength array w.
"""
i = 0
j = -1
npix = len(w)
# Initially every pixel is weighted by their inverse variance
weight = np.divide(np.ones(len(w)), np.square(err))
# Pixel at a lower wavelength than the specified window have weight = 0
while w[i] + pix_wid / 2 < line_pos - line_wid:
weight[i] = 0.0
i += 1
npix -= 1
# Pixel at a higher wavelength than the specified window have weight = 0
while w[j] - pix_wid / 2 > line_pos + line_wid:
weight[j] = 0.0
j -= 1
npix -= 1
# The pixels on the edge of the window have a reduced weight according to
# their width within the window.
weight[i] = weight[i] * (w[i] + pix_wid / 2 -
line_pos + line_wid) / pix_wid
weight[j] = weight[j] * (pix_wid / 2 +
line_pos + line_wid - w[j]) / pix_wid
# Number of pixels within the window takes into account fractions of pixels
npix = npix - 2.0 + (pix_wid / 2 + line_pos + line_wid - w[j]) / \
pix_wid + (w[i] + pix_wid / 2 - line_pos + line_wid) / pix_wid
    # Normalise the weights by the highest weight
weight = np.divide(weight, max(weight))
return weight, npix
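# Usage sketch for add_weight (illustrative helper with made-up values): a
# 2 A window sampled at 0.1 A per pixel, with a line of half-width 0.3 A at
# its centre. Pixels outside the window get zero weight; the two edge
# pixels are down-weighted by their fractional overlap.
def _example_add_weight():
    w = np.linspace(5000.0, 5002.0, 21)
    err = np.full_like(w, 0.01)
    weight, npix = add_weight(5001.0, 0.3, w, err, 0.1)
    return weight, npix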
def addSN(flux, time, vmag, DarkN, SkyN, n, norm_f, Boff=0.654, Roff=-0.352,
Ioff=-0.7, HARSN=1000, HAR=False):
"""Adds noice to the inserted flux. The noise is dependent on the
brightness of the target, the observation time, the dark noice and the
sky noice. It simulates noice for a solar star. This simulates noise for
a HERMES spectrum according to the capabilities of the spectrograph and
telescope.
Parameters
----------
flux : Array like
An array holding the flux.
time : float
Observing time (s).
vmag : float
Brightness in the V band (mag).
DarkN : float
Dark noise total photon count.
SkyN : float
Relative sky brightness.
n : int
Band identifier (0: B, 1: V, 2: R, 3: IR).
norm_f : Array like
Normalised flux array.
Boff : float
B band offset from V band (mag). Solar offset by default.
Roff : float
R band offset from V band (mag). Solar offset by default.
Ioff : float
IR band offset from V band (mag). Solar offset by default.
HARSN : float
Previous SNR in the original HARPS spectrum.
(negligible by default)
HAR : Boolean
Has been a HARPS spectrum before. Will take into account previous
noise of spectrum.
Returns
-------
    results : dict
Contains:
'SN' keyword for the resulting SN as a float
'SNpp' keyword for SN per pixel as a float
'e' keyword for the error numpy array
'f' keyword for the new flux array
"""
results = {}
# Determine the number of electrons observed in the specified band
if n == 0:
ne = time / 3600 * 10**(-0.4 * (0.993 * (vmag + Boff) - 24.05))
nepp = ne / 3.81 # number of measured electrons per pixel
# Find the SNR of the initial HARPS spectrum for the wavelength region.
        # Increases SNR per pixel for HERMES because of larger pixels
try:
harSN = min(HARSN[31:36]) * 2
except TypeError:
harSN = HARSN * 2
harSNpp = harSN / 3.81 # HARPS SNR per HERMES pixel
elif n == 1:
ne = time / 3600 * 10**(-0.4*(1.18 * vmag - 26.25))
nepp = ne / 4.69
try:
harSN = min(HARSN[52:56]) * 2
except TypeError:
harSN = HARSN * 2
harSNpp = harSN / 4.69
elif n == 2:
ne = time / 3600 * 10**(-0.4*(1.07 * (vmag + Roff) - 24.98))
nepp = ne / 3.74
try:
harSN = min(HARSN[66:70]) * 2
except TypeError:
harSN = HARSN * 2
harSNpp = harSN / 3.74
elif n == 3:
ne = time / 3600 * 10**(-0.4*(0.89 * (vmag + Ioff) - 22.33))
nepp = ne / 3.74
harSN = HARSN * 2
harSNpp = harSN / 3.74
# Calculate the SNR (and SNR per pixel) and the number of sky pixel.
skypp = SkyN * nepp * pow(2.5, vmag-17.552)
SN = np.sqrt(ne)
SNpp = math.sqrt(nepp + skypp)
# Compute results for HARPS spectra (calculate individual uncertainties and
# add random noise to the spectrum)
if HAR:
if harSN < SN:
results['e'] = np.abs(np.divide(flux,
np.sqrt(np.abs(norm_f))) / harSNpp)
results['f'] = flux + DarkN * flux / ne
results['SN'] = harSN
results['SNpp'] = harSNpp
else:
SNadd = 1/math.sqrt(1/(SNpp**2) + 1/(harSNpp**2))
adderr = flux / SNadd
results['f'] = np.add(flux, np.random.normal(0, adderr,
len(flux))) + DarkN * flux / ne
results['e'] = np.abs(np.divide(flux,
np.sqrt(np.abs(norm_f))) / SNpp)
results['SN'] = SN
results['SNpp'] = SNpp
# Compute results for HERMES spectra (calculate individual uncertainties and
# add random noise to the spectrum)
else:
results['SN'] = SN
results['SNpp'] = SNpp
results['e'] = np.abs(np.divide(flux, np.sqrt(np.abs(norm_f))) / SNpp)
results['f'] = np.add(flux, np.random.normal(0, results['e'],
len(flux)))
print(max(np.divide(results['f'], results['e'])))
return results
def addSN_simple(flux, SNR, norm_f):
"""Adds noice to the inserted flux. This is the most simple way to do it.
We take only a SNR and project noise of the projected strength on the flux
array.
Parameters
----------
flux : Array like
An array holding the flux.
SNR : float
The signal to noice ratio that shall be simulated on the flux
array.
norm_f : Array like
An array holding the normalised flux.
Returns
-------
    results : dict
        Contains:
            'SN' keyword for the resulting SN as a float
'SNpp' keyword for SN per pixel as a float
'e' keyword for the error numpy array
'f' keyword for the new flux array
"""
results = {}
# Calculate the flux uncertainties and apply random noise to the spectrum
results['SN'] = SNR
results['SNpp'] = SNR
results['e'] = np.abs(np.divide(flux, np.sqrt(np.abs(norm_f))) / SNR)
results['f'] = np.add(flux, np.random.normal(0, results['e'], len(flux)))
return results
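# Usage sketch for addSN_simple (illustrative helper): project SNR = 100
# noise onto a flat, normalised continuum.
def _example_addSN_simple():
    flux = np.ones(100)
    norm_f = np.ones(100)
    return addSN_simple(flux, 100.0, norm_f)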
def air_indexEdlen53(l, t=15., p=760.):
"""Return the index of refraction of air at given temperature, pressure,
and wavelength in Angstroms.
The formula is from Edlen 1953, provided directly by ESO.
Parameters
----------
l : float
Vacuum wavelength in Angstroms
t : float
Temperature in °C. (Don't actually change this from the default!)
p : float
Pressure in mmHg. (Don't actually change this from the default!)
Returns
-------
n : float
The index of refraction for air at the given parameters.
"""
n = 1e-6 * p * (1 + (1.049-0.0157*t)*1e-6*p) / 720.883 / (1 + 0.003661*t)\
* (64.328 + 29498.1/(146-(1e4/l)**2) + 255.4/(41-(1e4/l)**2))
n = n + 1
return n
def air2vacESO(air_wavelengths_array):
"""Take an array of air wavelengths and return an array of vacuum
wavelengths in the same units.
Parameters
----------
    air_wavelengths_array : `unyt.unyt_array`
A list of wavelengths in air, with dimensions length. Will be converted
to Angstroms internally.
Returns
-------
`unyt.unyt_array`
A unyt_array of wavelengths in vacuum, in the original units.
"""
reshape = False
original_units = air_wavelengths_array.units
if air_wavelengths_array.ndim == 2:
# We need to flatten the array to 1-D, then reshape it afterwards.
reshape = True
original_shape = air_wavelengths_array.shape
tqdm.write(str(original_shape))
air_wavelengths_array = air_wavelengths_array.flatten()
air_wavelengths_array.convert_to_units(u.angstrom)
tolerance = 2e-12
num_iter = 100
vacuum_wavelengths_list = []
# tqdm.write('Converting air wavelengths to vacuum using Edlen 1953.')
for i in range(0, len(air_wavelengths_array)):
new_wavelength = air_wavelengths_array[i].value
old_wavelength = 0.
iterations = 0
past_iterations = [new_wavelength]
while abs(old_wavelength - new_wavelength) > tolerance:
old_wavelength = new_wavelength
n_refraction = air_indexEdlen53(new_wavelength)
new_wavelength = air_wavelengths_array[i].value * n_refraction
iterations += 1
past_iterations.append(new_wavelength)
if iterations > num_iter:
print(past_iterations)
raise RuntimeError('Max number of iterations exceeded!')
vacuum_wavelengths_list.append(new_wavelength)
vacuum_array = u.unyt_array(vacuum_wavelengths_list, u.angstrom)
if reshape:
tqdm.write(f'Converting back to original shape: {original_shape}.')
# Reshape the array back to its original shape.
return vacuum_array.reshape(original_shape).to(original_units)
else:
return vacuum_array.to(original_units)
def center_line(w, f):
"""Measures the center of an absorption line.
Parameters
----------
w : Array like
        A subarray with wavelengths within a certain line.
    f : Array like
        A subarray with flux values within a certain line.
Returns
-------
A variable:
x_min : float
The wavelength value of the line after centering on the minimum.
"""
a = np.argmin(f)
w_pix = w[a] - w[a-1]
f2 = f[a-1:a+2]
A = np.array([[1, -1, 1], [0, 0, 1], [1, 1, 1]])
B = np.array([f2[0], f2[1], f2[2]])
X = np.linalg.inv(A).dot(B)
w_npix = - X[1] / (2*X[0])
if np.absolute(w_npix) > 1:
print("Error: Minimum not close to minimum")
if X[0] < 0:
print("Error: Minimum is actually maximum")
    # Return the centre of the fitted parabola
return w[a] + (w_npix * w_pix)
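# Usage sketch for center_line (illustrative helper): for a symmetric
# Gaussian dip whose minimum falls exactly on a grid point, the parabola
# through the three lowest pixels recovers the centre.
def _example_center_line():
    w = np.linspace(6562.0, 6563.0, 11)
    f = 1.0 - 0.5 * np.exp(-(w - 6562.5)**2 / (2 * 0.05**2))
    return center_line(w, f)   # ~6562.5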
def Gauss(x, a, x0, sigma, sigma0):
return 1 - a * np.exp(-(x - x0)**2 / (2 * (sigma0**2 + sigma**2)))
def determine_resolving_power(w, f, deg=2, band=0, specres=28000, w2=[],
f2=[]):
"""Determines the resolving power of an absorption feature.
Parameters
----------
w: Array like
        A subarray with wavelengths within a certain line.
f: Array like
A subarray with flux values within a certain line.
deg: int
The degree of the fitting polynomial.
band: int
The band in which the line is within the HERMES spectrograph
Returns
-------
    Three variables:
        R : float
            The measured resolving power of the line.
        sig_final : float
            The additional broadening of the line in km/s.
        sig_err : float
            The uncertainty of the additional broadening in km/s.
"""
# calculate the sigma of the band
boun = [[4715, 4900], [5649, 5873], [6478, 6737], [7585, 7885]]
c = const.c.to('km/s')
sig_final = 0
sig = (boun[band][1] + boun[band][0]) / (2.0 * 2.355 * specres)
# Fit a Gaussian to the line
mean = sum(w * f) / sum(f)
warnings.filterwarnings("ignore", "Covariance of the parameters could not"
+ " be estimated")
try:
popt, pcov = curve_fit(lambda x, a, x0, sigma:
Gauss(x, a, x0, sigma, 0), w, f,
p0=[1-min(f), mean, sig])
except RuntimeError:
return -1, -1, -1
# w_plot = np.linspace(w[0], w[-1], 100)
# plt.step(w_plot, Gauss(w_plot, popt[0], popt[1], popt[2]))
# plt.step(w, f)
# plt.show()
# plt.clf()
if 0.1 < popt[0] and popt[0] < 0.7:
sig2 = popt[2]
if np.diag(pcov)[2] > 0:
sig2_err = np.sqrt(np.diag(pcov)[2])
else:
return -1, -1, -1
R = float(popt[1] / (2.355 * popt[2]))
else:
return -1, -1, -1
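    # The fitted width and the instrumental width add in quadrature,
    # sigma_obs^2 = sigma_instr^2 + sigma_extra^2, so any additional
    # broadening is sqrt(sigma_obs^2 - sigma_instr^2), converted to km/s
    # below.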
sig_b = np.square(sig2) - np.square(sig)
if sig_b < 0:
sig_abs = np.sqrt(np.abs(sig_b))
sig_err = np.sqrt(np.square(sig2 * sig2_err * c.value /
(popt[1] * sig_abs)) +
np.square(sig_abs * c.value / popt[1]**2))
sig_final = 0
elif sig_b >= 0:
sig_b2 = np.sqrt(sig_b)
sig_err = np.sqrt(np.square(sig2 * sig2_err * c.value /
(popt[1] * sig_b2)) +
np.square(sig_b2 * c.value / popt[1]**2))
sig_final = sig_b2 * c.value / popt[1]
return R, sig_final, sig_err
def determine_resolving_power2(w, f, deg=2, band=0, specres=28000, w2=[],
f2=[]):
"""Determines the resolving power of an absorption feature.
Parameters
----------
w: Array like
        A subarray with wavelengths within a certain line.
f: Array like
A subarray with flux values within a certain line.
deg: int
The degree of the fitting polynomial.
band: int
The band in which the line is within the HERMES spectrograph
Returns
-------
    Three variables:
        R : float
            The measured resolving power of the line.
        sig_final : float
            The additional broadening of the line in km/s.
        sig_err : float
            The uncertainty of the additional broadening in km/s.
"""
# calculate the sigma of the band
boun = [[4715, 4900], [5649, 5873], [6478, 6737], [7585, 7885]]
c = const.c.to('km/s')
sig_final = 0
sig = (boun[band][1] + boun[band][0]) / (2.0 * 2.355 * specres)
# Fit a Gaussian to the line
mean = sum(w * f) / sum(f)
warnings.filterwarnings("ignore", "Covariance of the parameters could not"
+ " be estimated")
try:
popt, pcov = curve_fit(lambda x, a, x0, sigma:
Gauss(x, a, x0, sigma, sig), w, f,
p0=[1-min(f), mean, 4])
except RuntimeError:
return -1, -1, -1
if 0.1 < popt[0] and popt[0] < 0.7:
sig_b = np.abs(popt[2])
if np.diag(pcov)[2] > 0:
sig_b_err = np.sqrt(np.diag(pcov)[2])
else:
return -1, -1, -1
R = float(popt[1] / (2.355 * np.abs(popt[2])))
else:
return -1, -1, -1
w_plot = np.linspace(w[0], w[-1], 100)
# plt.step(w_plot, Gauss(w_plot, popt[0], popt[1], popt[2], sig))
# plt.step(w, f)
# plt.show()
# plt.clf()
sig_final = sig_b * c.value / popt[1]
sig_err = sig_b_err * c.value / popt[1]
return R, sig_final, sig_err
def determine_radvel(ref_flux, tar_flux, pixel, rv_weight, mpix=0,
plot_correlation=False, band=0,
mid_wav=0.0, secondary=False):
"""Determine the radial velocity between two Spectra
Parameters
----------
ref_flux : Array like
A subarray holding the reference's flux.
tar_flux : Array like
A subarray holding the targets flux.
pixel : float
The width of a pixel to determine the total shift in wavelength.
rv_weight : Array like
        An array with the weights of all pixels within this correction.
mpix : float
Eliminates pixels from the process as a fraction of total pixels.
E.g. 0.2 will eliminate 20% of pixels from both edges of the
        spectrum (which will reduce the number of pixels by 40%).
plot_correlation : Boolean
If True, plots the correlation function. Mainly used for
debugging.
band : Int
        Specifies the band in which this correction happens. This is only
relevant for the IR band (band=3) for which the algorithm needs
to be cautious because of sky correction residuals.
mid_wav : float
The middle of the correction to determine the radial velocity in
km/s.
secondary : Boolean
If True, the correction is in a more precise correction mode to
correct only small radial velocities (<8 HERMES pixel).
Returns
-------
    shift : float
        The radial velocity shift between the spectra, in wavelength units.
        If mid_wav is given, the arrays corr_rv (correlation velocity grid)
        and corr (correlation values) are returned as well.
"""
max_pix = 20
c = const.c.to('km/s')
tar_flux = np.array(tar_flux)
if band == 3:
tar_flux[tar_flux > 0.5] = np.ones_like(tar_flux[tar_flux > 0.5]) * 0.5
if mpix > 0.0 and mpix < 0.3:
pix_elim = int(len(ref_flux)*mpix)
ref_flux = ref_flux[pix_elim:-pix_elim]
corr = np.array([])
rv_weight = np.array(rv_weight)
    rv_weight = np.where(tar_flux < 0, 0.0, rv_weight)
if all(rv_weight == 0):
rv_weight = np.ones_like(rv_weight)
k = 0
while len(ref_flux)+k <= len(tar_flux):
weight = rv_weight[k:len(ref_flux)+k]
corr_temp = np.divide(
np.sum(np.multiply(np.multiply(
ref_flux, tar_flux[k:len(ref_flux)+k]), weight)),
np.multiply(np.sqrt(np.sum(np.multiply(
weight, np.square(ref_flux)))),
np.sqrt(np.sum(np.multiply(
weight, np.square(
tar_flux[k:len(ref_flux)+k]))))))
corr = np.append(corr, corr_temp)
k = k+1
pix_zero = int(len(corr) / 2)
if plot_correlation is True:
plt.plot(range(len(corr)), corr)
plt.show()
plt.close()
if secondary is True:
min_i = np.argmax(corr[pix_zero-max_pix:pix_zero+max_pix]) + \
pix_zero-max_pix
else:
min_i = np.argmax(corr)
shift = (min_i - pix_zero) * pixel
if plot_correlation is True:
plt.plot(range(len(ref_flux)) + min_i, 1 - ref_flux)
plt.plot(range(len(tar_flux)), 1 - tar_flux)
plt.show()
plt.close()
if mid_wav != 0:
corr_range = np.linspace(0, len(corr) - 1, len(corr))
corr_rv = (corr_range - pix_zero) * pixel * c.value / mid_wav
return shift, corr_rv, corr
else:
return shift
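# Usage sketch for determine_radvel (illustrative helper): the reference
# dip is embedded in the target two pixels off-centre, so the returned
# shift is ~2 * pixel.
def _example_determine_radvel():
    pixel = 0.05
    ref = 1.0 - 0.5 * np.exp(-np.linspace(-5.0, 5.0, 41)**2)
    tar = np.ones(51)
    tar[7:48] = ref                  # a centred dip would sit at offset 5
    rv_weight = np.ones_like(tar)
    return determine_radvel(ref, tar, pixel, rv_weight)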
def prepare_reference_rv(wave_r_old, flux_r_old, wave_t, res_power, center_w,
stacked=False, single_out=True, harps=False,
test=False):
"""Convolves and resamples a high resolution reference spectrum onto the
target wavelength grid.
Parameters
----------
wave_r_old : Array like
The old reference wavelength array.
flux_r_old : Array like
The old reference flux array.
wave_t : Array like
The wavelength array on which the spectrum will be projected on.
res_power : Array like
The resolving power of the target spectrum.
center_w : float
Wavelength at the centre of the array. Used to determine the
width of the convolving gaussian from the resolving power.
stacked : Boolean
If True, assumes the spectrum to be a stacked spectrum or to have
a "generic" resolving power layout as a result of being convolved
with HERMES resolving power.
single_out : Boolean
If True, doesn't return f_conv or w_temp (True by default).
harps : Boolean
Recognises HARPS spectra and takes into account their limited
resolving power.
Returns
-------
Variable:
flux_r : Numpy Array
The resulting reference flux array that fits the target wavelength
array.
f_conv : Numpy Array
The flux array on the old wavelength grid.
w_temp : Numpy Array
The old wavelength grid.
"""
if stacked is True:
band_correction = [0.8, 0.775, 0.75, 0.83]
j = int(wave_t[0] / 1000 - 4)
res_power = 28000 * band_correction[j]
if harps is True:
harps_res = 75000
res_power = 1 / np.sqrt(
1/np.square(res_power) - 1/np.square(harps_res))
w_temp = wave_r_old[np.bitwise_and(wave_t[0] - 10 < wave_r_old,
wave_r_old < wave_t[-1] + 10)]
f_temp = flux_r_old[np.bitwise_and(wave_t[0] - 10 < wave_r_old,
wave_r_old < wave_t[-1] + 10)]
w_pix = w_temp[1] - w_temp[0]
sigma = center_w / (2.355 * res_power * w_pix)
if test is True:
c = const.c.to('km/s')
lw2_wav = w_temp[0] * 15.0 / c.value / w_pix
sigma2 = w_temp[int(len(w_temp)/2)] / (2.355 * 28000 * w_pix)
mu = w_temp[0]
x = np.linspace(mu - 5*sigma, mu + 5*sigma, 100)
y = np.linspace(mu - lw2_wav, mu + lw2_wav, 100)
plt.plot(x, stats.norm.pdf(x, mu, sigma))
plt.plot(x, stats.norm.pdf(x, mu, sigma2))
plt.axvline(mu + lw2_wav, color='black')
plt.axvline(mu - lw2_wav, color='black')
print(np.sum(stats.norm.pdf(y, mu, sigma)) /
np.sum(stats.norm.pdf(y, mu, sigma2)))
plt.show()
Gauss = Gaussian1DKernel(stddev=sigma)
f_conv = convolve(f_temp, Gauss)
flux_r = spectres(wave_t, w_temp, f_conv)
if test is True:
print(sigma)
plt.plot(wave_t, flux_r)
plt.plot(w_temp, f_temp)
plt.show()
plt.clf()
if single_out is True:
return flux_r
else:
return flux_r, f_conv, w_temp
def prepare_reference(wave_r_old, flux_r_old, res_power,
stacked=False):
"""Convolves and resamples a high resolution reference spectrum onto the
target wavelength grid.
Parameters
----------
wave_r_old : Array like
The old reference wavelength array.
flux_r_old : Array like
The old reference flux array.
wave_t : Array like
The wavelength array on which the spectrum will be projected on.
res_power : Array like
The resolving power of the target spectrum.
stacked : Boolean
If True, assumes the spectrum to be a stacked spectrum or to have
a "generic" resolving power layout as a result of being convolved
with HERMES resolving power.
Returns
-------
Variable:
flux_r : Array like
The resulting reference flux array that fits the target wavelength
array.
"""
if stacked is True:
band_correction = [0.8, 0.775, 0.75, 0.83]
j = int(wave_r_old[0] / 1000 - 4)
res_power = 28000 * band_correction[j]
w_temp = wave_r_old
f_temp = flux_r_old
w_pix = w_temp[1] - w_temp[0]
sigma = w_temp[int(len(w_temp)/2)] / (2.355 * res_power * w_pix)
Gauss = Gaussian1DKernel(stddev=sigma)
f_conv = convolve(f_temp, Gauss)
return f_conv
def lineup(f_ref, f_tar, e_ref, e_tar, band=0, low_perc=False,
rv_weight=[0], Li_plot=False):
"""Lines up the two spectra by the amount of light absorpted in the
area around the line.
Parameters
----------
f_ref: Array like
A subarray with flux values around a certain line for the
reference spectrum.
f_tar: Array like
A subarray with flux values around a certain line for the
target spectrum.
e_ref: Array like
A subarray with error values around a certain line for the
reference spectrum.
e_tar: Array like
A subarray with error values around a certain line for the
target spectrum.
band : int
The band in which we want to use this algorithm.
(0: B, 1: V, 2: R, 3: IR)
low_perc : Boolean
If True, ignores the lowest 75% of flux values in the reference
spectrum and the corresponding pixel in the target.
rv_weight : Array like
        Gives relative weights to all pixels. Note that the array length
        must be the same as the length of f_ref and e_ref.
Returns
-------
    raise_tar : float
A number by which the target spectrum is multiplied in order
to line it up with the reference.
"""
if Li_plot is True:
plt.step(np.linspace(0, len(f_ref)-1, len(f_ref)), f_ref)
plt.step(np.linspace(0, len(f_tar)-1, len(f_tar)), f_tar)
plt.axvline(len(f_ref)/2)
plt.show()
plt.clf()
i = band
perc = 1.0
if low_perc is True:
perc = 0.25
b_coeff = [3, 3, 3, 3]
b_coeff2 = [5, 5, 5, 5]
    if len(rv_weight) != len(e_tar) or np.all(rv_weight == 0):
rv_weight = np.ones_like(e_tar)
weight = 1 / np.square(e_tar) * rv_weight
cut_value = np.sort(f_ref)[int(len(f_ref)*(1-perc))]
f_tar = f_tar[f_ref > cut_value]
weight = weight[f_ref > cut_value]
# weight = np.ones_like(weight[f_ref > cut_value])
f_ref = f_ref[f_ref > cut_value]
sum1 = sum(f_tar * weight)
if sum1 == 0:
return False
raise_tar = sum(f_ref * weight) / sum1
m_flag = False
for j in range(4):
f_tar_new = f_tar * raise_tar
e_tar_new = e_tar * raise_tar
con = f_tar_new < np.max(
[1.05 + e_tar_new * b_coeff[i],
1.0 + e_tar_new * b_coeff2[i]])
if np.median(f_tar_new[con]) > 1.05 or m_flag is True:
con = np.bitwise_and(con, f_tar_new > np.min(
[0.9 - e_tar_new * b_coeff[i],
1.0 - e_tar_new * b_coeff2[i]]))
m_flag = True
f_ref_new = f_ref[con]
weight_new = weight[con]
f_tar_new = f_tar_new[con]
raise_tar = raise_tar * sum(f_ref_new * weight_new) / \
sum(f_tar_new * weight_new)
return raise_tar
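# Usage sketch for lineup (illustrative helper): the target is the
# reference scaled down by a factor of two, so the returned multiplier is
# ~2.
def _example_lineup():
    f_ref = 1.0 - 0.2 * np.exp(-np.linspace(-3.0, 3.0, 50)**2)
    f_tar = f_ref / 2.0
    e = np.full(50, 0.01)
    return lineup(f_ref, f_tar, e, e, band=0)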
def line_prep_plot(center_w, center_f, linew, linew_old, window,
post_resolv_w, post_resolv_f, target_resolv_w,
target_resolv_f, post_norm_w, post_norm_f, reference_norm_w,
reference_norm_f, weights, twavcon):
"""
Makes a plot for section 2 of the Lehmann et al. 2021 paper.
"""
c = const.c.to('km/s')
l1_vel = 15
l2_vel = 400
lower_bound = linew - window*1.4
upper_bound = linew + window
pre_resolv_w, pre_resolv_f = center_w, center_f
pre_norm_w, pre_norm_f = target_resolv_w, target_resolv_f
reference_EW_w, reference_EW_f, target_EW_w, target_EW_f = \
reference_norm_w, reference_norm_f, post_norm_w, post_norm_f
l1_wav = linew * l1_vel / c.value
l2_wav = linew * l2_vel / c.value
output1 = 'Paper_Prep_plot1.pdf'
output2 = 'Paper_Prep_plot2.pdf'
output3 = 'Paper_Prep_plot3.pdf'
output4 = 'Paper_Prep_plot4.pdf'
output5 = 'MCR_spectrum.pdf'
pdf1 = matplotlib.backends.backend_pdf.PdfPages(output1)
pdf2 = matplotlib.backends.backend_pdf.PdfPages(output2)
pdf3 = matplotlib.backends.backend_pdf.PdfPages(output3)
pdf4 = matplotlib.backends.backend_pdf.PdfPages(output4)
pdf5 = matplotlib.backends.backend_pdf.PdfPages(output5)
fig, ax = plt.subplots(nrows=1, ncols=1)
fig.set_size_inches(8.5, 4.5)
ax.step(center_w, center_f, lw=4, where='mid', color='blue',
label='Reference spectrum')
ax.axhline(1, color='black', ls='--', lw=4)
ax.axvline(linew, color='black', ls='-', lw=4, label='Reference Centroid')
ax.axvline(linew_old-0.02, color='black', ls='dotted', lw=4,
label='Line list wavelength')
ax.set_xlim(lower_bound, upper_bound)
ax.set_ylim(0.4, 1.05)
# ax.set_xlabel(r'\LARGE Wavelength [\AA]')
ax.set_ylabel(r'Normalized flux')
ax.legend(loc='lower left', handlelength=1.0)
ax.xaxis.set_major_locator(MultipleLocator(0.5))
ax.xaxis.set_minor_locator(AutoMinorLocator())
ax.yaxis.set_major_locator(MultipleLocator(0.2))
ax.yaxis.set_minor_locator(AutoMinorLocator())
ax.set_rasterization_zorder(-10)
fig.subplots_adjust(left=0.01, right=0.99, bottom=0.02, top=0.98,
wspace=0.0, hspace=0.0)
pdf1.savefig(fig, bbox_inches='tight', pad_inches=0.02)
pdf1.close()
ax.clear()
ax.step(pre_resolv_w, pre_resolv_f, lw=4, where='mid', color='blue',
label='Unconvolved reference')
ax.step(post_resolv_w, post_resolv_f, lw=4, where='mid', color='purple',
label='Convolved reference')
ax.step(target_resolv_w, target_resolv_f, lw=4, where='mid', color='red',
label='HERMES target')
ax.axhline(1, color='black', ls='--', lw=4)
ax.axvline(linew, color='black', ls='-', lw=4)
ax.set_xlim(lower_bound, upper_bound)
ax.set_ylim(0.4, 1.05)
ax.set_xlabel(r'\LARGE Wavelength [\AA]')
ax.set_ylabel(r'Normalized flux')
ax.legend(loc='lower left', handlelength=1.0)
ax.xaxis.set_major_locator(MultipleLocator(0.5))
ax.xaxis.set_minor_locator(AutoMinorLocator())
ax.yaxis.set_major_locator(MultipleLocator(0.2))
ax.yaxis.set_minor_locator(AutoMinorLocator())
ax.set_rasterization_zorder(-10)
fig.subplots_adjust(left=0.01, right=0.99, bottom=0.02, top=0.98,
wspace=0.0, hspace=0.0)
pdf2.savefig(fig, bbox_inches='tight', pad_inches=0.02)
pdf2.close()
ax.clear()
ax.step(pre_norm_w, pre_norm_f-0.1, lw=4, where='mid', color='red',
label='Pre-norm target')
ax.step(post_norm_w, post_norm_f, lw=4, where='mid', color='orange',
label='Post-norm target')
ax.step(reference_norm_w, reference_norm_f, lw=4, where='mid',
color='purple', label='Reference')
ax.axhline(1, color='black', ls='--', lw=4)
ax.axvline(linew, color='black', ls='-', lw=4)
ax.set_xlim(linew - l2_wav, linew + l2_wav)
ax.set_ylim(0.4, 1.05)
# ax.set_xlabel(r'wavelength [\AA]')
ax.set_ylabel(r'Normalized flux')
ax.legend(loc='lower left', handlelength=1.0)
ax.xaxis.set_major_locator(MultipleLocator(5))
ax.xaxis.set_minor_locator(AutoMinorLocator())
ax.yaxis.set_major_locator(MultipleLocator(0.2))
ax.yaxis.set_minor_locator(AutoMinorLocator())
ax.set_rasterization_zorder(-10)
fig.subplots_adjust(left=0.01, right=0.99, bottom=0.02, top=0.98,
wspace=0.0, hspace=0.0)
pdf3.savefig(fig, bbox_inches='tight', pad_inches=0.02)
pdf3.close()
ax.clear()
ax.step(reference_EW_w, reference_EW_f, lw=4, where='mid', color='purple',
label='Reference')
ax.step(target_EW_w, target_EW_f, lw=4, where='mid', color='orange',
label='HERMES target')
ax.step(target_EW_w[twavcon], weights/max(weights)/3 + 0.39, lw=4,
where='mid', label="Weights")
ax.axhline(1, color='black', ls='--', lw=4)
ax.set_xlim(lower_bound, upper_bound)
ax.axvline(linew, color='black', ls='-', lw=4)
ax.axvline(linew - l1_wav, color='red', ls='--', lw=4)
ax.axvline(linew + l1_wav, color='red', ls='--', lw=4)
ax.set_ylim(0.4, 1.05)
ax.set_xlabel(r'Wavelength [\AA]')
ax.set_ylabel(r'Normalized flux')
ax.legend(loc='lower left', handlelength=1.0)
ax.xaxis.set_major_locator(MultipleLocator(0.5))
ax.xaxis.set_minor_locator(AutoMinorLocator())
ax.yaxis.set_major_locator(MultipleLocator(0.2))
ax.yaxis.set_minor_locator(AutoMinorLocator())
ax.set_rasterization_zorder(-10)
fig.subplots_adjust(left=0.01, right=0.99, bottom=0.02, top=0.98,
wspace=0.0, hspace=0.0)
pdf4.savefig(fig, bbox_inches='tight', pad_inches=0.02)
pdf4.close()
ax.clear()
ax.step(pre_resolv_w, pre_resolv_f, lw=2.5, where='mid',
label='R>100,000')
ax.step(post_resolv_w, post_resolv_f, lw=2.5, where='mid',
            label=r'R$\sim$28,000')
ax.axhline(1, color='black', ls='--', lw=2.5)
ax.set_xlim(linew - l2_wav-0.7, linew + l2_wav-8)
ax.set_ylim(0.4, 1.05)
ax.set_xlabel(r'Wavelength [\AA]')
ax.set_ylabel(r'Normalized flux')
ax.legend(loc='lower left', handlelength=1.0)
ax.xaxis.set_major_locator(MultipleLocator(4))
ax.xaxis.set_minor_locator(AutoMinorLocator())
ax.yaxis.set_major_locator(MultipleLocator(0.2))
ax.yaxis.set_minor_locator(AutoMinorLocator())
ax.set_rasterization_zorder(-10)
fig.subplots_adjust(left=0.01, right=0.99, bottom=0.02, top=0.98,
wspace=0.0, hspace=0.0)
pdf5.savefig(fig, bbox_inches='tight', pad_inches=0.02)
pdf5.close()
ax.clear()
def measure_EW(w, f, err, weight, line_wid):
"""Uses the weight and the given pixel to measure the EW
Parameters
----------
w : Array like
A subarray with wavelength values around a certain
line for the reference spectrum.
f : Array like
A subarray with flux values around a certain
line for the reference spectrum.
err: Array like
The flux error array
weight : Array like
A subarray with weights for flux values around
a certain line for the target spectrum.
line_wid : float
Width of the absorption line window in which to measure the EW.
Returns
-------
    Two variables:
        EW : float
            The equivalent width of the line (in mA for line_wid in A).
        EWs : float
            The uncertainty of the equivalent width.
"""
absorb = np.subtract(1.0, f)
abs_bar = np.sum(np.multiply(absorb, weight)) / np.sum(weight)
sig = np.sqrt(np.sum(np.multiply(np.square(weight), np.square(err)))
/ np.square(np.sum(weight)))
EW = abs_bar * line_wid * 1000
EWs = sig * line_wid * 1000
return EW, EWs
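# Usage sketch for measure_EW (illustrative helper): a flat 50 per cent
# absorption across a 1 A window corresponds to an equivalent width of
# 0.5 A, i.e. 500 mA.
def _example_measure_EW():
    w = np.linspace(5000.0, 5001.0, 11)
    f = np.full_like(w, 0.5)
    err = np.full_like(w, 0.005)
    weight = np.ones_like(w)
    return measure_EW(w, f, err, weight, 1.0)   # -> (500.0, ...)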
def prepare_target_wavelegnth(wave_new, wave_old, flux):
"""Re-samples a spectrum with a high pixelwidth to a spectrum with low
pixelwidth. This will not increase the accuracy of the spectrum.
Parameters
----------
wave_new : Array like
The new wavelength array on which the flux is projected.
wave_old : Array like
        The old wavelength array for the flux.
flux : Array like
The flux array of the spectrum.
Returns
-------
Variable:
flux_new : Array like
The resulting flux array for the wave_new wavelength array.
"""
flux_new = []
wav_disp_new = (wave_new[-1] - wave_new[0]) / len(wave_new)
wav_disp_old = (wave_old[-1] - wave_old[0]) / len(wave_old)
if wav_disp_new > wav_disp_old:
        print('Error: Old wavelength array is finer than the new one.')
return 1
if wave_new[0] < wave_old[0] or wave_new[-1] > wave_old[-1]:
print('Error: New wavelength array must be contained '
+ 'within the old one.')
return 1
for w_new in wave_new:
i = np.argwhere(np.array(wave_old) < w_new)[-1][0]
dist1, dist2 = w_new - wave_old[i], wave_old[i+1] - w_new
w1, w2 = 1 - (dist1 / wav_disp_old), 1 - (dist2 / wav_disp_old)
flux_new = np.append(flux_new, flux[i] * w1 + flux[i+1] * w2)
return flux_new
def readlinelistw(linelistfile):
"""Read a linelist and return the lines Wavelenghts, element, ionisation
and excitation potential.
Parameters
----------
linelistfile : str or Path object
A path to a linelist file to be read.
Returns
-------
    Four separate variables:
w : Numpy array
The wavelength values of all the lines.
elem : Numpy array
The element type of the corresponding lines.
ion : Numpy array
The ionisation of the corresponding lines.
ep : Numpy array
The excitation potential of the corresponding lines.
"""
# print(linelistfile)
with open(linelistfile) as linelist:
lines = linelist.readlines()
del_index = []
w = np.zeros_like(lines, dtype=float)
elem = np.chararray((len(lines),), unicode=True, itemsize=6)
ion = np.ones_like(lines, dtype=int)
ep = np.zeros_like(lines, dtype=float)
for i, line in enumerate(lines):
if line.startswith(';') or line.startswith('#'):
del_index.append(i)
continue
line = line.replace("|", " ")
w[i] = line.split()[1]
elem[i] = line.split()[0]
if all(ws <= 3 for ws in w):
del_index = []
ion = np.array(w)
for i, line in enumerate(lines):
if line.startswith(';') or line.startswith('#'):
del_index.append(i)
continue
line = line.replace("|", " ")
w[i] = float(line.split()[2])
if len(line.split()) > 3:
ep[i] = float(line.split()[3])
w = np.delete(w, del_index)
elem = np.delete(elem, del_index)
ion = np.delete(ion, del_index)
ep = np.delete(ep, del_index)
return w, elem, ion, ep
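# Expected linelist layout (inferred from the parsing above, shown as an
# illustration): comment lines start with ';' or '#'; otherwise each line
# reads
#     <element> <wavelength> [...]
# unless every value in the second column is <= 3, in which case that
# column is taken as the ionisation stage and the line reads
#     <element> <ion> <wavelength> [<excitation potential>]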
def read_ap_correct(w, elem, ion, ap_number, callibration, ap_weight, band=0):
"""Read the correction file for lines and return the correction values.
Parameters
----------
w: array-like
Wavelength of the lines.
elem: array like
Element of the lines.
ion: array-like
Ionization of the lines.
    ap_number : int or array like
The aperture number in which the spectrum was observed. Can be array
if spectrum is observed over multiple nights in different apertures.
callibration : str or Path object
The file in which the aperture scaling corrections are stored.
ap_weight : array like
Contains the weights for each contributing aperture.
Normally this is weighted with the SNR of each spectrum for the
combination process.
Returns
-------
ap_correct: Numpy array
Correction for each line. The array is in the same structure as the
line arrays.
"""
b_ex = [[4000, 8000], [4000, 5000], [5000, 6000], [6000, 7000],
[7000, 8000]]
ap_correct = np.ones_like(w)
ap_correct2 = [[]] * len(ap_number)
if ap_number[0] == 0:
return ap_correct
else:
for j in range(len(ap_number)):
with open(callibration + 'Resolving_map_correction/aperture' +
str(ap_number[j]) + '_correct.dat') as corr_file:
lines = corr_file.readlines()
if j == 0:
w2 = np.zeros_like(lines, dtype=float)
elem2 = np.chararray((len(lines),), unicode=True, itemsize=6)
ion2 = np.ones_like(lines, dtype=int)
ap_correct2[j] = np.ones_like(lines, dtype=float)
for i, line in enumerate(lines):
if line.startswith(';') or line.startswith('#'):
continue
if float(line.split()[2]) < b_ex[band][0] \
or float(line.split()[2]) > b_ex[band][1]:
continue
if j == 0:
elem2[i] = line.split()[0]
ion2[i] = int(line.split()[1])
w2[i] = float(line.split()[2])
ap_correct2[j][i] = float(line.split()[3]) * ap_weight[j]
ap_correct3 = np.sum(ap_correct2, axis=0)
for i in range(len(w)):
try:
corr_index = np.where(np.bitwise_and(np.bitwise_and(
elem2 == elem[i], ion2 == ion[i]), w2 == w[i]))[0][0]
ap_correct[i] = ap_correct3[corr_index]
except IndexError:
ap_correct[i] = 1.0
return ap_correct
def combine_ap(combine_file, spec_name):
"""Find all apertures used in HERMES to combine a resulting aperture array.
The weights of the spectrum are given by the SNR^2.
Parameters
----------
combine_file: str or Path object
        The name of the file which contains the necessary information.
Normally kept in the calibration folder.
spec_name: str or Path object
The identifier used in the table for this target
Returns
-------
ap_array: Numpy array
All aperture numbers that participated in the combined spectrum.
weights: Numpy array
The weight of all apertures that are part of the observation.
"""
aperture_array = []
snr_array = []
with open(combine_file, 'r') as comb:
lines = comb.readlines()
for line in lines:
if line.startswith('Name'):
continue
if line.startswith(spec_name):
aperture_array = np.array(line.split(',')[3::5])
snr_array = np.array(line.split(',')[5::5])
snr_array = np.array(snr_array[aperture_array != 'NaN'],
dtype='float')
aperture_array = np.array(
np.array(aperture_array[aperture_array != 'NaN'],
dtype='float'), dtype='int')
if len(aperture_array) == 0:
return [], []
weight_array = np.square(snr_array) / np.sum(np.square(snr_array))
for i in range(len(aperture_array)):
if len(aperture_array) < i+1:
break
indic = np.argwhere(aperture_array == aperture_array[i])
weight_array[i] = np.sum(weight_array[indic])
weight_array = np.delete(weight_array, indic[1:])
aperture_array = np.delete(aperture_array, indic[1:])
    aperture_array = np.where(aperture_array == 0, 1, aperture_array)
return aperture_array, weight_array
def rHERMES(FITSfile, datahdu=0, SN=False, e_hdu=1, plot_sky=False):
"""Read a HERMES FITS file and returns data information.
Parameters
----------
FITSfile : str or Path object
        A path to a HERMES FITS file to be read.
datahdu : int
Decides which data hdulist to read the data from
0 is Flux, 4 is normalized Flux in HERMES spectra.
Returns
-------
dict
A dictionary containing the following key-value pairs:
w : Numpy array
The wavelength array.
f : Numpy array
The flux array.
e : Numpy array
A zero array to be changed later
"""
result = {}
if FITSfile.endswith('.fits'):
with fits.open(FITSfile) as hdulist:
header0 = hdulist[0].header
f = hdulist[datahdu].data
unnorm_f = hdulist[0].data
sky_f = hdulist[2].data
e = hdulist[e_hdu].data
cdelta1 = header0['CDELT1']
crval1 = header0['CRVAL1']
rv_weight = np.ones_like(f)
for i in range(len(sky_f)):
if sky_f[i] < 0:
rv_weight[i] = 0
# create wavelength and error (only 0 values by this point) array
w = np.linspace(0, len(f) - 1, len(f)) * cdelta1 + crval1
# If we want to use the normalized spectrum, we should use a
# normalized error
# if datahdu == 4:
# e = np.divide(np.multiply(e, f), hdulist[0].data)
# write array on output
result['w'] = w
result['f'] = f
result['e'] = e
result['disp'] = w[1] - w[0]
result['rv_weight'] = rv_weight
if SN and 'SNR' in header0:
SNR = header0['SNR']
result['SNR'] = SNR
else:
result['SNR'] = 1000
if plot_sky is True:
print(np.subtract(sky_f, unnorm_f))
fig, ax = plt.subplots(nrows=1, ncols=1)
fig.set_size_inches(8.5, 4.5)
ax.step(w, sky_f, lw=2, where='mid', color='blue',
label='with sky')
ax.step(w, unnorm_f, lw=2, where='mid', color='red',
label='without sky')
ax.axhline(1, color='black', ls='--', lw=4)
ax.set_xlabel(r'\LARGE Wavelength [\AA]')
ax.set_ylabel(r'Normalized flux')
ax.legend(loc='lower left', handlelength=1.0)
ax.set_rasterization_zorder(-10)
plt.show()
return result
else:
wave = np.array([], dtype='float64')
flux = np.array([], dtype='float64')
with open(FITSfile) as data:
for line in data:
if line.startswith('#'):
continue
wave = np.append(wave, float(line.split(',')[0]))
flux = np.abs(np.append(flux, float(line.split(',')[1])))
result['w'] = wave
result['f'] = flux
result['e'] = np.absolute(np.divide(np.power(flux, 0.4), 1000))
result['SNR'] = 1000
return result
def r_resolving_map(FITSfile, ap_number, warn_flag=True, print_max_diff=False,
weight=[1]):
"""Read a HERMES FITS file containing a resolving power map as written in
Kos et al 2016.
Parameters
----------
FITSfile : str or Path object
A path to the FITS file to be read.
ap_number: int
        Number of the aperture used for the target spectrum.
Returns
-------
dict
A dictionary containing the following key-value pairs:
w : Numpy array
The wavelength array.
R : Numpy array
            Array of resolving powers for all queried lines.
"""
R = [[]] * len(ap_number)
with fits.open(FITSfile, mode='readonly') as hdulist:
if print_max_diff is True:
stuff = hdulist[0].data[hdulist[0].data > 0]
print(len(stuff) / len(hdulist[0].data[0]))
stuff = np.reshape(stuff, (int(len(stuff) /
len(hdulist[0].data[0])),
len(hdulist[0].data[0])))
print(np.amax(np.subtract(np.amax(stuff, axis=0),
np.amin(stuff, axis=0))))
for i in range(len(ap_number)):
header0 = hdulist[0].header
R[i] = np.multiply(hdulist[0].data[ap_number[i]], weight[i])
for r in R[i]:
if (r < 10000 and len(ap_number) == 1) or r == 0:
if warn_flag is True:
print('Warning: Aperture does not contain resolving' +
' power.')
R[i] = np.multiply(hdulist[0].data[ap_number[i]+1],
weight[i])
break
R_full = np.sum(R, axis=0)
cdelta1 = header0['CDELT1']
crval1 = header0['CRVAL1']
wav = np.linspace(0, len(R_full) - 1, len(R_full)) * cdelta1 + crval1
return {'w': wav, 'R': R_full}
def rHARPS(FITSfile, obj=False, wavelenmin=False, date_obs=False,
spec_bin=False, med_snr=False, hdnum=False, radvel=False,
coeffs=False, SN=False):
"""Read a HARPS ADP FITS file and return a dictionary of information.
Parameters
----------
FITSfile : str or Path object
A path to a HARPS FITS file to be read.
obj : bool, Default: False
If *True*, the output will contain the contents of the OBJECT FITS
header card.
wavelenmin : bool, Default: False
If *True*, the output will contain the contents of the WAVELMIN FITS
header card.
date_obs : bool, Default: False
If *True*, the output will contain the contents of the DATE-OBS FITS
header card.
spec_bin : bool, Default: False
If *True*, the output will contain the contents of the SPEC_BIN FITS
header card.
med_snr : bool, Default: False
If *True*, the output will contain the contents of the SNR FITS header
card.
hdnum : bool, Default: False
If *True*, the output will contain the contents of the custom-added
HDNUM FITS header card. (Added to unify object identifiers across all
stars, some of which were occasionally identified by things other than
HD number.)
radvel : bool, Default: False
If *True*, the output will contain the contents of the custom-added
RADVEL FITS header card. (Added to unify the radial velocity for each
star, as a small minority of stars had different radial velocity
information in their HIERARCH ESO TEL TAFG RADVEL header cards.)
coeffs : bool, Default: False
If *True*, the output will contain the contents of the various
*ESO DRS CAL TH COEFF LLX* header cards, where *X* ranges from 0 to
287.
Returns
-------
dict
A dictionary containing the following key-value pairs:
w : Numpy array
The wavelength array.
f : Numpy array
The flux array.
e : Numpy array
The estimated error array (HARPS returns no error array by
default).
Optionally
==========
obj : str
The object name from the 'OBJECT' flag.
wlmin : float
The minimum wavelength.
date_obs : datetime object
The date the file was observed.
spec_bin : float
The wavelength bin size.
med_snr : float
The median SNR of the flux array.
hd_num : str
The HD identifier of the star in the format "HDxxxxxx".
radvel : float
The radial velocity of the star in km/s.
If the `coeffs` keyword argument is *True*, there will be 288 entries
of the form "ESO DRS CAL TH COEFF LLX": *value*, where X will range
from 0 to 287.
"""
result = {}
try:
with fits.open(FITSfile) as hdulist:
try:
header0 = hdulist[0].header
header1 = hdulist[1].header
data = hdulist[1].data
w = data.WAVE[0]
gain = header0['GAIN']
# Multiply by the gain to convert from ADUs to photoelectrons
f = data.FLUX[0] * gain
e = 1.e6 * np.absolute(f)
result['w'] = w
result['f'] = f
result['e'] = e
if obj:
result['obj'] = header1['OBJECT']
if wavelenmin:
result['wavelmin'] = header0['WAVELMIN']
if date_obs:
result['date_obs'] = dt.datetime.strptime(
header0['DATE-OBS'], '%Y-%m-%dT%H:%M:%S.%f')
if spec_bin:
result['spec_bin'] = header0['SPEC_BIN']
if med_snr:
result['med_snr'] = header0['SNR']
if hdnum:
result['hdnum'] = header0['HDNUM']
if radvel:
result['radvel'] = header0['RADVEL']
if SN:
SNR = []
for i in range(72):
card = 'HIERARCH ESO DRS SPE EXT SN' + str(i)
SNR.append(header0[card])
result['SN'] = SNR
                # If the coeffs keyword is given, return all
                # 288 wavelength solution coefficients.
if coeffs:
for i in range(0, 288, 1):
key_string = 'ESO DRS CAL TH COEFF LL{0}'.format(
str(i))
result[key_string] = header0[key_string]
return result
            except Exception:
result['HAR'] = 1
header0 = hdulist[0].header
header1 = hdulist[1].header
data = hdulist[1].data
w = [1/x[0]*100000000 for x in np.flip(data)]
                # Flux is read directly from the table (no gain correction
                # is available in this format)
f = [x[1] for x in np.flip(data)]
result['w'] = w
result['f'] = f
result['e'] = np.divide(np.ones_like(w), 1000)
if obj:
result['obj'] = header1['OBJECT']
if wavelenmin:
result['wavelmin'] = header0['WAVELMIN']
if date_obs:
result['date_obs'] = dt.datetime.strptime(
header0['DATE-OBS'], '%Y-%m-%dT%H:%M:%S.%f')
if spec_bin:
result['spec_bin'] = header0['SPEC_BIN']
if med_snr:
result['med_snr'] = header0['SNR']
if hdnum:
result['hdnum'] = header0['HDNUM']
if radvel:
result['radvel'] = header0['RADVEL']
# if SN:
# SNR = []
# for i in range(72):
# card = 'HIERARCH ESO DRS SPE EXT SN' + str(i)
# SNR.append(header0[card])
# result['SN'] = SNR
            # If the coeffs keyword is given, return all 288 wavelength solution
            # coefficients.
if coeffs:
for i in range(0, 288, 1):
key_string = 'ESO DRS CAL TH COEFF LL{0}'.format(
str(i))
result[key_string] = header0[key_string]
return result
except OSError:
with open(FITSfile) as ascii_table:
w_line = ascii_table.readline()
f_line = ascii_table.readline()
w = [float(x) for x in w_line.split(',')]
f = [float(x) for x in f_line.split(',')]
result['w'] = w
result['f'] = f
result['e'] = np.absolute(np.divide(np.power(f, 0.4), 1000))
return result
def rflatHARPS(FITSfile, obj=False, wavelenmin=False, date_obs=False,
spec_bin=False, med_snr=False, hdnum=False, radvel=False,
coeffs=False, SN=False):
"""Read a HARPS ADP FITS file and return a dictionary of information.
Parameters
----------
FITSfile : str or Path object
A path to a HARPS FITS file to be read.
obj : bool, Default: False
If *True*, the output will contain the contents of the OBJECT FITS
header card.
wavelenmin : bool, Default: False
If *True*, the output will contain the contents of the WAVELMIN FITS
header card.
date_obs : bool, Default: False
If *True*, the output will contain the contents of the DATE-OBS FITS
header card.
spec_bin : bool, Default: False
If *True*, the output will contain the contents of the SPEC_BIN FITS
header card.
med_snr : bool, Default: False
If *True*, the output will contain the contents of the SNR FITS header
card.
hdnum : bool, Default: False
If *True*, the output will contain the contents of the custom-added
HDNUM FITS header card. (Added to unify object identifiers across all
stars, some of which were occasionally identified by things other than
HD number.)
radvel : bool, Default: False
If *True*, the output will contain the contents of the custom-added
RADVEL FITS header card. (Added to unify the radial velocity for each
star, as a small minority of stars had different radial velocity
information in their HIERARCH ESO TEL TAFG RADVEL header cards.)
coeffs : bool, Default: False
If *True*, the output will contain the contents of the various
*ESO DRS CAL TH COEFF LLX* header cards, where *X* ranges from 0 to
287.
Returns
-------
dict
A dictionary containing the following key-value pairs:
w : Numpy array
The wavelength array.
f : Numpy array
The flux array.
e : Numpy array
The estimated error array (HARPS returns no error array by
default).
Optionally
==========
obj : str
The object name from the 'OBJECT' flag.
wlmin : float
The minimum wavelength.
date_obs : datetime object
The date the file was observed.
spec_bin : float
The wavelength bin size.
med_snr : float
The median SNR of the flux array.
hd_num : str
The HD identifier of the star in the format "HDxxxxxx".
radvel : float
The radial velocity of the star in km/s.
If the `coeffs` keyword argument is *True*, there will be 288 entries
of the form "ESO DRS CAL TH COEFF LLX": *value*, where X will range
from 0 to 287.
"""
result = {}
with fits.open(FITSfile) as hdulist:
header0 = hdulist[0].header
f = hdulist[0].data
cdelta1 = header0['CDELT1']
crval1 = header0['CRVAL1']
        w = np.linspace(0, len(f) - 1, len(f)) * cdelta1 + crval1
e = np.zeros(len(f))
        # Placeholder error array (zeros); no uncertainties are provided here
if SN:
SNR = []
for i in range(72):
card = 'HIERARCH ESO DRS SPE EXT SN' + str(i)
SNR.append(header0[card])
result['SN'] = SNR
result['w'] = w
result['f'] = f
result['e'] = e
return result
def HAR2HER(spec, specres, pixelw, band_cor=True):
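    """Converts a HARPS spectrum onto the HERMES bands: the flux is
    resampled, convolved down to the (optionally band-corrected) HERMES
    resolving power and projected onto 4096 pixels per band. Three bands
    are used when the input does not reach the IR band, four otherwise.
    Returns 0 if the resampling fails.
    """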
if max(spec['w']) < 7885.0027:
boun = [[4713.5737, 4901.3360], [5649.1206, 5872.0078],
[6478.3989, 6736.1442]]
npix = 4096 # number of pixels per band
w = np.array(spec['w'])
f = np.array(spec['f'])
wreduced = [[], [], []]
wspec = [[], [], [], []]
freduced = [[], [], []]
avpix = np.ones(3)
sigma = np.ones(3)
if band_cor is True:
band_correction = [0.8, 0.775, 0.73]
else:
band_correction = [1., 1., 1.]
for i in range(3):
wreduced[i] = w[((boun[i][0] - 50) < w) & (w < (boun[i][1] + 50))]
freduced[i] = f[((boun[i][0] - 50) < w) & (w < (boun[i][1] + 50))]
aver = np.zeros(len(wreduced[i])-1)
for j in range(len(wreduced[i])-1):
aver[j] = wreduced[i][j+1] - wreduced[i][j]
avpix[i] = np.average(aver)
minus_pix = int(10.0/avpix[i])
npixold = len(wreduced[i]) - minus_pix*2
wspec[i] = np.linspace(boun[i][0]-40, boun[i][1]+40, num=npixold)
freduced[i] = spectres(wspec[i], wreduced[i], freduced[i])
wreduced[i] = wspec[i]
avpix[i] = wreduced[i][1] - wreduced[i][0]
# Convolving the flux with gaussians (smooth them out)
# Calculate for each band the sigma (from HERMES) and Gaussian
for j in range(3):
sigma[j] = (boun[j][1] + boun[j][0]) / \
(2.0 * 2.355 * specres * band_correction[j] * avpix[j])
# For a broadened spectrum use the factor 2.25
Gauss = Gaussian1DKernel(stddev=sigma[j])
# Convolve the flux with the Gaussian to "blur it out"
freduced[j] = convolve(freduced[j], Gauss)
wnew = [[], [], []]
for j in range(3):
wnew[j] = np.linspace(boun[j][0], boun[j][1], num=npix)
enew = [np.zeros(npix), np.zeros(npix), np.zeros(npix)]
fnew = [[], [], []]
for i in range(3):
try:
fnew[i] = spectres(wnew[i], wreduced[i], freduced[i])
except ValueError:
return 0
norm_flux = np.zeros_like(fnew)
return {'w': wnew, 'f': fnew, 'e': enew, 'norm_f': norm_flux}
else:
boun = [[4713.5737, 4901.3360], [5649.1206, 5872.0078],
[6478.3989, 6736.1442], [7585.0026, 7885.0027]]
npix = 4096 # number of pixels per band
w = np.array(spec['w'])
f = np.array(spec['f'])
wreduced = [[], [], [], []]
wspec = [[], [], [], []]
freduced = [[], [], [], []]
avpix = np.ones(4)
sigma = np.ones(4)
if band_cor is True:
band_correction = [0.8, 0.775, 0.75, 0.83]
else:
band_correction = [1., 1., 1., 1.]
for i in range(4):
wreduced[i] = w[((boun[i][0] - 50) < w) & (w < (boun[i][1] + 50))]
freduced[i] = f[((boun[i][0] - 50) < w) & (w < (boun[i][1] + 50))]
aver = np.zeros(len(wreduced[i])-1)
for j in range(len(wreduced[i])-1):
aver[j] = wreduced[i][j+1] - wreduced[i][j]
avpix[i] = np.average(aver)
minus_pix = int(10.0/avpix[i])
npixold = len(wreduced[i]) - minus_pix*2
wspec[i] = np.linspace(boun[i][0]-40, boun[i][1]+40, num=npixold)
freduced[i] = spectres(wspec[i], wreduced[i], freduced[i])
wreduced[i] = wspec[i]
avpix[i] = wreduced[i][1] - wreduced[i][0]
# convolving the flux with gaussians (smooth them out)
# Calculate for each band the sigma (from HERMES) and Gaussian
for j in range(4):
sigma[j] = (boun[j][1] + boun[j][0]) / \
(2.0 * 2.355 * specres * band_correction[j] * avpix[j])
# For a broadened spectrum use the factor 2.25
Gauss = Gaussian1DKernel(stddev=sigma[j])
# convolve the flux with the Gaussian to "blur it out"
freduced[j] = convolve(freduced[j], Gauss)
wnew = [[], [], [], []]
for j in range(4):
wnew[j] = np.linspace(boun[j][0], boun[j][1], num=npix)
enew = [np.zeros(npix), np.zeros(npix), np.zeros(npix), np.zeros(npix)]
fnew = [[], [], [], []]
for i in range(4):
try:
fnew[i] = spectres(wnew[i], wreduced[i], freduced[i])
except ValueError:
return 0
norm_flux = np.zeros_like(fnew)
return {'w': wnew, 'f': fnew, 'e': enew, 'norm_f': norm_flux}
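# Usage sketch for HAR2HER (defined only, never executed on import). The
# spectrum, resolution and pixel width below are synthetic, illustrative
# values; HAR2HER returns 0 when the resampling fails.
def _demo_har2her():
    wave = np.arange(4600.0, 6800.0, 0.01)
    spec = {'w': wave, 'f': np.ones_like(wave)}
    degraded = HAR2HER(spec, specres=28000, pixelw=0.01, band_cor=True)
    if degraded != 0:
        print([len(b) for b in degraded['w']])  # 4096 pixels per band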
def HAR2HER2(spec):
    """Rebin a HARPS spectrum onto the HERMES bands (with buffers), using the
    coarsest native pixel step instead of forcing 4096 pixels per band."""
boun = [[4713.5737, 4901.3360], [5649.1206, 5872.0078],
[6478.3989, 6736.1442], [7585.0026, 7885.0027]]
buf = [[5.5, 5.7, 11.2], [6.6, 6.9, 13.5], [7.6, 7.9, 15.5],
[8.9, 9.2, 18.1]]
w = np.array(spec['w'])
f = np.array(spec['f'])
wreduced = [[], [], [], []]
freduced = [[], [], [], []]
wnew = [[], [], [], []]
fnew = [[], [], [], []]
enew = [[], [], [], []]
if spec['w'][-1] < 7500:
bands = 3
else:
bands = 4
for i in range(bands):
wreduced[i] = w[((boun[i][0] - buf[i][0] - 1) < w) &
(w < (boun[i][1] + buf[i][1] + 1))]
freduced[i] = f[((boun[i][0] - buf[i][0] - 1) < w) &
(w < (boun[i][1] + buf[i][1] + 1))]
        pix_diff = np.array([])
        for j in range(len(wreduced[i])-1):
            pix_diff = np.append(pix_diff, wreduced[i][j+1] - wreduced[i][j])
step_size = np.max(pix_diff)
npix = int((boun[i][1]+buf[i][2] - boun[i][0]) / step_size)
wnew[i] = np.linspace(boun[i][0]-buf[i][0], boun[i][1]+buf[i][1],
num=npix)
fnew[i] = spectres(wnew[i], wreduced[i], freduced[i])
enew[i] = np.zeros_like(fnew[i])
norm_flux = np.zeros_like(fnew)
return {'w': wnew, 'f': fnew, 'e': enew, 'norm_f': norm_flux,
'norm_f2': norm_flux, 'e2': enew}
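# Sketch of the buffered rebinning path; the input grid is synthetic and only
# needs to span the band boundaries defined above.
def _demo_har2her2():
    wave = np.arange(4600.0, 8000.0, 0.05)
    spec = {'w': wave, 'f': np.ones_like(wave)}
    out = HAR2HER2(spec)
    print([len(b) for b in out['w']])  # pixels per band after rebinning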
def rHERMES_prep(FITSfile, datahdu=0, SN=False):
"""Read a HERMES FITS file and returns data information.
Parameters
----------
FITSfile : str or Path object
A path to a HARPS FITS file to be read.
datahdu : int
Decides which data hdulist to read the data from
0 is Flux, 4 is normalized Flux in HERMES spectra.
Returns
-------
dict
A dictionary containing the following key-value pairs:
w : Numpy array
The wavelength array.
f : Numpy array
The flux array.
        e : Numpy array
            A placeholder zero array, filled in later.
"""
result = {}
if FITSfile.endswith('.fits'):
with fits.open(FITSfile) as hdulist:
header0 = hdulist[0].header
f = hdulist[datahdu].data
sky_f = hdulist[2].data
try:
norm_f = hdulist[4].data
except IndexError:
norm_f = np.ones_like(f)
e = hdulist[1].data
cdelta1 = header0['CDELT1']
crval1 = header0['CRVAL1']
# create wavelength and error (only 0 values by this point) array
w = np.linspace(0, len(f) - 1, len(f)) * cdelta1 + crval1
# If we want to use the normalized spectrum, we should use a
# normalized error
# if datahdu == 4:
# e = np.divide(np.multiply(e, f), hdulist[0].data)
# write array on output
result['w'] = w
result['f'] = f
result['sky_f'] = sky_f
result['e'] = e
result['norm_f'] = norm_f
result['disp'] = w[1] - w[0]
if SN and 'SNR' in header0:
SNR = header0['SNR']
result['SNR'] = SNR
else:
result['SNR'] = 1000
return result
else:
wave = np.array([], dtype='float64')
flux = np.array([], dtype='float64')
with open(FITSfile) as data:
for line in data:
if line.startswith('#'):
continue
wave = np.append(wave, float(line.split(',')[0]))
flux = np.abs(np.append(flux, float(line.split(',')[1])))
result['w'] = wave
result['f'] = flux
result['e'] = np.absolute(np.divide(np.power(flux, 0.4), 1000))
result['SNR'] = 1000
return result
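# Usage sketch for rHERMES_prep; 'spectrum.fits' is a placeholder path and
# datahdu=4 selects the normalized-flux HDU described in the docstring.
def _demo_rhermes_prep():
    result = rHERMES_prep('spectrum.fits', datahdu=4, SN=True)
    print(result['disp'], result['SNR'])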
def get_spec_wave(fname):
hdulist = fits.open(fname)
hdu = hdulist[0]
crpix = hdu.header['CRPIX1']-1.0
crval = hdu.header['CRVAL1']
cdelt = hdu.header['CDELT1']
    # Linear wavelength solution: lambda_i = (i - CRPIX1) * CDELT1 + CRVAL1
    spec_wave = np.float32((np.arange(4096) - crpix) * cdelt + crval)
print(spec_wave)
return spec_wave
def read_unreduced(unred_spec):
fitsspec = {}
spec_wave = get_spec_wave(unred_spec)
hdulist = fits.open(unred_spec)
flux = hdulist[0].data
err = np.sqrt(np.abs(hdulist[1].data))
sky_f = hdulist[5].data
name = hdulist[2].data['NAME']
err = np.array(err)
# err that is nan or smaller than 0 is unacceptable,
# so they are set to the max error
for j in range(len(err)):
err[j][err[j] != err[j]] = np.ones_like(err[j][err[j] != err[j]]) * \
np.max(err[j][err[j] == err[j]])
err[j][err[j] <= 0] = np.ones_like(err[j][err[j] <= 0]) * \
np.max(err[j])
fitsspec['w'] = spec_wave
fitsspec['f'] = flux
fitsspec['e'] = err
fitsspec['sky_f'] = sky_f
fitsspec['norm_sky_f'] = np.zeros_like(sky_f)
fitsspec['disp'] = spec_wave[1] - spec_wave[0]
fitsspec['name'] = name
fitsspec['norm_f'] = np.zeros_like(fitsspec['f'])
fitsspec['band'] = int(spec_wave[0] / 1000) - 3
return fitsspec
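# Usage sketch for read_unreduced; 'unreduced.fits' is a placeholder path.
def _demo_read_unreduced():
    fitsspec = read_unreduced('unreduced.fits')
    print(fitsspec['band'], fitsspec['disp'], np.shape(fitsspec['f']))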
def make_cuts(array, n_parts):
"""
Cuts a given `array` up into `n_parts` equal parts, while making sure that
each part contains half of the previous and half of the next part (unless
it is the first or last part).
Parameters
----------
array : array_like
The array that needs to be cut into `n_parts`.
n_parts : int
The number of parts that `array` needs to be cut into.
Returns
-------
parts : list of :obj:`~numpy.ndarray` objects
List containing the created parts.
Example
-------
Dividing a sequence of six integers up into 5 parts:
>>> array = [0, 1, 2, 3, 4, 5]
>>> make_cuts(array, 5)
    [array([0, 1]),
     array([1, 2]),
     array([2, 3]),
     array([3, 4]),
     array([4, 5])]
"""
# Make sure that array is a NumPy array
array = np.asarray(array)
# Calculate the number of elements in array
len_tot = np.shape(array)[0]
# Determine number of cuts
n_cuts = int(np.ceil(n_parts/2))
# Determine the stepsize of all boundaries
step_size = len_tot/n_cuts/2
# Calculate the lengths of all bins
bins = np.array(np.ceil(np.arange(0, len_tot+1, step_size)), dtype=int)
# Divide array up into n_parts parts, using two sequential bins each time
parts = [array[i:j] for i, j in zip(bins[:-2], bins[2:])]
# Return parts
    return parts
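# The overlap property of make_cuts: consecutive parts share half of their
# elements, which keeps per-part continuum fits consistent at the seams when
# splice_spectra joins them again. A quick check on a 12-element array:
def _demo_make_cuts():
    parts = make_cuts(np.arange(12), 5)
    for left, right in zip(parts[:-1], parts[1:]):
        assert left[len(left) // 2:].tolist() == right[:len(right) // 2].tolist()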
def norm_flux(w, f, res_w, e=[], rej=0.75, l_order=3, Kurucz=False):
    """Fit the continuum of a spectrum with iteratively sigma-clipped Legendre
    polynomials and evaluate the fit on the wavelength grid `res_w`.

    Returns ([-1], [-1]) if the fit cannot be made.
    """
f1, f2, f3, f4, w1, w2, w3, index1, index2 = [], [], [], [], [], [], [],\
[], []
iterations = 0
if all(ws > 4500 for ws in w) and all(ws < 5000 for ws in w):
sd_correct = 3
else:
sd_correct = 1
    # Mask the H-beta and H-alpha regions and the telluric O2 A-band
    delete_con = np.argwhere([4850.0 <= ws <= 4875.0 or 6549.0 <= ws <=
                              6581.0 or 7587 <= ws <= 7689 for ws in w])
if Kurucz is True:
delete_con = np.argwhere([4855.0 <= ws <= 4870.0 or 6550.0 <= ws <=
6575.0 or 7587 <= ws <= 7689 for ws in w])
w1 = np.delete(w, delete_con)
f1 = np.delete(f, delete_con)
delete_con2 = np.argwhere([fs <= 0.1 for fs in f1])
if len(f1)*0.1 <= len(delete_con2):
        print('Warning: more than 10% of the spectrum has near-zero flux')
return [-1], [-1]
w1 = np.delete(w1, delete_con2)
f1 = np.delete(f1, delete_con2)
med_f = np.median(f1)
if len(e) != 0:
e1 = np.delete(e, delete_con)
e1 = np.delete(e1, delete_con2)
e1 = np.delete(e1, np.argwhere([fs > 2.5*med_f for fs in f1]))
med_e = np.median(e1)
e1 = np.where([e < med_e/3 for e in e1], med_e*1000, e1)
e3 = e1
w1 = np.delete(w1, np.argwhere([fs > 2.5*med_f for fs in f1]))
f1 = np.delete(f1, np.argwhere([fs > 2.5*med_f for fs in f1]))
f3 = np.array(f1)
w3 = np.array(w1)
w11 = w1[:int(len(w1)/2)]
w12 = w1[int(len(w1)/2):]
f11 = f1[:int(len(w1)/2)]
f12 = f1[int(len(w1)/2):]
if len(e) != 0:
e11 = e1[:int(len(w1)/2)]
e12 = e1[int(len(w1)/2):]
    # Keep only the upper (1 - rej) fraction of flux points in each half so
    # the initial fit traces the continuum rather than absorption lines
    for i in range(int(rej*len(f11))):
index1.append(np.argmin(f11))
f11[index1[i]] = 1000000000000
for i in range(int(rej*len(f12))):
index2.append(np.argmin(f12))
f12[index2[i]] = 1000000000000
w2 = np.concatenate([np.delete(w11, index1), np.delete(w12, index2)])
f2 = np.concatenate([np.delete(f11, index1), np.delete(f12, index2)])
if len(e) != 0:
e2 = np.concatenate([np.delete(e11, index1), np.delete(e12, index2)])
e22 = np.square(e2)
if len(e2[e22 == 0]) == 0:
weight = 1 / np.square(e2)
if np.any(np.isinf(weight)):
weight = np.ones_like(w2) / np.std(f2)
else:
weight = np.ones_like(w2) / np.std(f2)
else:
weight = np.ones_like(w2) / np.std(f2)
med_weig = np.median(weight)
for ind in range(len(weight)):
if weight[ind] > 2.5 * med_weig:
weight[ind] = 0
coef = np.polynomial.legendre.legfit(w2, f2, l_order, w=weight)
fit = np.polynomial.legendre.legval(w3, coef)
sdev = np.std(np.subtract(f3, fit))
    # Iterative asymmetric sigma clipping: drop points more than 3 sigma above
    # or 1.5 sigma below the current continuum fit, then refit
    while True:
        up = 3.0 * sdev
        down = -1.5 * sdev
dev = np.subtract(f3, fit)
f4 = np.delete(f3,
np.argwhere([devs > up or devs < down for devs in dev]))
w4 = np.delete(w3,
np.argwhere([devs > up or devs < down for devs in dev]))
if iterations > 100:
print("Warning: Reached 100 iterations on the continuum fit.")
break
if np.array_equal(f3, f4):
break
if len(e) == 0:
weight = np.ones_like(w4) / sdev
else:
e4 = np.delete(
e3, np.argwhere([devs > up or devs < down for devs in dev]))
weight = 1 / np.square(e4)
if np.any(np.isinf(weight)):
weight = np.ones_like(w4) / sdev
if len(w4) == 0:
print('Error: Fit could not stabilise!')
return [-1], [-1]
coef = np.polynomial.legendre.legfit(w4, f4, l_order, w=weight)
fit = np.polynomial.legendre.legval(w4, coef)
sdev = np.std(np.subtract(f4, fit))
f3 = f4
w3 = w4
if len(e) != 0:
e3 = e4
iterations = iterations + 1
fit = np.polynomial.legendre.legval(res_w, coef)
fit_e_sdev = sdev / (fit * sd_correct)
return fit, fit_e_sdev
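# Continuum-fitting sketch: norm_flux applied to a noisy, featureless sloped
# continuum should recover it closely (flux/fit ratio near 1 everywhere).
def _demo_norm_flux():
    rng = np.random.default_rng(0)
    w = np.linspace(5660.0, 5860.0, 2000)
    f = 1000.0 + 0.5 * (w - w[0]) + rng.normal(0.0, 5.0, w.size)
    fit, fit_e_sdev = norm_flux(w, f, w)
    print(np.mean(f / fit))  # ~1.0 for a good continuum fit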
def splice_spectra(wav, flux):
"""
    Splices the cut, normalized parts of a spectrum (as produced by
    `make_cuts`) back together into one continuous spectrum, joining
    consecutive parts where they overlap.
Parameters
----------
wav : array_like
The array containing the cut wavelength arrays
flux : array_like
The array containing the cut flux arrays (normalized).
Returns
-------
array : np_array
The array containing the complete spectrum.
"""
# find the index where each array overlaps with a new one
splind = []
w = []
f = []
for i in range(len(wav)-1):
splitpoint = wav[i+1][0]
splind = np.concatenate((splind,
np.argwhere(abs(wav[i] - splitpoint)
< 0.001)[0]))
splitpoint = wav[-2][-1]
splind = np.concatenate((splind, np.argwhere(abs(wav[-1] - splitpoint)
< 0.001)[0] + 1))
splind = splind.astype(int)
# Get the first part of the spectrum (no overlap)
for i in range(len(wav)+1):
if i == 0:
f = np.concatenate((f, np.split(flux[i], [0, splind[i]])[1]))
            w = np.concatenate((w, np.split(wav[i], [0, splind[i]])[1]))
import numpy as np
import os
import re
import requests
import sys
import time
from netCDF4 import Dataset
import pandas as pd
from bs4 import BeautifulSoup
from tqdm import tqdm
# setup constants used to access the data from the different M2M interfaces
BASE_URL = 'https://ooinet.oceanobservatories.org/api/m2m/' # base M2M URL
SENSOR_URL = '12576/sensor/inv/' # Sensor Information
# setup access credentials
AUTH = ['OOIAPI-853A3LA6QI3L62', '<KEY>']
def M2M_Call(uframe_dataset_name, start_date, end_date):
    """Request a NetCDF export of `uframe_dataset_name` between `start_date`
    and `end_date` from the M2M API, polling until the request completes."""
options = '?beginDT=' + start_date + '&endDT=' + end_date + '&format=application/netcdf'
r = requests.get(BASE_URL + SENSOR_URL + uframe_dataset_name + options, auth=(AUTH[0], AUTH[1]))
if r.status_code == requests.codes.ok:
data = r.json()
else:
return None
# wait until the request is completed
print('Waiting for OOINet to process and prepare data request, this may take up to 20 minutes')
url = [url for url in data['allURLs'] if re.match(r'.*async_results.*', url)][0]
check_complete = url + '/status.txt'
    with tqdm(total=400, desc='Waiting') as bar:
        for i in range(400):
            r = requests.get(check_complete)
            bar.update(1)
            elapsed = (i * 3) / 60
            if r.status_code == requests.codes.ok:
                bar.n = 400
                bar.last_print_n = 400
                bar.refresh()
                print('\nrequest completed in %f minutes.' % elapsed)
                break
            else:
                time.sleep(3)
return data
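# Example request (defined only, not executed): the reference designator is
# one of the datasets mapped by M2M_URLs below; the dates are placeholders in
# the timestamp format UFrame expects.
def _demo_m2m_call():
    name = 'CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
    return M2M_Call(name, '2019-01-01T00:00:00.000Z', '2019-01-31T23:59:59.999Z')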
def M2M_Files(data, tag=''):
"""
    Use a regex tag combined with the results of the M2M data request to list
    the NetCDF data files available in the THREDDS catalog.
    :param data: JSON object returned from the M2M data request, with details
        on where the data can be found for download
    :param tag: regex tag used to discriminate the data files, so we only
        collect the correct ones
    :return: list of the matching NetCDF files in the THREDDS catalog
"""
# Create a list of the files from the request above using a simple regex as a tag to discriminate the files
url = [url for url in data['allURLs'] if re.match(r'.*thredds.*', url)][0]
files = list_files(url, tag)
return files
def list_files(url, tag=''):
"""
Function to create a list of the NetCDF data files in the THREDDS catalog created by a request to the M2M system.
:param url: URL to user's THREDDS catalog specific to a data request
:param tag: regex pattern used to distinguish files of interest
:return: list of files in the catalog with the URL path set relative to the catalog
"""
page = requests.get(url).text
soup = BeautifulSoup(page, 'html.parser')
pattern = re.compile(tag)
return [node.get('href') for node in soup.find_all('a', text=pattern)]
def M2M_Data(nclist, variables):
    """Load the requested variables from each NetCDF file in `nclist` via
    OPeNDAP, appending the values into the matching `var` objects."""
    thredds = 'https://opendap.oceanobservatories.org/thredds/dodsC/ooi/'
    # nclist may contain more than one URL; data from all files are appended
    for jj in range(len(nclist)):
        url = nclist[jj]
        # strip the catalog prefix so the path can be re-rooted at the
        # OPeNDAP endpoint
        url = url[25:]
        dap_url = thredds + url + '#fillmismatch'
        openFile = Dataset(dap_url, 'r')
        for ii in range(len(variables)):
            dum = openFile.variables[variables[ii].name]
            variables[ii].data = np.append(variables[ii].data, dum[:].data)
        openFile.close()
    # convert seconds since 1900-01-01 into days, then into pandas timestamps
    tmp = variables[0].data/60/60/24
    time_converted = pd.to_datetime(tmp, unit='D', origin=pd.Timestamp('1900-01-01'))
return variables, time_converted
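# Sketch of the collection/loading steps, assuming `data` is the JSON object
# returned by M2M_Call and `variables` is a populated structtype (see below);
# the regex tag is an illustrative example.
def _demo_load(data, variables):
    nclist = M2M_Files(data, tag=r'.*METBK.*\.nc$')
    return M2M_Data(nclist, variables)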
class var(object):
def __init__(self):
"""A Class that generically holds data with a variable name
and the units as attributes"""
self.name = ''
self.data = np.array([])
self.units = ''
def __repr__(self):
return_str = "name: " + self.name + '\n'
return_str += "units: " + self.units + '\n'
return_str += "data: size: " + str(self.data.shape)
return return_str
class structtype(object):
def __init__(self):
""" A class that imitates a Matlab structure type
"""
self._data = []
    def __getitem__(self, index):
        """Index into the struct; indexing one past the end appends a new var."""
        if index == len(self._data):
            self._data.append(var())
return self._data[index]
def __len__(self):
return len(self._data)
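# structtype grows on demand: indexing one past the end appends a fresh var,
# which is how M2M_URLs below populates var_list without pre-sizing it.
def _demo_structtype():
    var_list = structtype()
    var_list[0].name = 'time'
    var_list[0].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    print(len(var_list), var_list[0])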
def M2M_URLs(platform_name,node,instrument_class,method):
var_list = structtype()
#MOPAK
if platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#FLORT
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
#FDCHP
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/telemetered/fdchp_a_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
#ADCP
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#ZPLSC
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#WAVSS
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
#VELPT
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
#PCO2W
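# Seawater pCO2 (Sunburst SAMI-CO2): thermistor temperature plus the
# computed partial pressure of CO2 in seawater.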
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#PHSEN
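# Seawater pH (Sunburst SAMI-pH): thermistor temperature plus the computed
# pH (total scale, reported as 'unitless').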
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
#SPKIR
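# Downwelling spectral irradiance (Satlantic OCR-507). The single data
# variable is a vector: one column per wavelength channel.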
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
#PRESF
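# Seafloor pressure (tide measurements, Sea-Bird SBE 26plus): absolute
# pressure plus in-situ temperature.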
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
#CTDBP
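# Pumped CTD (Sea-Bird 16plus V2). Temperature, pressure, and conductivity
# are measured; practical salinity and density are derived products
# computed in uFrame.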
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#VEL3D
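# Turbulent point velocities from the Nortek Vector (VEL3D-C/D) on the
# seafloor multi-function nodes; pressure is in raw 0.001 dbar counts.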
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
#VEL3DK
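# VEL3D-K: Nortek Aquadopp II on the CE09OSPM wire-following profiler;
# 'int_ctd_pressure' is pressure from the co-located profiler CTD.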
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = 'dbar'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#PCO2A
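# Air-sea pCO2 (Pro-Oceanus pCO2-Pro): seawater and atmospheric partial
# pressures plus the computed air-sea CO2 flux.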
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#PARAD
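# Photosynthetically active radiation on the wire-following profiler.
# Note the double underscore in 'parad_k__stc_imodem_instrument' below:
# it matches the stream name as registered in uFrame.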
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
#OPTAA
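# Optical absorption/attenuation meter (WET Labs AC-S). Only the time
# variable is filled for these branches; the spectral products are not
# requested here.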
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/01-OPTAAC000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#NUTNR
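# Nitrate (Satlantic SUNA). Despite the 'Telemetered' method, the uFrame
# stream for these branches is named 'suna_dcl_recovered'.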
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
#MOPAK
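# Buoy 3-axis motion package; as with OPTAA, only the time variable is
# filled for these branches.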
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK
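# Bulk meteorology package (ASIMET): surface ocean and atmosphere state
# plus minute flux products. Irradiance and heat-flux units below are
# W/m2; relative humidity is in percent.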
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
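# Note: every branch in this chain repeats the same name/data/units
# boilerplate. A hypothetical helper along the lines of the commented
# sketch below (the name _fill_var_list and its arguments are
# illustrative, not part of this module) could populate a branch from
# two parallel lists; it is kept as a comment so the elif chain stays
# syntactically intact.
#
# def _fill_var_list(var_list, names, units):
#     # assign names/units pairwise and reset each data buffer
#     for i, (n, u) in enumerate(zip(names, units)):
#         var_list[i].name = n
#         var_list[i].data = np.array([])
#         var_list[i].units = u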
#FLORT
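# Three-wavelength fluorometer: chlorophyll-a and CDOM fluorescence plus volume scattering and optical backscatter.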
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
#FDCHP
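# Direct-covariance flux package; only the time base is requested from this stream.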
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/recovered_host/fdchp_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA
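# Dissolved oxygen optode; NSIF branches request the full optode record, MFN branches only the oxygen estimates.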
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
#ADCP
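# Acoustic Doppler current profiler; earth-referenced velocity profiles plus instrument attitude (heading/pitch/roll).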
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#WAVSS
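# Surface wave spectra instrument; bulk wave statistics (heights, periods, direction, spread).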
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
#VELPT
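# Single-point velocity meter; velocity components plus attitude, temperature, and pressure in raw instrument units.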
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
# the DCL-logged stream is not used for this branch; the non-DCL recovered stream below is requested instead
#uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
#PCO2W
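# Seawater pCO2 sensor; thermistor temperature and partial pressure of CO2 in seawater.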
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#PHSEN
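# Seawater pH sensor; thermistor temperature and spectrophotometric pH.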
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
#SPKIR
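# Multispectral radiometer; downwelling irradiance vector (uW cm-2 nm-1 per channel).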
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
#PRESF
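# Seafloor pressure sensor; absolute seafloor pressure (tide measurement) and seawater temperature.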
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
#CTDBP (pumped CTD)
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#VEL3D (3-D single-point turbulent velocity)
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
#PCO2A (air-sea pCO2)
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#OPTAA (optical absorption and attenuation)
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/01-OPTAAC000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#NUTNR (nitrate, SUNA)
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
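#CTDBP (pumped CTD) - RecoveredInst method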
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
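#CTDPF (profiler CTD) - RecoveredWFP method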
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
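#ADCP - RecoveredInst method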
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
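#ZPLSC - RecoveredInst method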
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
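#VELPT - RecoveredInst method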
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
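#VEL3D - RecoveredWFP and RecoveredInst methods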
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = 'dbar'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
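#PRESF Data below (seafloor pressure / tide recorders)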
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
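#PHSEN Data below (seawater pH sensors)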
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
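#PCO2W Data below (in-water pCO2 sensors)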
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
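#PARAD Data below (photosynthetically available radiation)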
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
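#NUTNR Data below (SUNA nitrate concentrations)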
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
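#FDCHP Data below (direct covariance flux package; only the time axis is populated here)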
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/recovered_inst/fdchp_a_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
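#FLORT Data below (three-wavelength fluorometers)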
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/recovered_inst/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/recovered_inst/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
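#DOSTA Data below (dissolved oxygen; the CE09OSPM profiler entry maps the DOFST-K sensor onto this class)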
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
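#WAVSS_Stats Data below (bulk wave statistics derived from the ADCPT-M instruments on the ISSM seafloor nodes)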
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_inst/adcpt_m_instrument_log9_recovered'
var_list[0].name = 'time'
var_list[1].name = 'significant_wave_height'
var_list[2].name = 'peak_wave_period'
var_list[3].name = 'peak_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'seconds'
var_list[3].units = 'degrees'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_inst/adcpt_m_instrument_log9_recovered'
var_list[0].name = 'time'
var_list[1].name = 'significant_wave_height'
var_list[2].name = 'peak_wave_period'
var_list[3].name = 'peak_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'seconds'
var_list[3].units = 'degrees'
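#Streamed benthic experiment package (BEP) Data below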
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/06-CTDBPN106/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_no_seawater_pressure'
var_list[5].name = 'ctdbp_no_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/06-CTDBPO108/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_no_seawater_pressure'
var_list[5].name = 'ctdbp_no_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
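#BEP oxygen variables are served through the co-located CTDBP data streams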
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'DOSTA' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/06-CTDBPN106/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'DOSTA' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/06-CTDBPO108/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/10-PHSEND103/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/10-PHSEND107/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/09-PCO2WB103/streamed/pco2w_b_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/09-PCO2WB104/streamed/pco2w_b_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'ADCP' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/05-ADCPTB104/streamed/adcp_velocity_beam'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'ADCP' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/05-ADCPSI103/streamed/adcp_velocity_beam'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'VEL3D' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/07-VEL3DC108/streamed/vel3d_cd_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'VEL3D' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/07-VEL3DC107/streamed/vel3d_cd_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
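#OPTAA Data below (optical absorption and attenuation; only the time axis is populated here)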
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'OPTAA' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/08-OPTAAD106/streamed/optaa_sample'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'OPTAA' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/08-OPTAAC104/streamed/optaa_sample'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#CSPP Data below
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/08-FLORTJ000/telemetered/flort_dj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/08-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/08-FLORTJ000/telemetered/flort_dj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/08-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/02-DOSTAJ000/telemetered/dosta_abcdjm_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/02-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/02-DOSTAJ000/telemetered/dosta_abcdjm_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/02-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/09-CTDPFJ000/telemetered/ctdpf_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/09-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/09-CTDPFJ000/telemetered/ctdpf_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/09-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/10-PARADJ000/telemetered/parad_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/10-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/10-PARADJ000/telemetered/parad_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/10-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/06-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/06-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/07-SPKIRJ000/telemetered/spkir_abj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/07-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/07-SPKIRJ000/telemetered/spkir_abj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/07-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/05-VELPTJ000/telemetered/velpt_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/05-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/05-VELPTJ000/telemetered/velpt_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/05-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
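#Coastal surface-piercing profiler (CE02SHSP and CE07SHSP) Data below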
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/07-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/07-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/01-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/01-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/08-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/08-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/09-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/09-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/05-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/05-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/06-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/06-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/02-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/02-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
# (remaining entries reconstructed to match the identical CE02SHSP VELPT block above)
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
import gym
import numpy as np
from scipy.linalg import circulant
from gym.spaces import Tuple, Box, Dict
from copy import deepcopy
class SplitMultiAgentActions(gym.ActionWrapper):
'''
Splits flat mujoco-generated actions into a dict holding a tuple of per-agent actions.
'''
def __init__(self, env):
super().__init__(env)
self.n_agents = self.metadata['n_actors']
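# Split the joint low/high bounds into equal-sized per-agent chunks.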
lows = np.split(self.action_space.low, self.n_agents)
highs = np.split(self.action_space.high, self.n_agents)
self.action_space = Dict({
'action_movement': Tuple([Box(low=low, high=high, dtype=self.action_space.dtype)
for low, high in zip(lows, highs)])
})
def action(self, action):
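# Flatten the per-agent action rows back into the single joint vector the wrapped env expects.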
return action['action_movement'].flatten()
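# Hedged usage sketch (assumes a hypothetical base_env with
# metadata['n_actors'] == 2 and a flat Box action space of shape (4,)):
# the wrapper advertises one Box per agent, and `action` expects the dict to
# carry an ndarray with one row per agent, which it flattens back into the
# joint vector.
#
#   env = SplitMultiAgentActions(base_env)
#   a = {'action_movement': np.zeros((2, 2))}   # one row per agent
#   env.action(a)                               # -> flat np.zeros(4)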
class JoinMultiAgentActions(gym.ActionWrapper):
def __init__(self, env):
super().__init__(env)
self.n_agents = self.metadata['n_actors']
low = np.concatenate([space.low for space in self.action_space.spaces])
# (assumed continuation, mirroring SplitMultiAgentActions: join the per-agent
# bounds and expose one flat Box over the joint action vector)
high = np.concatenate([space.high for space in self.action_space.spaces])
self.action_space = Box(low=low, high=high, dtype=self.action_space.spaces[0].dtype)
def action(self, action):
# Split the flat joint action into one chunk per agent.
return np.split(action, self.n_agents)
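# Hedged round-trip sketch using plain numpy (no env required): joining
# per-agent actions and splitting the joint vector are inverses as long as all
# agents have equally sized action spaces.
#
#   parts = [np.array([0., 1.]), np.array([2., 3.])]
#   joint = np.concatenate(parts)   # array([0., 1., 2., 3.])
#   np.split(joint, 2)              # recovers the two per-agent arrays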
##############################################################################
#
# Copyright (c) 2003-2018 by The University of Queensland
# http://www.uq.edu.au
#
# Primary Business: Queensland, Australia
# Licensed under the Apache License, version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
#
# Development until 2012 by Earth Systems Science Computational Center (ESSCC)
# Development 2012-2013 by School of Earth Sciences
# Development from 2014 by Centre for Geoscience Computing (GeoComp)
#
##############################################################################
from __future__ import print_function, division
__copyright__="""Copyright (c) 2003-2018 by The University of Queensland
http://www.uq.edu.au
Primary Business: Queensland, Australia"""
__license__="""Licensed under the Apache License, version 2.0
http://www.apache.org/licenses/LICENSE-2.0"""
__url__="https://launchpad.net/escript-finley"
"""
Test suite for the util.py module.
The tests must be linked with a function space class object in the setUp method;
to run them use:
from esys.bruce import Brick
class Test_utilOnBruce(Test_util_no_tagged_data):
def setUp(self):
self.domain = Brick(10,10,13)
self.functionspace = ContinuousFunction(self.domain)
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(Test_utilOnBruce))
unittest.TextTestRunner(verbosity=2).run(suite)
This test assumes that samples with x_0 coordinate 0 are tagged with 1 and that all samples tagged with 1 have
x_0 coordinate 0.
:note: at this stage this test will not pass as it tests for functionality that has not been implemented yet. It also
does not test the full functionality of util.py yet.
:var __author__: name of author
:var __copyright__: copyrights
:var __license__: licence agreement
:var __url__: url entry point on documentation
:var __version__: version
:var __date__: date of the version
"""
__author__="<NAME>, <EMAIL>"
import esys.escriptcore.utestselect as unittest
import numpy
from esys.escript import *
from test_util_base import Test_util_base, Test_util_values
from test_util_reduction_new import Test_util_reduction_new
from test_util_unary_new import Test_util_unary_new
from test_util_binary_new import Test_util_binary_new
from test_util_binary_leftover import Test_util_binary_leftover
## these aspects are tested in the _new tests
#from test_util_overloaded_binary_no_tagged_data import Test_util_overloaded_binary_no_tagged_data
#from test_util_overloaded_binary_with_tagged_data import Test_util_overloaded_binary_with_tagged_data
#from test_util_unary_no_tagged_data import Test_util_unary_no_tagged_data
#from test_util_unary_with_tagged_data import Test_util_unary_with_tagged_data
#from test_util_binary_no_tagged_data import Test_util_binary_no_tagged_data
#from test_util_binary_with_tagged_data import Test_util_binary_with_tagged_data
from test_util_spatial_functions1 import Test_Util_SpatialFunctions_noGradOnBoundary_noContact
from test_util_spatial_functions2 import Test_Util_SpatialFunctions_noGradOnBoundary
from test_util_spatial_functions3 import Test_Util_SpatialFunctions
from test_util_slicing_no_tagged_data import Test_util_slicing_no_tagged_data
from test_util_slicing_with_tagged_data import Test_util_slicing_with_tagged_data
class Test_util_reduction(Test_util_reduction_new):
""" test for reduction operation Lsup,sup,inf for all data types"""
pass
class Test_util_unary(Test_util_unary_new):
""" all unary tests """
pass
class Test_util_binary(Test_util_binary_new, Test_util_binary_leftover):
"""
tests for all binary operations
"""
pass
## Testing of these ops is now in Test_util_binary
#class Test_util_overloaded_binary(Test_util_overloaded_binary_no_tagged_data,Test_util_overloaded_binary_with_tagged_data):
#"""test for all overloaded operation"""
#pass
class Test_util(Test_util_unary_new,Test_util_reduction_new, Test_util_binary):
"""all tests"""
pass
class Test_util_overloaded_binary_still_failing(Test_util_base):
"""
These overloaded operations still fail:
- wrong return value of Data binaries (Mantis 0000054)
"""
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_constData_rank0_Symbol_rank1(self):
arg0=Data(-4.93686078973,self.functionspace)
arg1=Symbol(shape=(2,))
res=arg0+arg1
s1=numpy.array([0.51662736235119944, 2.8171396846123073])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([-4.4202334273802917, -2.1197211051191838]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_constData_rank0_Symbol_rank2(self):
arg0=Data(-2.22764991169,self.functionspace)
arg1=Symbol(shape=(4, 5))
res=arg0+arg1
s1=numpy.array([[2.0746979587719538, 0.99992890307042437, -2.3128078094931848, -4.0103712739722654,
4.8853529531011013],
[0.09856857946648212, 0.73520899085847624, -3.6585265509750844, 3.0095320582437939, 3.4125902906059444],
[1.4894150898632059,
-1.4124339049368793, 1.5397397961722188, 4.8841402613336111, 1.1241155288598881], [2.8283598865494408,
1.5980765295723476,
-1.0022373011497274, -2.0622178471715067, 4.9699555072046042]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[-0.15295195292152819, -1.2277210086230577, -4.5404577211866668, -6.2380211856657475,
2.6577030414076193],
[-2.1290813322269999, -1.4924409208350058, -5.8861764626685664, 0.78188214655031185, 1.1849403789124624],
[-0.73823482183027611,
-3.6400838166303613, -0.68791011552126324, 2.6564903496401291, -1.103534382833594], [0.60070997485595878,
-0.62957338212113445,
-3.2298872128432095, -4.2898677588649887, 2.7423055955111222]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_constData_rank0_Symbol_rank3(self):
arg0=Data(-4.67318656609,self.functionspace)
arg1=Symbol(shape=(6, 2, 2))
res=arg0+arg1
s1=numpy.array([[[3.9409337165894076, 1.6101568824796857], [1.2441782896909706, 1.2872758759353298]],
[[4.022494973005406,
-2.758155583474049], [1.8311643900357311, 4.0940647266277157]], [[2.5378127449303243, 0.063283784588161751],
[4.5495644157820809,
2.8673770080506742]], [[-0.93484143473477577, 4.914438575705228], [-1.951066895455166, -1.2021165219313259]],
[[-0.4220608661301819, -4.9682501775464418], [0.98338081352961559, 3.4054674805751066]], [[3.9967556325744127,
-4.7659141789100659],
[0.34265275409881024, -0.25226631819007572]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[-0.73225284950136693, -3.0630296836110888], [-3.429008276399804, -3.3859106901554448]],
[[-0.6506915930853685, -7.4313421495648235], [-2.8420221760550435, -0.57912183946305884]],
[[-2.1353738211604503,
-4.6099027815026128], [-0.12362215030869361, -1.8058095580401003]], [[-5.6080280008255503,
0.24125200961445348],
[-6.6242534615459405, -5.8753030880221004]], [[-5.0952474322209564, -9.6414367436372164],
[-3.6898057525611589,
-1.2677190855156679]], [[-0.67643093351636185, -9.4391007450008395], [-4.3305338119919643,
-4.9254528842808503]]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_constData_rank0_Symbol_rank4(self):
arg0=Data(4.16645075056,self.functionspace)
arg1=Symbol(shape=(3, 2, 3, 4))
res=arg0+arg1
s1=numpy.array([[[[1.5917180025121436, -0.50082927718401749, 0.71261274386013618, 2.4216324938382936],
[2.5988764746053095,
0.15985324844397741, -2.1952754277135025, -2.1102730593254035], [4.7816092243808672, -3.1240954141765496,
4.0831220997721331, 2.4301203557965216]], [[3.4691826046114969, -2.4961081730013177, -4.9623977358253111,
2.2652744558918698],
[0.41830032681767193, -3.2186897293959649, -4.1590967541108324, -1.7789994379155196], [-0.17901184206486764,
-0.85223673399918809, 1.2515459884606104, -4.530305999148645]]], [[[-4.9028671865135838, 3.9106181278983012,
0.69716765577825246, 4.8537569187159395], [-2.8912890367657318, -4.8177854256421764, -4.3303142092509415,
-0.99481907472179198], [-1.2640734452454305, 4.8028129765204639, -2.5491771511234962, 3.2550469051981921]],
[[2.0572417475748761, 3.7392706991121187, 4.5778678295843704, 3.6658188498258486], [-2.7069743698567206,
-2.684769111460461, -3.0941141983763156, -2.1180719361316589], [-1.4744678905986119, 1.926687036555828,
2.2206999030392947, 0.72956973127168734]]], [[[-2.8290294475300151, -3.1467788245496631, 3.6471044178360348,
3.5237454065241209], [-1.6165850845596652, 1.2437746199742081, -2.8022357261752004, -1.9652183524467781],
[-2.3842126490032092, 3.7068998814751613, -1.389546865398994, -1.7153758702474589]], [[-1.0746517242894815,
-4.3575382718398723, 0.93160793707280121, 1.4002531109392731], [-1.5745690740270168, -3.4394046042905124,
4.2641517580348793, -1.7620679696550843], [-4.2559205627171135, 2.1912319337278863, 1.1987265764805723,
-3.2957352772592809]]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[[5.7581687530761378, 3.6656214733799768, 4.8790634944241305, 6.5880832444022879],
[6.7653272251693037, 4.3263039990079717, 1.9711753228504918, 2.0561776912385907], [8.9480599749448615,
1.0423553363874447, 8.2495728503361274, 6.5965711063605159]], [[7.6356333551754911, 1.6703425775626766,
-0.7959469852613168, 6.4317252064558641], [4.5847510773816662, 0.94776102116802941, 0.0073539964531619262,
2.3874513126484747], [3.9874389084991266, 3.3142140165648062, 5.4179967390246047, -0.36385524858465068]]],
[[[-0.7364164359495895, 8.0770688784622955, 4.8636184063422467, 9.0202076692799338], [1.2751617137982625,
-0.6513346750781821, -0.16386345868694718, 3.1716316758422023], [2.9023773053185637, 8.9692637270844582,
1.6172735994404981, 7.4214976557621863]], [[6.2236924981388704, 7.905721449676113, 8.7443185801483647,
7.8322696003898429], [1.4594763807072737, 1.4816816391035332, 1.0723365521876786, 2.0483788144323354],
[2.6919828599653823, 6.0931377871198222, 6.3871506536032889, 4.8960204818356816]]], [[[1.3374213030339792,
1.0196719260143312, 7.8135551684000291, 7.6901961570881152], [2.5498656660043291, 5.4102253705382024,
1.3642150243887938, 2.2012323981172162], [1.7822381015607851, 7.8733506320391555, 2.7769038851650003,
2.4510748803165354]], [[3.0917990262745128, -0.19108752127587803, 5.0980586876367955, 5.5667038615032673],
[2.5918816765369774, 0.72704614627348185, 8.4306025085988736, 2.40438278090891], [-0.089469812153119221,
6.3576826842918805, 5.3651773270445666, 0.87071547330471333]]]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_constData_rank1_Symbol_rank0(self):
arg0=Data(numpy.array([3.8454947431609945, 3.4801848055393254]),self.functionspace)
arg1=Symbol(shape=())
res=arg0+arg1
s1=numpy.array(0.181985677208)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([4.0274804203691783, 3.6621704827475092]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_constData_rank1_Symbol_rank1(self):
arg0=Data(numpy.array([2.6719646801005306, 4.0262173014652003]),self.functionspace)
arg1=Symbol(shape=(2,))
res=arg0+arg1
s1=numpy.array([3.7355891147806837, -3.0309968912239551])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([6.4075537948812142, 0.99522041024124519]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_constData_rank2_Symbol_rank0(self):
arg0=Data(numpy.array([[2.209887477038702, 2.087043312051243, 3.7254247294014622,
-3.7510652436671732, 0.70343608099575317], [4.1654611738215745, 1.5418518980850271,
2.7730022594684423, 3.386030420596251, 1.2758288509710365], [2.2174938185138764,
-1.244837837360393, 2.2331288285078887, -1.1442348969501834, 1.9394801392868004],
[0.68612447219195705, 0.7127527031233436, -3.6346644102130776, 2.0671128943191714,
3.7445028703597156]]),self.functionspace)
arg1=Symbol(shape=())
res=arg0+arg1
s1=numpy.array(4.82316401579)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[7.0330514928326018, 6.9102073278451428, 8.5485887451953619,
1.0720987721267266, 5.5266000967896529], [8.9886251896154743, 6.3650159138789268,
7.596166275262342, 8.2091944363901508, 6.0989928667649362], [7.0406578343077761,
3.5783261784335068, 7.0562928443017885, 3.6789291188437163, 6.7626441550807002],
[5.5092884879858568, 5.5359167189172434, 1.1884996055808221, 6.8902769101130712,
8.5676668861536154]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_constData_rank2_Symbol_rank2(self):
arg0=Data(numpy.array([[-3.62961836797558, 4.0323249470469893, -2.4833229912823516,
-0.0081902035785272886, -0.26448613257378906], [2.0867535529248489, 0.049446344294963751,
4.4906317789174501, 2.6121865600043499, 1.3687146632565392], [4.2509170325103511,
2.9845191554148567, -0.9329820582137387, -0.58236994049271118, -3.4448732067194388],
[-2.3231599587033402, 1.6550934434842866, -4.5990521452319584, -2.1470268566500152,
-3.9698084155531008]]),self.functionspace)
arg1=Symbol(shape=(4, 5))
res=arg0+arg1
s1=numpy.array([[3.3234017918244003, 3.3386199217996175, -2.5928786077225316,
-4.1429140632213803, 0.42204291369978719], [3.4123580113357495, -3.9076190537235664,
1.8779298531672159, 0.98377543853039562, -4.9365820051249267], [4.5252395032935961,
-4.8193051910732096, 1.060979071451845, -3.2927325266544871, -3.3828356655691971],
[-4.6411804903406182, -0.42921544747540707, -2.4541073523344323, -0.70845691989162329,
-1.2357505826155588]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[-0.3062165761511797, 7.3709448688466068, -5.0762015990048832,
-4.1511042667999076, 0.15755678112599814], [5.4991115642605983, -3.8581727094286027,
6.3685616320846661, 3.5959619985347455, -3.5678673418683875], [8.7761565358039473,
-1.834786035658353, 0.12799701323810631, -3.8751024671471983, -6.8277088722886354],
[-6.9643404490439584, 1.2258779960088795, -7.0531594975663907, -2.8554837765416385,
-5.2055589981686596]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_constData_rank3_Symbol_rank0(self):
arg0=Data(numpy.array([[[-2.0819775543023136, 4.4438294149957258], [1.203494127071604,
1.3934659764012478]], [[-1.7207192546012995, 1.128687542370864], [1.013953229943537,
2.0535582502969056]], [[-1.8482126685735398, 0.64499519705235819],
[-4.1200947648310313, 3.8041018736261574]], [[-0.12876390427677542, -0.26859118353213773],
[-2.8945993824974847, -3.3476923883525944]], [[3.1332107854705562, -4.6334666373330595],
[3.0499420638074994, -2.7959034777693104]], [[4.726734207260332, -1.3724501610660034],
[3.3499737674080023, -2.515294322458935]]]),self.functionspace)
arg1=Symbol(shape=())
res=arg0+arg1
s1=numpy.array(0.860178486532)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[-1.2217990677700952, 5.3040079015279442], [2.0636726136038224,
2.2536444629334662]], [[-0.86054076806908109, 1.9888660289030824], [1.8741317164757554,
2.913736736829124]], [[-0.98803418204132143, 1.5051736835845766], [-3.2599162782988129,
4.6642803601583758]], [[0.73141458225544298, 0.59158730300008067], [-2.0344208959652663,
-2.487513901820376]], [[3.9933892720027746, -3.7732881508008411], [3.9101205503397178,
-1.935724991237092]], [[5.5869126937925504, -0.51227167453378497], [4.2101522539402207,
-1.6551158359267166]]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_constData_rank3_Symbol_rank3(self):
arg0=Data(numpy.array([[[-1.849788129717993, 0.64693319038907493], [3.0379670344950327,
0.80277076526299229]], [[2.4995340022105639, -4.3955703049125949], [0.58331276679079203,
0.044119077451267863]], [[2.2979922792046947, 1.6054844683234073], [0.50524258350986084,
-3.5539312710422779]], [[-1.1980433912188793, -2.6450000406046001], [-2.4128326188310121,
0.80678465051263526]], [[-2.9963692865064209, -1.0152803020104519], [-0.21931259441936035,
-1.153119362615751]], [[-4.2927186206837717, 0.4561872009236847], [3.0860876046130041,
-0.78568544768378068]]]),self.functionspace)
arg1=Symbol(shape=(6, 2, 2))
res=arg0+arg1
s1=numpy.array([[[-3.4985389035935222, 1.8888458641158987], [-4.2891085749380489,
2.8296217607019845]], [[-0.8200921678141917, 4.4359194831012676],
[-4.6185751325042244, 0.16520675598470014]], [[-2.801157092531934, 3.6231020804204928],
[1.5439760747845899, 2.0378140868272894]], [[0.99864930993784018, 3.369884315459073],
[4.399815205976239, -4.9546136700941936]], [[1.6240932313892289, -3.4517363344048615],
[2.8668483027947236, 1.1624090061600336]], [[2.6364367974081624, 2.628371373764919],
[-2.5877409052653833, -1.29236451403668]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[-5.3483270333115147, 2.5357790545049737], [-1.2511415404430162,
3.6323925259649767]], [[1.6794418343963722, 0.040349178188672674],
[-4.0352623657134323, 0.209325833435968]], [[-0.50316481332723928, 5.2285865487439001],
[2.0492186582944507, -1.5161171842149885]], [[-0.19939408128103908, 0.72488427485447282],
[1.9869825871452269, -4.1478290195815584]], [[-1.372276055117192, -4.4670166364153134],
[2.6475357083753632, 0.0092896435442826331]], [[-1.6562818232756094,
3.0845585746886037], [0.49834669934762088, -2.0780499617204606]]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_constData_rank4_Symbol_rank0(self):
arg0=Data(numpy.array([[[[-0.026017904532606551, -0.80192450547405958,
0.93785799257835656, -4.4900007911078319], [-1.8444162073720949,
1.2059856695600812, 1.8326324480310756, 3.3745782356451564],
[3.0929324433706693, -0.94197156488767142, -2.3469684397851207,
-4.8976052662192613]], [[1.2658444546015346, 3.0389250549456399,
-2.567254770133963, 3.7513728753285314], [-0.10225306211433605,
-0.34121316520335299, -2.8745573331597321, -0.73976781968982142],
[4.6114590072566681, 3.5325642767850063, 2.1587079910040661,
3.8644723652636905]]], [[[-2.5953113243103623, 0.6437882672443429,
4.5677362343759853, 3.4108524985046262], [2.9904338528780352,
0.73113299006492127, 2.4253724263400445, 3.8646536702562031],
[-1.2545053686514152, -4.2675706218911706, -3.6576679389702105,
-0.29502287354943402]], [[0.9550527228483654, 2.9537233833481267,
-2.6904009310953283, 1.5998857010519698], [-3.7171702199982004,
-1.1578306702024044, 1.764070139728485, -1.1506068782808967],
[1.5727320181060982, 0.18468074769418674, 3.3262967055395372,
-1.2208265816075849]]], [[[-0.25003967903418278, -2.603663543909648,
4.6824047463125531, 1.0968919539473987], [1.3471700099604398,
-3.8321880437450218, -4.2809409903460676, 1.2933005361204906],
[-2.857251250328674, 3.6768205829450178, -2.7999953058490643,
2.1117422072666692]], [[-2.1994223710236427, 3.7669030216280923,
-3.5232105054852991, -3.7071480752824462], [-0.35952695279389246,
2.5451704526750873, -4.2842310996736144, -1.3813503044378783],
[-2.5647173415905145, 4.7437501634141572, -4.2234318870342245,
2.1862042652792866]]]]),self.functionspace)
arg1=Symbol(shape=())
res=arg0+arg1
s1=numpy.array(0.33323555487)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[[0.30721765033724147, -0.46868895060421156,
1.2710935474482046, -4.1567652362379839], [-1.5111806525022469,
1.5392212244299293, 2.1658680029009236, 3.7078137905150044],
[3.4261679982405173, -0.6087360100178234, -2.0137328849152727,
-4.5643697113494133]], [[1.5990800094713826, 3.3721606098154879,
-2.234019215264115, 4.0846084301983794], [0.23098249275551197,
-0.0079776103335049697, -2.541321778289884, -0.4065322648199734],
[4.9446945621265161, 3.8657998316548543, 2.4919435458739141,
4.1977079201335386]]], [[[-2.2620757694405143, 0.97702382211419092,
4.9009717892458333, 3.7440880533744743], [3.3236694077478832,
1.0643685449347693, 2.7586079812098925, 4.1978892251260511],
[-0.92126981378156714, -3.9343350670213226, -3.3244323841003625,
0.038212681320413999]], [[1.2882882777182134, 3.2869589382179747,
-2.3571653762254803, 1.9331212559218178], [-3.3839346651283524,
-0.82459511533255636, 2.097305694598333, -0.81737132341104868],
[1.9059675729759462, 0.51791630256403476, 3.6595322604093852,
-0.88759102673773693]]], [[[0.083195875835665234, -2.2704279890398,
5.0156403011824011, 1.4301275088172467], [1.6804055648302878,
-3.4989524888751737, -3.9477054354762195, 1.6265360909903386],
[-2.524015695458826, 4.0100561378148658, -2.4667597509792163,
2.4449777621365172]], [[-1.8661868161537947, 4.1001385764979403,
-3.1899749506154511, -3.3739125204125981], [-0.026291397924044446,
2.8784060075449354, -3.9509955448037664, -1.0481147495680303],
[-2.2314817867206664, 5.0769857182840052, -3.8901963321643764,
2.5194398201491346]]]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_constData_rank4_Symbol_rank4(self):
arg0=Data(numpy.array([[[[1.6204760394819004, -0.95393695229398112, -1.221681223499369, 2.6618713903411937],
[-1.5387523541807724, 4.6220978399651482, -2.1795716817360713, -3.776821154104939], [1.4330066566763016,
3.7880327985429378, -0.65902727001966976, -4.29506128665055]], [[-4.0199255222547103, -3.644811287300751,
3.6508998060332054, -3.569704984460552], [-3.8429890733645489, -2.9119635791576437, 2.3183698092323652,
1.3643661323778851], [2.9328022056563725, -0.080129403375118535, 0.15566128013433289, 2.344258136058456]]],
[[[3.03272210358924, 2.8841814084596393, -4.059068204445289, -0.091640986980607408], [-4.2591024547151859,
-0.36305436045316863, 0.19284537915686428, 4.5041324479849649], [1.2988816365062537, -1.6778808169453416,
-3.5496975707176146, 4.314356820196215]], [[-1.4533462849506518, -1.003910808707118, 3.8948057966291092,
1.266066103629278], [-4.4119138102620346, -2.1246183047037603, -2.4610566322999161, -3.5862383252945271],
[2.9290698526446066, -0.26093763373887136, 0.87809331627623344, -0.47993365832407076]]], [[[2.1717793325666745,
0.83592896851733212, -2.2538107669063279, 1.6303402530881517], [-0.53207705017646578, -4.5214994998308979,
-3.6999121226789988, 3.5355643886671686], [3.3936340080223193, -2.1140030580705247, 1.821327452830638,
-1.6123768640462668]], [[2.3105165926895497, -3.0414367260786292, -1.5788704194425076, 1.0377969965556915],
[1.3575822980511116, 4.3465002873169833, 0.55678010189701688, 4.99079375906609], [4.2819911907361128,
4.9615031124625322, 2.7964852390480104, 0.029646894001982282]]]]),self.functionspace)
arg1=Symbol(shape=(3, 2, 3, 4))
res=arg0+arg1
s1=numpy.array([[[[3.779495003239937, -4.7840877608643506, 2.651273004571375, -2.179381582597685],
[-0.27370078331190673, -3.6151069379138887, -0.6880481455894909, 4.4373993248198644], [-1.6276288613086387,
-1.6376839670015721, -3.1138607609774835, -2.7809800576738719]], [[0.85446276622548556, -4.3676040003341114,
-4.0083595770538496, -3.915065868011578], [1.6989039436984452, 3.5347026474299419, -1.8748410832866327,
-4.6526613314583045], [1.9480513434936046, 4.7386182205273322, -1.2001630607496541, 1.8094726084650006]]],
[[[4.9996435011863589, 0.60285036470010045, 1.457536438507919, 2.7443970579013879], [4.131864622110669,
0.20996245110639133, 3.3652305004680549, 3.1437873739212119], [-3.0818670302029405, -2.461603163946088,
-0.56609916674720218, -4.1186964404844861]], [[-2.7183232427482262, -2.1509712746053999, -2.281087666097271,
-2.4094567126275344], [-3.4723848022755091, -1.563218902128277, -4.7598832341275878, 1.8751725484288029],
[-4.0474621098792882, 0.59894943914858167, 1.0736279895120182, 4.5015525072725033]]], [[[-3.0082200796749703,
0.23283074563588535, 2.5230303985659734, 4.8262414779000231], [3.3772486493634837, 1.8234317033464915,
-1.7905158376185746, -2.9990918311449244], [-3.6765085717620041, 2.0057610304617572, -2.1487273241068525,
-4.1965541804451352]], [[0.26210933249566715, -2.9167787158271663, -0.89589477578380539, -0.41427249402553912],
[-3.1708181836677332, 4.3890602408555726, -1.1754542095914857, 4.8422639037274919], [-3.0044937138520034,
-4.1626528668210083, 0.20385989364778467, -0.016309737359709864]]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[[5.3999710427218375, -5.7380247131583317, 1.429591781072006, 0.48248980774350869],
[-1.8124531374926791, 1.0069909020512595, -2.8676198273255622, 0.66057817071492542], [-0.19462220463233715,
2.1503488315413657, -3.7728880309971533, -7.0760413443244214]], [[-3.1654627560292248, -8.0124152876348624,
-0.35745977102064419, -7.4847708524721295], [-2.1440851296661037, 0.62273906827229819, 0.44352872594573256,
-3.2882951990804195], [4.8808535491499772, 4.6584888171522136, -1.0445017806153212, 4.1537307445234566]]],
[[[8.0323656047755989, 3.4870317731597398, -2.60153176593737, 2.6527560709207805], [-0.12723783260451693,
-0.1530919093467773, 3.5580758796249192, 7.6479198219061768], [-1.7829853936966868, -4.1394839808914297,
-4.1157967374648168, 0.19566037971172889]], [[-4.171669527698878, -3.154882083312518, 1.6137181305318382,
-1.1433906089982564], [-7.8842986125375436, -3.6878372068320373, -7.2209398664275035, -1.7110657768657243],
[-1.1183922572346816, 0.33801180540971032, 1.9517213057882516, 4.0216188489484326]]], [[[-0.83644074710829575,
1.0687597141532175, 0.26921963165964558, 6.4565817309881748], [2.8451715991870179, -2.6980677964844064,
-5.4904279602975734, 0.53647255752224421], [-0.28287456373968478, -0.10824202760876744, -0.3273998712762145,
-5.808931044491402]], [[2.5726259251852168, -5.9582154419057956, -2.474765195226313, 0.62352450253015235],
[-1.8132358856166215, 8.7355605281725559, -0.61867410769446884, 9.833057662793582], [1.2774974768841094,
0.79885024564152385, 3.0003451326957951, 0.013337156642272419]]]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_taggedData_rank0_Symbol_rank0(self):
arg0=Data(3.50668349593,self.functionspace)
arg0.setTaggedValue(1,-3.09146650776)
arg1=Symbol(shape=())
res=arg0+arg1
s1=numpy.array(-4.32369560802)
sub=res.substitute({arg1:s1})
ref=Data(-0.81701211209,self.functionspace)
ref.setTaggedValue(1,-7.41516211578)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_taggedData_rank0_Symbol_rank1(self):
arg0=Data(3.83444600418,self.functionspace)
arg0.setTaggedValue(1,-0.266863397142)
arg1=Symbol(shape=(2,))
res=arg0+arg1
s1=numpy.array([3.6938635924807581, -2.3199399928130826])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([7.5283095966592981, 1.5145060113654574]),self.functionspace)
ref.setTaggedValue(1,numpy.array([3.4270001953384694, -2.5868033899553713]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_taggedData_rank0_Symbol_rank2(self):
arg0=Data(-2.85642807584,self.functionspace)
arg0.setTaggedValue(1,-0.357260114938)
arg1=Symbol(shape=(4, 5))
res=arg0+arg1
s1=numpy.array([[4.4124412590911621, 1.732298167196193, 1.8228166076040306, -3.9853565905277355,
3.3793508288079881], [-1.5339512663354116, -2.8915144317379058, -3.6493591659102464, 1.4243106283527815,
-0.6931246781623841], [4.7714119110273394, 0.45700055229079606, 1.2539528503924027, -1.4029360809413403,
2.8915917074007416], [4.2546657221847255, 3.2639891865967527, -0.4712967898993945, -3.9077971138749112,
-3.5655383189938084]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[1.5560131832472779, -1.1241299086476912, -1.0336114682398536, -6.8417846663716197,
0.52292275296410384], [-4.3903793421792958, -5.74794250758179, -6.5057872417541311, -1.4321174474911027,
-3.5495527540062684], [1.9149838351834552, -2.3994275235530882, -1.6024752254514816, -4.2593641567852245,
0.035163631556857311], [1.3982376463408412, 0.40756111075286849, -3.3277248657432787, -6.7642251897187951,
-6.4219663948376926]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[4.0551811441529519, 1.3750380522579828, 1.4655564926658204,
-4.3426167054659457, 3.0220907138697779], [-1.8912113812736218, -3.248774546676116, -4.0066192808484562,
1.0670505134145714, -1.0503847931005943], [4.4141517960891292, 0.099740437352585865, 0.89669273545419248,
-1.7601961958795505, 2.5343315924625314], [3.8974056072465153, 2.9067290716585426, -0.82855690483760469,
-4.2650572288131219, -3.9227984339320185]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_taggedData_rank0_Symbol_rank3(self):
arg0=Data(-2.98759917871,self.functionspace)
arg0.setTaggedValue(1,-4.26584239637)
arg1=Symbol(shape=(6, 2, 2))
res=arg0+arg1
s1=numpy.array([[[0.65736935684204045, 1.4685807994312459], [0.99740155640158257, -2.8001282911414127]],
[[-0.80947613326718226, -4.0270117786915378], [1.1564198209626229, -4.917538904347448]], [[-1.0488230155998202,
4.0958534641909754], [-4.9502522108275002, -0.19486641488505008]], [[-4.507307254914509, -0.98539101308887389],
[-4.5909807035957675, 2.4265853650826985]], [[-4.252924691613126, 0.42394291278212481], [3.4198717705842103,
-4.6000003047031024]], [[4.9609535782609235, 3.1625779529060711], [0.26834958946896492, 3.0941570460788874]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[-2.3302298218695272, -1.5190183792803218], [-1.9901976223099851, -5.7877274698529799]],
[[-3.7970753119787499, -7.0146109574031055], [-1.8311793577489448, -7.9051380830590157]], [[-4.0364221943113883,
1.1082542854794077], [-7.9378513895390679, -3.1824655935966177]], [[-7.4949064336260767, -3.9729901918004416],
[-7.5785798823073351, -0.56101381362886915]], [[-7.2405238703246937, -2.5636562659294428], [0.43227259187264266,
-7.5875994834146701]], [[1.9733543995493559, 0.17497877419450347], [-2.7192495892426027,
0.10655786736731976]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[-3.6084730395261495, -2.7972615969369441], [-3.2684408399666074,
-7.0659706875096031]], [[-5.0753185296353722, -8.2928541750597269], [-3.1094225754055671, -9.183381300715638]],
[[-5.3146654119680097, -0.16998893217721456], [-9.2160946071956893, -4.46070881125324]], [[-8.773149651282699,
-5.2512334094570638], [-8.8568230999639574, -1.8392570312854915]], [[-8.5187670879813169, -3.8418994835860651],
[-0.84597062578397964, -8.8658427010712924]], [[0.69511118189273358, -1.1032644434621188], [-3.997492806899225,
-1.1716853502893025]]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_taggedData_rank0_Symbol_rank4(self):
arg0=Data(-3.36894529378,self.functionspace)
arg0.setTaggedValue(1,-4.62956527999)
arg1=Symbol(shape=(3, 2, 3, 4))
res=arg0+arg1
s1=numpy.array([[[[-4.6824549992604805, 0.17860523484039881, -3.9939994980255102, -0.36579022311332743],
[-2.0003582573358858, 3.3436256968249793, -1.5671485178714373, 3.9554829351801821], [4.0499415739210693,
-3.1796189569360358, 0.28181611699077536, 1.4851321313182684]], [[4.9608073066477267, 2.1353944107091136,
3.2103965744924743, 0.36273874746876089], [0.33193515801312934, -1.8768462949087295, -3.5968753845201462,
-1.9342255010038101], [-0.98845968068423407, -2.6505467151645048, -3.9269883741621214, -1.2671783073823359]]],
[[[4.0296290320262234, 0.094183089334959114, -1.6548527114390654, 1.1815006848827636], [4.4205350333429578,
1.0602877007979998, -2.7207610093848364, 2.5749353581909009], [2.368743673752042, 0.36879117257479166,
3.1294699111463196, 3.8766421343643209]], [[-4.2994052301352443, -4.4665347726615128, -4.9654257982784813,
1.4010627781386145], [-0.49010647980719568, 1.1149343027340697, 3.8533389980231654, -1.4762647122950145],
[-2.4078638813490985, 4.4431147205208923, 3.0392301612263246, -2.3032611338556377]]], [[[1.1388924488325571,
4.4978561941078308, -3.3123851704811691, 1.3453478111463726], [4.1779635175178385, 3.1786527767023234,
-2.8109803623964669, 4.7217176158252876], [0.26914741902392958, -1.6630169842885789, -3.6267544687045641,
-4.7016327677304943]], [[0.44478691577550755, 2.9451130426961889, -1.0836274217802466, -4.8754431681482586],
[1.6457024072282014, -1.106310648992209, -3.2732924796145912, 4.7940609535301668], [-4.2482158844391957,
2.2391243759174451, 4.6408645091714327, 4.1449515947243611]]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[[-8.0514002930449351, -3.1903400589440558, -7.3629447918099649, -3.734735516897782],
[-5.3693035511203409, -0.025319596959475277, -4.9360938116558923, 0.5865376413957275], [0.68099628013661473,
-6.5485642507204904, -3.0871291767936793, -1.8838131624661862]], [[1.5918620128632721, -1.233550883075341,
-0.15854871929198033, -3.0062065463156937], [-3.0370101357713253, -5.2457915886931836, -6.9658206783046008,
-5.3031707947882651], [-4.3574049744686887, -6.0194920089489594, -7.2959336679465761, -4.6361236011667906]]],
[[[0.66068373824176874, -3.2747622044494955, -5.0237980052235205, -2.187444608901691], [1.0515897395585032,
-2.3086575929864548, -6.0897063031692911, -0.79400993559355371], [-1.0002016200324126, -3.000154121209663,
-0.23947538263813506, 0.5076968405798663]], [[-7.668350523919699, -7.8354800664459674, -8.3343710920629359,
-1.9678825156458402], [-3.8590517735916503, -2.2540109910503849, 0.48439370423871075, -4.8452100060794692],
[-5.7768091751335531, 1.0741694267364377, -0.32971513255813001, -5.6722064276400923]]], [[[-2.2300528449518975,
1.1289109003233762, -6.6813304642656242, -2.023597482638082], [0.80901822373338383, -0.19029251708213124,
-6.1799256561809219, 1.352772322040833], [-3.099797874760525, -5.0319622780730331, -6.9956997624890187,
-8.0705780615149489]], [[-2.9241583780089471, -0.42383225108826572, -4.4525727155647008, -8.2443884619327132],
[-1.7232428865562532, -4.4752559427766636, -6.6422377733990459, 1.4251156597457122], [-7.6171611782236504,
-1.1298209178670096, 1.2719192153869781, 0.77600630093990652]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[-9.3120202792456048, -4.4509600451447255, -8.6235647780106355,
-4.9953555030984518], [-6.6299235373210106, -1.285939583160145, -6.1967137978565621, -0.67408234480494222],
[-0.579623706064055, -7.8091842369211601, -4.347749162994349, -3.144433148666856]], [[0.33124202666260238,
-2.4941708692760107, -1.4191687054926501, -4.2668265325163635], [-4.297630121971995, -6.5064115748938534,
-8.2264406645052706, -6.5637907809889349], [-5.6180249606693584, -7.2801119951496291, -8.5565536541472458,
-5.8967435873674603]]], [[[-0.59993624795890099, -4.5353821906501652, -6.2844179914241902, -3.4480645951023607],
[-0.20903024664216652, -3.5692775791871245, -7.3503262893699608, -2.0546299217942234], [-2.2608216062330824,
-4.2607741074103327, -1.5000953688388048, -0.75292314562080342]], [[-8.9289705101203687, -9.0961000526466371,
-9.5949910782636056, -3.2285025018465099], [-5.11967175979232, -3.5146309772510547, -0.77622628196195897,
-6.1058299922801389], [-7.0374291613342228, -0.18645055946423206, -1.5903351187587997, -6.932826413840762]]],
[[[-3.4906728311525672, -0.13170908587729357, -7.9419504504662939, -3.2842174688387518], [-0.45160176246728589,
-1.450912503282801, -7.4405456423815917, 0.092152335840163246], [-4.3604178609611948, -6.2925822642737028,
-8.2563197486896875, -9.3311980477156187]], [[-4.1847783642096168, -1.6844522372889355, -5.7131927017653705,
-9.505008448133383], [-2.983862872756923, -5.7358759289773333, -7.9028577595997156, 0.16449567354504246],
[-8.8777811644243201, -2.3904409040676793, 0.011299229186308324, -0.48461368526076321]]]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_taggedData_rank1_Symbol_rank0(self):
arg0=Data(numpy.array([-4.9434811071655114, 1.7588416724781917]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([3.0524482361043965, -0.58828792238396233]))
arg1=Symbol(shape=())
res=arg0+arg1
s1=numpy.array(-4.86003727467)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([-9.8035183818403411, -3.1011956021966389]),self.functionspace)
ref.setTaggedValue(1,numpy.array([-1.8075890385704341, -5.4483251970587929]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_taggedData_rank1_Symbol_rank1(self):
arg0=Data(numpy.array([0.47124983588436109, 3.3842142103059487]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([4.4506172428158504, -1.5976912605342894]))
arg1=Symbol(shape=(2,))
res=arg0+arg1
s1=numpy.array([2.7380372395241483, -1.2414970456241372])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([3.2092870754085094, 2.1427171646818115]),self.functionspace)
ref.setTaggedValue(1,numpy.array([7.1886544823399987, -2.8391883061584267]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_taggedData_rank2_Symbol_rank0(self):
arg0=Data(numpy.array([[3.7123556177495072, -1.2322724929891438, -4.3196981967098704, 4.5149190397092358,
-3.4294461596271342], [-0.32526237821140569, 4.906418518064358, 1.6782843293160443, -4.5452294423093242,
-3.4252951962126454], [4.7623389482797158, 4.8957853100883888, 2.4605965522735644, -3.3235939770772349,
-3.6622677868193731], [3.7849671492059009, -3.7965523255405484, -0.98706292680421903, -2.9575953641431996,
3.7235194699440495]]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([[3.846235478086534, -2.9152984736534773, 2.1299170235868692,
1.4194093106373815, -1.9728564928751369], [0.12730504885223404, -2.4537968289763077, 1.8352652361138375,
-1.1054616749639532, -0.67553225283567997], [-4.6542627767136047, 0.014905560429250286, 0.84138572626791408,
-1.4074784720342515, -3.3322631066777983], [-0.64893500421415951, 4.4524265176475826, -3.5204114624144456,
3.5239615703390363, 2.3718443568961201]]))
arg1=Symbol(shape=())
res=arg0+arg1
s1=numpy.array(3.4845259086)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[7.1968815263516515, 2.2522534156130005, -0.83517228810772615, 7.9994449483113801,
0.055079748975010112], [3.1592635303907386, 8.3909444266665023, 5.1628102379181886, -1.06070353370718,
0.059230712389498841], [8.2468648568818601, 8.3803112186905331, 5.9451224608757087, 0.16093193152490937,
-0.17774187821722887], [7.2694930578080452, -0.31202641693840416, 2.4974629817979253, 0.52693054445894472,
7.2080453785461938]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[7.3307613866886783, 0.56922743494866701, 5.6144429321890135,
4.9039352192395258, 1.5116694157270074], [3.6118309574543783, 1.0307290796258366, 5.3197911447159818,
2.3790642336381911, 2.8089936557664643], [-1.1697368681114604, 3.4994314690313946, 4.3259116348700584,
2.0770474365678928, 0.15226280192434594], [2.8355909043879848, 7.9369524262497269, -0.035885553812301296,
7.0084874789411806, 5.8563702654982643]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_taggedData_rank2_Symbol_rank2(self):
arg0=Data(numpy.array([[2.7675952117994296, 0.98431175880226363, -1.8309000840442566, 2.0351166910383416,
2.1718600084175153], [0.64718493825654111, 3.0274641310077364, 4.6031246235215555, -0.072830522019846633,
-3.436466903373192], [-2.7989895712459734, 3.2804563231391093, 3.1416998470123456, 0.25702028842752966,
-3.1553411419958821], [-4.5620989116806543, -0.23300222673645532, -2.3978689464069101, 0.41391436589174457,
-3.7252639362836382]]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([[-2.1509506437818238, -2.5007519800405218, 0.30616207266744233,
-0.46790716227581797, 0.6454558125610621], [1.9589653025955753, -4.9059174981425437, -4.7107956989445992,
2.6150016745692826, -3.3329567586885211], [1.1850451086308738, 3.8781029980110997, -4.7104324292639133,
-4.8362413881812492, 4.9066980390674555], [-1.2440311634968171, -1.6429522113717008, 4.0547225056117124,
-0.33314796054153195, -2.6143781039708855]]))
arg1=Symbol(shape=(4, 5))
res=arg0+arg1
s1=numpy.array([[-0.0104190624259477, 3.439083370835446, -1.7585221913131677, 3.8784501968475897,
0.08088556648108991], [0.53276272310770789, -1.3171951284400176, -0.841014288686317, 2.4350359443944622,
0.55796159262639922], [-3.3985580423616479, 0.73804937880111687, 0.84641655693241269, -2.0376479444757822,
-0.094456394031885438], [0.8829252865168975, 0.84170422580042903, -1.9539396350167637, -4.8054718599517194,
-0.37594711864698205]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[2.7571761493734819, 4.4233951296377096, -3.5894222753574243, 5.9135668878859313,
2.2527455748986052], [1.179947661364249, 1.7102690025677187, 3.7621103348352385, 2.3622054223746156,
-2.8785053107467928], [-6.1975476136076217, 4.0185057019402262, 3.9881164039447583, -1.7806276560482526,
-3.2497975360277676], [-3.6791736251637568, 0.60870199906397371, -4.3518085814236738, -4.3915574940599749,
-4.1012110549306202]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[-2.1613697062077715, 0.93833139079492422, -1.4523601186457253,
3.4105430345717718, 0.72634137904215201], [2.4917280257032832, -6.2231126265825614, -5.5518099876309162,
5.0500376189637448, -2.7749951660621219], [-2.2135129337307742, 4.6161523768122166, -3.8640158723315006,
-6.8738893326570309, 4.8122416450355701], [-0.36110587697991958, -0.80124798557127175, 2.1007828705949487,
-5.1386198204932514, -2.9903252226178676]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_taggedData_rank3_Symbol_rank0(self):
arg0=Data(numpy.array([[[1.6094791339338048, 4.27222307751477], [4.9486531857239697, -4.5552975586923292]],
[[-0.12032729123703056, -4.1413061177629231], [-2.7473350985925316, 4.7319188820310991]], [[0.13107637034429231,
-3.2138415379490204], [-3.9942457581718696, 1.3262496008026838]], [[2.56850905863657, 1.8321753808437329],
[4.5176482730823331, 4.4664637318837137]], [[0.50860355331966556, 0.55279434819439199], [3.1688695988617859,
-2.6740526298455016]], [[4.4977965557520072, 3.6422271944652209], [3.7948343945899445,
-3.0377990068633332]]]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([[[-2.9548694146760557, 3.1101651017467038], [-0.31006440672923752,
0.74616091042484989]], [[-3.1016477433464864, 2.9532816390640111], [-2.0494474684559894, -1.1448583599993354]],
[[4.2052724347365604, -1.8157003708847643], [4.8073133555422327, -2.7045312989764492]], [[-2.3803833325202763,
0.19928505008920272], [-2.8622812030202094, 3.9488692362256081]], [[-4.1266217915470236, 4.8461083576413735],
[-3.1895474177762351, 4.4625154514412237]], [[-0.65350755924337811, 2.8015786665738105], [0.94103003425367859,
0.27556367440023166]]]))
arg1=Symbol(shape=())
res=arg0+arg1
s1=numpy.array(4.49324308458)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[6.1027222185118468, 8.765466162092812], [9.4418962703020117, -0.062054474114287217]],
[[4.3729157933410114, 0.35193696681511888], [1.7459079859855104, 9.2251619666091411]], [[4.6243194549223343,
1.2794015466290216], [0.49899732640617245, 5.8194926853807258]], [[7.061752143214612, 6.3254184654217749],
[9.0108913576603751, 8.9597068164617557]], [[5.0018466378977076, 5.046037432772434], [7.6621126834398279,
1.8191904547325404]], [[8.9910396403300492, 8.1354702790432629], [8.2880774791679865,
1.4554440777147089]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[1.5383736699019863, 7.6034081863247458], [4.1831786778488045,
5.2394039950028919]], [[1.3915953412315556, 7.4465247236420531], [2.4437956161220526, 3.3483847245787066]],
[[8.6985155193146024, 2.6775427136932777], [9.3005564401202747, 1.7887117856015928]], [[2.1128597520577657,
4.6925281346672447], [1.6309618815578326, 8.4421123208036501]], [[0.36662129303101842, 9.3393514422194155],
[1.3036956668018069, 8.9557585360192657]], [[3.8397355253346639, 7.2948217511518525], [5.4342731188317206,
4.7688067589782737]]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_taggedData_rank3_Symbol_rank3(self):
arg0=Data(numpy.array([[[-2.7345315461324993, 4.5316724428402377], [-1.2207000383039999, -2.1651454481686692]],
[[-2.5222456135735638, 3.1325113872519896], [0.54140311786327011, -1.6266115642059011]], [[4.3999274072752783,
-0.64510581732829841], [-3.3878893926233533, -0.14783111107246061]], [[2.4816188811184228, 1.505965932327137],
[-2.8128544405052458, 3.2460332510852936]], [[1.5649806120186849, 1.1768584297160487], [-3.3133262672401544,
-2.5740884272652789]], [[2.936076596237732, -0.80694051724477056], [1.6382059835800931,
-0.059174653042079584]]]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([[[4.107948776768561, 4.79459166600315], [-0.070211802843057391,
-2.3000592273671394]], [[1.53142006950028, 0.5983353676488381], [4.2000369856633419, -3.7326077043834074]],
[[-3.6852528003303684, -0.40061815593309014], [4.849947657932514, 3.2046322763443698]], [[4.6824735127774275,
-2.3356975272114679], [-1.4284737023138216, -0.96863966970867921]], [[4.4306883649430571, 0.16250464015770305],
[4.7866411719098583, -1.6949698779239197]], [[-4.9624929004021014, -0.4120760567738655], [-3.510925072784119,
-0.26388846668772636]]]))
arg1=Symbol(shape=(6, 2, 2))
res=arg0+arg1
s1=numpy.array([[[3.7560333190798687, 0.63030183757017788], [-3.8821224320935288, 4.3508142113739634]],
[[4.3548667192676795, -3.4709315123037445], [-0.19540447292770935, -1.1720138856956916]], [[3.7993994701980398,
-4.5475458462287497], [-0.20650310401114513, -2.7802894344079201]], [[-0.46867874332271242, 0.82685022383334505],
[-3.5357776147305264, 0.7633420403065605]], [[-0.19578164461526359, -4.1370261640670458], [-1.2073883253186946,
0.74664652191646397]], [[-0.697880661399644, -0.46932885527321488], [2.4087818009804716, -1.8245102799854829]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[1.0215017729473694, 5.1619742804104156], [-5.1028224703975287, 2.1856687632052942]],
[[1.8326211056941157, -0.33842012505175489], [0.34599864493556076, -2.7986254499015928]], [[8.1993268774733181,
-5.1926516635570481], [-3.5943924966344984, -2.9281205454803807]], [[2.0129401377957103, 2.3328161561604821],
[-6.3486320552357718, 4.0093752913918541]], [[1.3691989674034213, -2.9601677343509971], [-4.520714592558849,
-1.8274419053488149]], [[2.238195934838088, -1.2762693725179854], [4.0469877845605646,
-1.8836849330275625]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[7.8639820958484297, 5.4248935035733279], [-3.9523342349365862,
2.050754984006824]], [[5.8862867887679595, -2.8725961446549064], [4.0046325127356326, -4.904621590079099]],
[[0.11414666986767141, -4.9481640021618398], [4.6434445539213689, 0.42434284193644967]], [[4.2137947694547151,
-1.5088473033781229], [-4.9642513170443481, -0.20529762940211871]], [[4.2349067203277935, -3.9745215239093428],
[3.5792528465911637, -0.94832335600745576]], [[-5.6603735618017454, -0.88140491204708038], [-1.1021432718036475,
-2.0883987466732092]]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_taggedData_rank4_Symbol_rank0(self):
arg0=Data(numpy.array([[[[1.2403285479679145, -0.65355746314869823, 0.23507371305026048, 2.9495208917061202],
[-4.4153187452600653, -1.0271324152128747, 3.6087985228033794, 1.587633224392107], [1.5882989512534262,
-2.3766989521547401, -4.6462509853387939, 1.1425676014861166]], [[-4.8469447836806694, -1.4338245370809863,
-4.8441809139347694, 0.082128480181090424], [4.2695412477206585, -2.0376229192188622, -2.685821131586259,
-4.5654361329152717], [3.5226403567783482, -4.9633770210253347, 4.1637469549065127, -3.5898874968684167]]],
[[[2.7439089503129228, 0.81346375693975492, -2.576882111469688, 4.758878084101946], [0.098363354586225249,
-4.314913184354209, -1.1821682575010484, 4.9687115939178916], [-2.5414207769554564, 1.9836872846103208,
-1.5982744174212127, 4.5509211096426121]], [[4.759533396882766, -4.550347299113696, 4.9394743649799153,
-3.9692445921595421], [1.5755016838325195, 2.6599597206311305, -0.59545966103916648, -1.308464088815966],
[1.7018715016873482, 0.31781368103450536, -0.91184792887657995, -0.60566457689943931]]], [[[-0.365764084374395,
-0.75878286483821444, -3.1104661623240091, -3.7302303444372109], [0.58052395594970907, 0.14085590954626337,
4.6712439745076182, 0.65991412045590181], [-4.5675491076195733, -3.3042112830144132, -2.6719400309110553,
-3.8520603991598765]], [[3.4260488825099618, -1.2789319515430164, 1.8435112511824903, 1.0773214658952854],
[-4.0772283149901236, 1.0211433275718873, -2.015430043082814, 0.1376630245430368], [1.3249956905172624,
3.1987247807146968, 1.0304156332749459, 3.785256475561086]]]]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([[[[-3.8774766185796605, 3.1521364883779448, -4.9233158714840091,
3.7988193665209522], [4.8244393256113263, 2.4688683468745563, -4.5044275072582254, 1.1107496985072052],
[-2.9980383766650376, -4.2922660982517158, 3.4924104659712771, -0.5135964311738892]], [[1.9573144047865201,
-2.2686101409008961, -2.907052414660404, -4.0582253229051144], [-2.0281877168409657, 1.7867206317317663,
0.018511114285918673, -4.0475974398672498], [1.3023403490307315, 1.9932255873687215, -4.6698465653310688,
-4.5630845029599421]]], [[[-1.9525649263627876, -0.72040110769848908, -3.6987029249472769, -3.3184217891099999],
[-4.0519149413902857, 4.1195877398536549, -3.8261874289376463, 3.423780007792768], [0.11768639970294359,
-1.4898880703788131, -1.1746648112150213, -0.28493737967147226]], [[-2.0138403307539932, 3.9987186392010816,
-1.0125535260055338, 0.57376641241565363], [4.213727608092972, 0.51388058678005066, -4.4106027756910908,
-1.9979423050108283], [1.5708368447511347, -1.6270284297780933, -0.55277364435139376, -1.7748804647831715]]],
[[[2.7639070541103061, 2.7303808332951629, 0.41148416591473591, -1.9337000414572802], [-2.7585163378482456,
2.2319457297797207, 3.7988668025967804, 3.6103374331669471], [-4.5925114196923271, -2.1274746711435997,
3.3094547630756779, -4.1386856959210352]], [[-2.1348423629137692, 3.539794593057783, 4.8265405725541157,
4.9426398297282788], [4.5757071915543417, -4.0433372993763399, -0.84096548582416997, 2.0567811910343226],
[4.5367596882428671, -4.9139510999364404, 1.1342166543217944, 1.4859311895053571]]]]))
arg1=Symbol(shape=())
res=arg0+arg1
s1=numpy.array(4.83582066753)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[[6.0761492154946453, 4.1822632043780326, 5.0708943805769913, 7.785341559232851],
[0.4205019222666655, 3.8086882523138561, 8.4446191903301102, 6.4234538919188378], [6.424119618780157,
2.4591217153719906, 0.18956968218793691, 5.9783882690128474]], [[-0.011124116153938601, 3.4019961304457444,
-0.008360246408038563, 4.9179491477078212], [9.1053619152473892, 2.7981977483078686, 2.1499995359404718,
0.27038453461145906], [8.358461024305079, -0.12755635349860395, 8.9995676224332435, 1.2459331706583141]]],
[[[7.5797296178396536, 5.6492844244664857, 2.2589385560570427, 9.5946987516286768], [4.934184022112956,
0.52090748317252178, 3.6536524100256824, 9.8045322614446224], [2.2943998905712744, 6.8195079521370516,
3.2375462501055181, 9.3867417771693429]], [[9.5953540644094968, 0.28547336841303483, 9.7752950325066461,
0.86657607536718873], [6.4113223513592503, 7.4957803881578613, 4.2403610064875643, 3.5273565787107648],
[6.537692169214079, 5.1536343485612361, 3.9239727386501508, 4.2301560906272915]]], [[[4.4700565831523358,
4.0770378026885163, 1.7253545052027217, 1.1055903230895199], [5.4163446234764399, 4.9766765770729942,
9.507064642034349, 5.4957347879826326], [0.26827155990715745, 1.5316093845123175, 2.1638806366156755,
0.98376026836685426]], [[8.2618695500366925, 3.5568887159837144, 6.679331918709221, 5.9131421334220162],
[0.75859235253660717, 5.8569639950986181, 2.8203906244439167, 4.9734836920697676], [6.1608163580439932,
8.0345454482414276, 5.8662363008016767, 8.6210771430878168]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[0.95834404894707026, 7.9879571559046756, -0.087495203957278278,
8.634640034047683], [9.6602599931380571, 7.304689014401287, 0.3313931602685054, 5.946570366033936],
[1.8377822908616932, 0.543554569275015, 8.3282311334980079, 4.3222242363528416]], [[6.7931350723132509,
2.5672105266258347, 1.9287682528663268, 0.77759534462161639], [2.8076329506857651, 6.6225412992584971,
4.8543317818126495, 0.78822322765948094], [6.1381610165574623, 6.8290462548954523, 0.165974102195662,
0.27273616456678873]]], [[[2.8832557411639432, 4.1154195598282417, 1.1371177425794539, 1.5173988784167309],
[0.78390572613644505, 8.9554084073803857, 1.0096332385890845, 8.2596006753194988], [4.9535070672296744,
3.3459325971479177, 3.6611558563117095, 4.5508832878552585]], [[2.8219803367727376, 8.8345393067278124,
3.823267141521197, 5.4095870799423844], [9.0495482756197028, 5.3497012543067815, 0.42521789183563996,
2.8378783625159025], [6.4066575122778655, 3.2087922377486375, 4.283047023175337, 3.0609402027435593]]],
[[[7.5997277216370369, 7.5662015008218937, 5.2473048334414667, 2.9021206260694505], [2.0773043296784852,
7.0677663973064515, 8.6346874701235112, 8.4461581006936779], [0.24330924783440366, 2.7083459963831311,
8.1452754306024087, 0.6971349716056956]], [[2.7009783046129616, 8.3756152605845138, 9.6623612400808465,
9.7784604972550095], [9.4115278590810725, 0.79248336815039089, 3.9948551817025608, 6.8926018585610533],
[9.3725803557695979, -0.078130432409709627, 5.9700373218485252, 6.3217518570320879]]]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_taggedData_rank4_Symbol_rank4(self):
arg0=Data(numpy.array([[[[-3.1509236523814286, 1.680234058442708, -1.7187977550532416, 3.9846453843972913],
[-1.6754979614332322, -3.8450074807346901, -1.5740330789137689, -4.4201074343218751], [2.276529915966389,
-0.80235747833916982, 4.571247045598767, -3.4093255486695617]], [[-4.0166628667791446, -1.3933240066153738,
-1.215071574667598, -3.4706735067142258], [-3.0960303329082572, 4.3009033191704589, 4.4065883064621634,
4.8965445768019009], [-4.4443460968929758, 3.8975314333052253, -4.4153045047286144, 1.7496820405056166]]],
[[[1.634274247051799, -2.4623052709302771, 1.4279180811059975, 0.92544783745377668], [-4.4862942162658106,
-0.17080151547727951, 0.52532922395695625, -0.11419327223481623], [-1.1603038628614835, -2.5757515035829472,
1.9959550719114718, -1.7953240768392242]], [[4.9309159450812103, 3.2298165897638906, -0.075208625571880461,
-1.1899071115534432], [1.6545058865005409, -1.9426363189361773, 1.620629502101667, -4.2257681218133687],
[-0.24689686416986767, 2.1247379677905815, -0.022501917990521925, -1.9988138278359822]]], [[[-2.16170138942825,
1.2184335532362125, 1.1509535832826323, 2.2195238124001797], [2.7455643566460015, 4.6453581322389361,
-4.1082447076462643, -4.0639146315693067], [-4.96116105494092, -3.6915142795866762, -1.2186796693827917,
4.7933913234222967]], [[2.0022553772723217, -0.96891528014022654, -2.5457411370843142, -3.3574915783043058],
[0.10326637441549735, 2.2065594442944327, 3.4159550457557479, -0.71182719653128945], [-1.5473005591196651,
-1.8237704422942014, 3.7660184612895105, -2.1565964302540372]]]]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([[[[-2.7644812297675436, 0.40931971763703956, 3.611075059192606,
0.50972765741910564], [-4.2130726841282584, -1.1190277433669751, -0.71203745760782766, -3.152956525368753],
[-1.6186056313723087, 1.1274726343098616, 4.4133392834898437, 1.5220424195160689]], [[0.16147933294385375,
2.4654462130650998, -2.2315133839410328, -4.5248215067907562], [2.2226933853289026, 3.7083490689582508,
1.6042940030913613, 0.26178935291219929], [2.4033332562872989, 2.6116613010273229, -3.5340848426974594,
-4.3871506552920767]]], [[[-2.5011422414749243, -2.9785737952530678, -4.0632268435384287, -2.9061747268645899],
[-3.4361922491984487, 0.92512310228203631, -3.7591410062368915, -0.10199113857196274], [1.4370716393838645,
0.71874746237537668, -4.5480615526025323, -3.9385610102938093]], [[-3.5039474073115562, 1.4740925776889409,
-0.06403798877318323, -3.3828440686373753], [-1.9590119108809123, -0.13446729158123816, -2.4360152863347251,
0.81375486060557112], [2.4638296949211451, 0.84554464160795018, 1.0770605717668191, 0.90311465710515648]]],
[[[-3.0365259446312756, -2.1113062138954444, 3.190598106141481, 4.7146234105400531], [4.7073713389281071,
2.0949812753843036, 1.902801485931489, -0.4384294077249864], [-4.4341512258710214, 4.114619941421422,
4.1663347911930675, -0.082374028629738305]], [[-0.58950965471106098, -1.9744112566224792, -0.0098348725084971278,
2.3871548847218813], [-1.1861224380121662, -3.8703032573387253, 0.2332725218101972, 2.7881117501797101],
[-4.3313677243610327, 2.5428749523942127, 3.9018944633638419, -0.49408732338659789]]]]))
arg1=Symbol(shape=(3, 2, 3, 4))
res=arg0+arg1
s1=numpy.array([[[[1.8433628252117984, 1.5322432245117268, 0.55363793461945665, 4.6657626927783653],
[-0.94710403494804751, 3.9800168829397649, 3.0366988370600794, 2.8875431155604332], [-1.188024345098996,
1.0665386751463011, 4.7835901054797993, 2.5969696632689807]], [[-1.99850752062535, 1.1333681341555639,
-0.49718999089842697, 1.1440753369804515], [0.26294280812698378, -3.8684363170040701, 0.061030108864615684,
-4.1179127492349608], [-4.67031644465197, 4.9054510497550492, -0.2640662442281041, 1.363134852748785]]],
[[[-1.4621905107325697, -2.8811881835070574, -2.0127263016810106, 3.9187151372775499], [4.0559843147336121,
3.8748150284806506, -4.7195991819934049, 1.6441241199343715], [1.1018797372155733, 1.5720711461020827,
-2.8718182782954003, -2.4926472889456743]], [[2.1583981297206112, -2.7029142786449709, -4.0306810999276212,
-0.041927417439557857], [2.5297094316362001, 3.2023688131127575, -0.87830172094753056, 1.5087811969314782],
[0.94040146920827272, 1.8042467131134678, 2.6306472495122346, 0.16819275341523543]]], [[[0.15798239523545377,
2.4104584738150319, 2.3850248364278386, 3.2174938931658534], [4.8575582926065533, 0.30772922316230389,
-4.4397211951638047, 0.39063821497748741], [-2.3146321369181688, -3.0703095447217885, 1.7397877979741549,
4.033153568325778]], [[-1.7935270727714037, -3.9682025038313595, -3.4065483616803141, 2.1844510922893523],
[-4.2449404804537032, 1.9572337718531996, -4.6593011375931308, 0.98236210083608633], [4.8624542464851288,
0.5657266529616205, 0.50114562982511135, -3.2736237576584317]]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[[-1.3075608271696302, 3.2124772829544348, -1.165159820433785, 8.6504080771756566],
[-2.6226019963812797, 0.13500940220507474, 1.4626657581463105, -1.5325643187614419], [1.088505570867393,
0.26418119680713126, 9.3548371510785664, -0.81235588540058101]], [[-6.0151703874044946, -0.25995587245980989,
-1.7122615655660249, -2.3265981697337743], [-2.8330875247812735, 0.43246700216638878, 4.4676184153267791,
0.77863182756694016], [-9.1146625415449449, 8.8029824830602745, -4.6793707489567185, 3.1128168932544016]]],
[[[0.17208373631922935, -5.3434934544373345, -0.58480822057501314, 4.8441629747313266], [-0.4303099015321985,
3.7040135130033711, -4.1942699580364486, 1.5299308476995552], [-0.058424125645910152, -1.0036803574808646,
-0.87586320638392845, -4.2879713657848981]], [[7.0893140748018215, 0.52690231111891972, -4.1058897254995017,
-1.2318345289930011], [4.184215318136741, 1.2597324941765802, 0.74232778115413645, -2.7169869248818905],
[0.69350460503840505, 3.9289846809040494, 2.6081453315217127, -1.8306210744207467]]], [[[-2.0037189941927962,
3.6288920270512444, 3.5359784197104709, 5.4370177055660331], [7.6031226492525548, 4.95308735540124,
-8.5479659028100698, -3.6732764165918192], [-7.2757931918590888, -6.7618238243084647, 0.52110812859136324,
8.8265448917480747]], [[0.20872830450091806, -4.9371177839715861, -5.9522894987646282, -1.1730404860149535],
[-4.1416741060382058, 4.1637932161476323, -1.2433460918373829, 0.27053490430479687], [3.3151536873654637,
-1.2580437893325809, 4.2671640911146218, -5.430220187912469]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[-0.92111840455574523, 1.9415629421487663, 4.1647129938120626,
5.1754903501974709], [-5.1601767190763059, 2.8609891395727898, 2.3246613794522517, -0.26541340980831984],
[-2.8066299764713047, 2.1940113094561626, 9.1969293889696431, 4.1190120827850496]], [[-1.8370281876814962,
3.5988143472206637, -2.7287033748394598, -3.3807461698103047], [2.4856361934558864, -0.16008724804581931,
1.665324111955977, -3.8561233963227615], [-2.2669831883646712, 7.5171123507823721, -3.7981510869255635,
-3.0240158025432917]]], [[[-3.9633327522074939, -5.8597619787601252, -6.0759531452194393, 1.0125404104129601],
[0.61979206553516342, 4.7999381307626869, -8.4787401882302973, 1.5421329813624087], [2.5389513765994378,
2.2908186084774593, -7.4198798308979326, -6.4312082992394837]], [[-1.345549277590945, -1.22882170095603,
-4.0947190887008045, -3.4247714860769332], [0.57069752075528779, 3.0679015215315193, -3.3143170072822556,
2.3225360575370493], [3.4042311641294178, 2.649791354721418, 3.7077078212790537, 1.0713074105203919]]],
[[[-2.8785435493958218, 0.29915225991958749, 5.5756229425693196, 7.9321173037059065], [9.5649296315346604,
2.4027104985466075, -2.5369197092323157, -0.047791192747498989], [-6.7487833627891902, 1.0443103966996334,
5.9061225891672224, 3.9507795396960397]], [[-2.3830367274824646, -5.9426137604538383, -3.4163832341888112,
4.5716059770112336], [-5.4310629184658694, -1.9130694854855257, -4.4260286157829336, 3.7704738510157965],
[0.53108652212409613, 3.1086016053558332, 4.4030400931889533, -3.7677110810450296]]]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
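# Note on the expandedData tests below: arg0 is assembled from a spatial
# mask rather than from tags. Assuming standard esys.escript semantics,
# whereNegative(X[0]-0.5) equals 1 where x0 < 0.5 and 0 elsewhere, so
# msk_arg0*a + (1.-msk_arg0)*b takes the value a on one part of the
# domain and b on the rest. The reference is assembled the same way from
# msk_ref = 1. - whereZero(X[0], 1.e-8), which vanishes only at points
# with x0 ~ 0. Everything else about the add-then-substitute pattern
# matches the tagged-data tests above.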
def test_add_overloaded_expandedData_rank0_Symbol_rank0(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*(-0.481249850026)+(1.-msk_arg0)*(-1.48465416864)
arg1=Symbol(shape=())
res=arg0+arg1
s1=numpy.array(-2.65110429185)
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*(-3.13235414188)+(1.-msk_ref)*(-4.13575846049)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_expandedData_rank0_Symbol_rank1(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*(1.13411439983)+(1.-msk_arg0)*(-0.629637549331)
arg1=Symbol(shape=(2,))
res=arg0+arg1
s1=numpy.array([-0.62992419613163175, 4.55886114005793])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([0.50419020369403444, 5.6929755398835962])+(1.-msk_ref)*numpy.array([-1.259561745462479,
3.9292235907270827])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_expandedData_rank0_Symbol_rank2(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*(3.01809294358)+(1.-msk_arg0)*(0.889743657807)
arg1=Symbol(shape=(4, 5))
res=arg0+arg1
s1=numpy.array([[-2.793178683106079, -2.6222774715493582, 1.0142792223620747, -3.0640922264732984,
-2.3554298671206055], [0.088775964219395043, 3.4441381957619619, 3.3892189758872853, 2.7423767697866088,
3.977644321141641], [1.4526982641352157, 2.2184052986969505, -3.952710218879385, -4.7169576073736375,
-0.7937042808225101], [2.2686916098744314, -1.553248315886353, -2.7367045745859819, 3.7958840729585344,
1.4548199443717298]])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[0.22491426047411567, 0.39581547203083645, 4.0323721659422693, -0.045999282893103732,
0.66266307645958911], [3.1068689077995897, 6.4622311393421565, 6.4073119194674799, 5.7604697133668035,
6.9957372647218357], [4.4707912077154104, 5.2364982422771451, -0.93461727529919036, -1.6988646637934428,
2.2243886627576845], [5.2867845534546261, 1.4648446276938416, 0.28138836899421271, 6.813977016538729,
4.4729128879519244]])+(1.-msk_ref)*numpy.array([[-1.9034350252987218, -1.732533813742001, 1.9040228801694319,
-2.1743485686659412, -1.4656862093132483], [0.97851962202675224, 4.3338818535693191, 4.2789626336946425,
3.632120427593966, 4.8673879789489982], [2.3424419219425729, 3.1081489565043077, -3.0629665610720278,
-3.8272139495662802, 0.096039376984847102], [3.1584352676817886, -0.66350465807899583, -1.8469609167786247,
4.6856277307658916, 2.344563602179087]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_expandedData_rank0_Symbol_rank3(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*(-4.98444562132)+(1.-msk_arg0)*(4.30756765987)
arg1=Symbol(shape=(6, 2, 2))
res=arg0+arg1
s1=numpy.array([[[1.9993822405268356, -3.1230808428690615], [4.9036400439562815, -4.8838867997176525]],
[[0.42763250705520939, 1.7579324334230453], [-3.7242679708963458, 1.8833596506298056]], [[-3.5481907533254931,
0.2040318933875751], [-2.5124574767604746, -4.1576503017979416]], [[2.4187154671810562, -0.51775884222858526],
[-1.722028671225063, 4.8177194310600537]], [[3.5460779618762999, 3.7426721831596925], [-3.14876579453641,
-1.8491069265603413]], [[-2.0602497125201733, 1.8445672729830882], [2.6289048953955998, -2.1171625740448654]]])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[[-2.9850633807947604, -8.1075264641906575], [-0.080805577365314463,
-9.8683324210392485]], [[-4.5568131142663866, -3.2265131878985507], [-8.7087135922179417, -3.1010859706917904]],
[[-8.5326363746470886, -4.7804137279340209], [-7.4969030980820701, -9.1420959231195376]], [[-2.5657301541405397,
-5.5022044635501812], [-6.7064742925466589, -0.16672619026154223]], [[-1.4383676594452961, -1.2417734381619034],
[-8.1332114158580069, -6.8335525478819372]], [[-7.0446953338417693, -3.1398783483385078], [-2.3555407259259962,
-7.1016081953664614]]])+(1.-msk_ref)*numpy.array([[[6.3069499004015404, 1.1844868170056433], [9.2112077038309863,
-0.57631913984294769]], [[4.7352001669299142, 6.0655000932977501], [0.58329968897835904, 6.1909273105045104]],
[[0.75937690654921175, 4.5115995532622799], [1.7951101831142302, 0.14991735807676321]], [[6.726283127055761,
3.7898088176461195], [2.5855389886496418, 9.1252870909347585]], [[7.8536456217510047, 8.0502398430343973],
[1.1588018653382948, 2.4584607333143635]], [[2.2473179473545315, 6.152134932857793], [6.9364725552703046,
2.1904050858298394]]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_expandedData_rank0_Symbol_rank4(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*(-2.9697925334)+(1.-msk_arg0)*(-4.26135335725)
arg1=Symbol(shape=(3, 2, 3, 4))
res=arg0+arg1
s1=numpy.array([[[[3.9689996783063126, 2.6024749301521517, -2.8657897182202263, 3.4523361907793202],
[1.0646468808240472, 2.2809214673673006, 1.9110441510817342, 3.6637536830808415], [-4.8161620946685977,
1.1260192950202335, -1.5444099528131283, 4.5856953227320361]], [[3.4807853259935388, 1.0632821522370133,
-1.7813251042294, 0.96803702807832348], [-2.2395880868316476, 4.8919502166960243, 3.0915081953974273,
-0.85921425228962178], [-0.24500754865585961, -3.000069805276242, -2.3285433357124861, -3.7526812827715004]]],
[[[-2.6148866735769314, -2.9426881222754986, -2.1105189060422127, -1.718323686970705], [0.38236683235255065,
4.8146833101999391, -0.69724678041282662, -3.674837501299455], [-1.1217878757973345, 1.9457797122429064,
4.3330454272287042, 1.2870165165330079]], [[0.90390350707926448, 4.0932246664578322, 4.0170833493811937,
2.3057200276883218], [-4.1149618340720506, 4.3206785552080422, 4.5478406361616468, 3.4270491303459689],
[-3.2122582790653578, -0.051138136931458078, 2.847106348954056, -2.0922906343243097]]], [[[-3.8470709835005801,
0.79389346854249432, 1.9702586564654192, -1.230993932131331], [0.52027641197917784, 4.1606002966489264,
-4.1240899145057277, 3.0855602864655047], [1.2434749670286918, 1.9421106344042691, -4.7997149299258455,
-3.1016051858236517]], [[-4.0158867307020536, -1.2810983979769732, 4.1806447574751786, 2.4159993753375488],
[3.8210591526688589, 2.9170696329659753, 0.212629682453775, -3.6791629346607402], [-0.52709663403725493,
-2.0893727810689953, -1.7473644406170976, -4.1869442335699976]]]])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[[[0.99920714490574225, -0.36731760324841867, -5.8355822516207967, 0.48254365737874982],
[-1.9051456525765231, -0.68887106603326975, -1.0587483823188362, 0.69396114968027112], [-7.7859546280691685,
-1.8437732383803369, -4.5142024862136987, 1.6159027893314657]], [[0.51099279259296848, -1.9065103811635571,
-4.7511176376299709, -2.0017555053222469], [-5.2093806202322179, 1.9221576832954539, 0.12171566199685691,
-3.8290067856901921], [-3.21480008205643, -5.9698623386768119, -5.2983358691130569, -6.7224738161720712]]],
[[[-5.5846792069775013, -5.9124806556760685, -5.0803114394427826, -4.6881162203712758], [-2.5874257010480197,
1.8448907767993687, -3.667039313813397, -6.6446300347000253], [-4.0915804091979044, -1.024012821157664,
1.3632528938281339, -1.6827760168675625]], [[-2.0658890263213059, 1.1234321330572619, 1.0472908159806233,
-0.66407250571224852], [-7.0847543674726214, 1.3508860218074719, 1.5780481027610764, 0.45725659694539855],
[-6.1820508124659277, -3.0209306703320284, -0.12268618444651436, -5.0620831677248805]]], [[[-6.8168635169011509,
-2.175899064858076, -0.99953387693515117, -4.2007864655319018], [-2.4495161214213925, 1.190807763248356,
-7.0938824479062976, 0.11576775306493436], [-1.7263175663718786, -1.0276818989963012, -7.7695074633264163,
-6.0713977192242226]], [[-6.9856792641026235, -4.250890931377544, 1.2108522240746082, -0.55379315806302154],
[0.8512666192682885, -0.052722900434595044, -2.7571628509467954, -6.6489554680613105], [-3.4968891674378253,
-5.0591653144695652, -4.7171569740176675, -7.1567367669705675]]]])+(1.-msk_ref)*numpy.array([[[[-0.29235367894345909,
-1.65887842709762, -7.1271430754699985, -0.80901716647045152], [-3.1967064764257245, -1.9804318898824711,
-2.3503092061680375, -0.59759967416893023], [-9.0775154519183694, -3.1353340622295383, -5.8057633100629005,
0.32434196548226435]], [[-0.78056803125623286, -3.1980712050127584, -6.0426784614791718, -3.2933163291714482],
[-6.5009414440814197, 0.63059685944625254, -1.1698451618523444, -5.1205676095393935], [-4.5063609059056313,
-7.2614231625260137, -6.5898966929622578, -8.0140346400212721]]], [[[-6.8762400308267031, -7.2040414795252703,
-6.3718722632919844, -5.9796770442204767], [-3.8789865248972211, 0.5533299529501674, -4.9586001376625983,
-7.9361908585492262], [-5.3831412330471062, -2.3155736450068654, 0.071692069978932516, -2.9743368407167639]],
[[-3.3574498501705072, -0.16812869079193948, -0.244270007868578, -1.9556333295614499], [-8.3763151913218223,
0.059325197958270515, 0.28648727891187509, -0.83430422690380279], [-7.4736116363151295, -4.3124914941812298,
-1.4142470082957157, -6.3536439915740814]]], [[[-8.1084243407503518, -3.4674598887072774, -2.2910947007843525,
-5.4923472893811027], [-3.7410769452705939, -0.10075306060084532, -8.3854432717554985, -1.175793070784267],
[-3.01787839022108, -2.3192427228455026, -9.0610682871756172, -7.3629585430734235]], [[-8.2772400879518244,
-5.5424517552267449, -0.080708599774593104, -1.8453539819122229], [-0.44029420458091284, -1.3442837242837964,
-4.0487236747959967, -7.9405162919105123], [-4.7884499912870266, -6.350726138318767, -6.0087177978668693,
-8.4482975908197702]]]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_expandedData_rank1_Symbol_rank0(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*numpy.array([2.1945719955206853,
-3.4851810549539852])+(1.-msk_arg0)*numpy.array([-3.159460740559509, 1.0507096466806898])
arg1=Symbol(shape=())
res=arg0+arg1
s1=numpy.array(2.92811762582)
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([5.1226896213358133,
-0.5570634291388572])+(1.-msk_ref)*numpy.array([-0.23134311474438096, 3.9788272724958178])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_expandedData_rank1_Symbol_rank1(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*numpy.array([1.9387192390641195,
-2.294788495198282])+(1.-msk_arg0)*numpy.array([-3.9950296964046816, -4.9584579002903517])
arg1=Symbol(shape=(2,))
res=arg0+arg1
s1=numpy.array([0.68148355985483988, 0.33396702170122339])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([2.6202027989189594, -1.9608214734970586])+(1.-msk_ref)*numpy.array([-3.3135461365498418,
-4.6244908785891283])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_expandedData_rank2_Symbol_rank0(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*numpy.array([[1.9335525790389809, 4.8876884032830024, -3.6794048434152948, -2.9337672885330814,
0.5880232587543972], [1.2731441866942719, 4.8021715240969982, 2.9871285060348427, 4.3674026791776921,
2.3324101078324144], [3.257367767879968, 3.614481137699638, -4.0465097244122443, -3.3712543524462166,
0.83424572698980626], [-4.7734011845397317, -1.1918316514932537, -2.641576771310632, -3.7441723823507447,
2.5792398168240602]])+(1.-msk_arg0)*numpy.array([[0.51038147587387783, -3.548018657118809, 3.7494118465432393,
3.6729170048063136, -2.9522974158811746], [3.2109365766033289, -1.7347320393345091, -0.9996429948297223,
-0.75500884718678307, 1.5928790967815267], [-4.1174844249701259, 4.2030131668606234, -4.8484509001230229,
2.7032344298767921, 4.3009935101668333], [-1.4527019870327429, 3.9347061378002781, 1.21415230923688,
-3.666838308237784, -3.8400590973123858]])
arg1=Symbol(shape=())
res=arg0+arg1
s1=numpy.array(3.22997214356)
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[5.1635247225953336, 8.117660546839355, -0.44943269985894219, 0.29620485502327121,
3.8179954023107499], [4.5031163302506245, 8.0321436676533509, 6.2171006495911953, 7.5973748227340447,
5.5623822513887671], [6.4873399114363206, 6.8444532812559906, -0.81653758085589168, -0.14128220888986398,
4.0642178705461589], [-1.5434290409833791, 2.038140492063099, 0.58839537224572069, -0.51420023879439203,
5.8092119603804129]])+(1.-msk_ref)*numpy.array([[3.7403536194302305, -0.31804651356245639, 6.979383990099592,
6.9028891483626662, 0.27767472767517809], [6.4409087201596815, 1.4952401042218435, 2.2303291487266304,
2.4749632963695696, 4.8228512403378794], [-0.88751228141377325, 7.4329853104169761, -1.6184787565666703,
5.9332065734331447, 7.5309656537231859], [1.7772701565236098, 7.1646782813566308, 4.4441244527932326,
-0.43686616468143136, -0.61008695375603317]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_expandedData_rank2_Symbol_rank2(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*numpy.array([[-0.074742989914646785, -1.8482493880577588, 1.0926262448311599, 4.5158483202643716,
-3.0805669333005561], [0.0085606966159099684, -2.9696862086974996, 3.3024460854167597, 1.5088165460119427,
-3.6452065491857266], [0.18694035412066512, -4.6738922180085147, 3.9551045875071438, 4.0084174115638724,
-0.63332177275981749], [2.5093858800842108, -0.36171911019222946, 0.19138395375626427, -3.1795621861527734,
-2.6267949144535008]])+(1.-msk_arg0)*numpy.array([[-3.5942187686631524, -3.7060821431133406, 0.9533196788857623,
-4.8840044000628744, 0.3938790125214453], [4.0652979493208985, 4.5325841421496644, -0.4281905049316661,
-1.742508580451184, 2.7120740894023898], [0.56888661640784566, -2.4569299021956068, 3.568568120069024,
-2.0793352745659766, -1.7689628659930126], [-4.8632954420706014, -2.8828667280653364, 3.4090243893802246,
3.0651732601260697, 4.6463764755640256]])
arg1=Symbol(shape=(4, 5))
res=arg0+arg1
s1=numpy.array([[-1.4953863183942318, -3.5127993001524969, 2.9138150805794103, -1.6144165168200519,
-0.65062618022498242], [-4.9181569250500168, -2.6971927119277908, 4.2365880197149934, -4.2036145824282496,
2.2260090531531453], [4.0868409931398002, -3.3893548967194032, 2.9012650531553019, -2.2355683566643378,
2.9627609193479501], [4.9921359000605019, 0.6569024014440803, 3.3639734573108839, 0.89356331435440595,
-4.0709626638242327]])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[-1.5701293083088785, -5.3610486882102553, 4.0064413254105702, 2.9014318034443196,
-3.7311931135255385], [-4.9095962284341068, -5.6668789206252903, 7.5390341051317531, -2.6947980364163069,
-1.4191974960325813], [4.2737813472604653, -8.0632471147279183, 6.8563696406624457, 1.7728490548995346,
2.3294391465881326], [7.5015217801447127, 0.29518329125185083, 3.5553574110671482, -2.2859988717983675,
-6.6977575782777334]])+(1.-msk_ref)*numpy.array([[-5.0896050870573841, -7.2188814432658379, 3.8671347594651726,
-6.4984209168829263, -0.25674716770353712], [-0.85285897572911828, 1.8353914302218737, 3.8083975147833273,
-5.9461231628794335, 4.9380831425555352], [4.6557276095476459, -5.8462847989150095, 6.4698331732243259,
-4.3149036312303144, 1.1937980533549375], [0.12884045798990051, -2.2259643266212561, 6.7729978466911085,
3.9587365744804757, 0.57541381173979289]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_expandedData_rank3_Symbol_rank0(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*numpy.array([[[-2.1957568090391955, 0.56747277575122101], [-1.4226171578539604,
-3.1174336379255854]], [[1.9150168705353749, 0.46771483389240665], [-0.73261624542450932, 1.4533109165427449]],
[[-4.3700026677098416, -4.4121889510507675], [-4.2432470132589684, -4.6365817911825937]], [[4.3712760608754326,
0.48815678812850649], [-4.2919585871561221, 2.8753619236403747]], [[4.7410827225779482, -3.2941488290580354],
[3.5834613437014919, 0.53477849558006074]], [[-2.2697241902980902, 1.4839036193452078], [4.3514574228344109,
2.0334834769049763]]])+(1.-msk_arg0)*numpy.array([[[1.9065956016010119, 3.8011536401496766], [4.2481111431072272,
0.7657337986451509]], [[1.7488690210709832, 4.5064595133713876], [-1.261534521038973, -1.5095749568667172]],
[[1.2010203264269057, 0.055494332510111377], [4.3269730839285749, -0.54412407243328076]], [[-2.6257140205956175,
-3.4462245120816002], [1.3451771798822101, 2.462398203439907]], [[-2.5713124204289493, 1.9356323962441504],
[1.8879658089499234, 3.1212800001648091]], [[1.942043508304808, 0.80539011514164471], [-0.3765200612428643,
0.73339801844715691]]])
arg1=Symbol(shape=())
res=arg0+arg1
s1=numpy.array(2.24723235412)
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[[0.05147554507665264, 2.8147051298670691], [0.82461519626188773,
-0.87020128380973727]], [[4.162249224651223, 2.7149471880082547], [1.5146161086913388, 3.700543270658593]],
[[-2.1227703135939935, -2.1649565969349194], [-1.9960146591431203, -2.3893494370667456]], [[6.6185084149912807,
2.7353891422443546], [-2.044726233040274, 5.1225942777562228]], [[6.9883150766937963, -1.0469164749421873],
[5.83069369781734, 2.7820108496959088]], [[-0.022491836182242153, 3.7311359734610559], [6.598689776950259,
4.2807158310208244]]])+(1.-msk_ref)*numpy.array([[[4.15382795571686, 6.0483859942655247], [6.4953434972230752,
3.012966152760999]], [[3.9961013751868313, 6.7536918674872357], [0.98569783307687509, 0.73765739724913093]],
[[3.4482526805427538, 2.3027266866259595], [6.574205438044423, 1.7031082816825673]], [[-0.37848166647976944,
-1.1989921579657521], [3.5924095339980582, 4.7096305575557551]], [[-0.32408006631310116, 4.1828647503599985],
[4.1351981630657715, 5.3685123542806572]], [[4.1892758624206561, 3.0526224692574928], [1.8707122928729838,
2.980630372563005]]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_expandedData_rank3_Symbol_rank3(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*numpy.array([[[-3.6330041831896742, 1.9011276595647058], [4.0527837903730326, 3.7453216540822218]],
[[1.1423057067323032, -4.6191355501663702], [-0.19479401086936399, 3.6518312558771875]], [[-0.78164127432320996,
-0.0025588788834731702], [-2.5155059876978534, -2.7853664238124578]], [[-2.4557560474662496, -1.7001261418483038],
[2.2437567320884249, -4.5528490181464578]], [[3.3965240991344601, 2.7531638892344281], [-1.0182649859279858,
0.37879180372082377]], [[-2.2634040587587356, -3.6908761533687482], [-2.6652399154901509,
-2.0159814304593739]]])+(1.-msk_arg0)*numpy.array([[[4.9981907924797788, 4.277720751221235], [-4.4785446333946686,
-3.8140270519701982]], [[1.4517149340948965, 1.9122847710945834], [-1.0984824997077558, 4.9260526287710995]],
[[3.0231870187238314, -4.426803554802202], [-0.1009215503507912, -2.4226611633877337]], [[3.1439947236211125,
-2.7156096061802728], [-0.27949941006709977, 0.15562912547547469]], [[-1.6704879956646712, -0.87822202800174587],
[-4.0968204088950708, -4.8812474874399072]], [[-3.0876637956180186, 0.42808604578959475], [-0.76617423765119153,
1.4811418969805343]]])
arg1=Symbol(shape=(6, 2, 2))
res=arg0+arg1
s1=numpy.array([[[-3.655791939954395, 1.9082625611635287], [2.0305234873740705, -3.9575879711347337]],
[[0.58883813376680294, -0.44253502109642717], [-0.50659655202841058, 4.7262250303753071]], [[2.3551049262619417,
-2.7472704728416062], [-4.2131185370897501, 1.1560716927603512]], [[-1.8521430501234626, -2.8126771236453196],
[-1.6116964851382032, 4.3144406033510982]], [[-4.4005771771028979, -3.8795508309654512], [0.95903540985898683,
-0.84559016177598512]], [[-2.6007509769442674, -0.13151235868250399], [-1.5038936232862978, -3.9733280592961249]]])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[[-7.2887961231440688, 3.8093902207282344], [6.0833072777471031, -0.21226631705251187]],
[[1.7311438404991062, -5.0616705712627974], [-0.70139056289777457, 8.3780562862524945]], [[1.5734636519387317,
-2.7498293517250794], [-6.7286245247876035, -1.6292947310521066]], [[-4.3078990975897122, -4.5128032654936234],
[0.63206024695022167, -0.23840841479535957]], [[-1.0040530779684378, -1.1263869417310231], [-0.059229576068998924,
-0.46679835805516134]], [[-4.8641550357030034, -3.8223885120512522], [-4.1691335387764488,
-5.9893094897554988]]])+(1.-msk_ref)*numpy.array([[[1.3423988525253838, 6.1859833123847636], [-2.4480211460205981,
-7.7716150231049319]], [[2.0405530678616994, 1.4697497499981562], [-1.6050790517361664, 9.6522776591464066]],
[[5.3782919449857731, -7.1740740276438082], [-4.3140400874405413, -1.2665894706273826]], [[1.29185167349765,
-5.5282867298255924], [-1.891195895205303, 4.4700697288265729]], [[-6.0710651727675691, -4.757772858967197],
[-3.137784999036084, -5.7268376492158923]], [[-5.688414772562286, 0.29657368710709076], [-2.2700678609374894,
-2.4921861623155905]]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_expandedData_rank4_Symbol_rank0(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*numpy.array([[[[4.965007128412612, 3.4584141019026564, -1.0391619896304451, 4.5542963326499351],
[-0.0016792172679549466, -2.9053441565334981, 0.025786108583792711, -0.89554847161554374], [4.4904084527351209,
-0.89553646258473307, 3.8929449623498495, -2.8715607346304415]], [[-3.727374719009604, 2.2555823384608908,
0.53380019017552272, -0.29480940480144113], [-3.6344667828862445, -4.8499559892732567, 3.5342171405331317,
1.9875915936023327], [3.0643486049591804, -2.9482947381564806, 1.257296440825332, -4.4599817600046716]]],
[[[-3.7989993001254971, 4.2006768317373879, -1.9340842456373886, 0.25295780568139836], [0.15305381262779072,
2.184447614622945, -2.0595806484522039, 1.6196719151709491], [-1.550459702477788, 2.2328097059995393,
-3.2648987061947632, -1.7698524550474004]], [[-3.1067614393264673, 3.6490340896776274, 4.2948603770463407,
-3.4382940099694084], [-1.765073080880275, 2.5928931740693892, 2.2530590640640069, 2.7653349815108443],
[-0.88766895991026384, 3.8444038125137965, 3.8283329993863564, 1.6961545196727537]]], [[[-1.6941819291782823,
-4.3507603532160344, 0.58625398426930175, -4.9534370199923137], [4.3258398610183271, 4.7398172498630355,
-0.27425006429631082, -0.80958052389792012], [0.27800145594245151, -0.70646630926925713, -1.3619199397032533,
-0.22712536683851958]], [[-3.7307177958823781, -0.17135910311966995, -1.2454260400370809, 1.8499155339141273],
[0.7652733563966283, -4.2318891899847593, 4.1390775019993704, 2.1086112655335079], [-4.4480501135282662,
4.3290513315610166, -4.1098101623830443, -2.8839598970399614]]]])+(1.-msk_arg0)*numpy.array([[[[3.9323713317642746,
4.4527426387356446, 1.8489227456459432, 2.295838413561385], [-1.5932231826477694, -0.043483214358698064,
2.6866561252017789, -1.3064680912144833], [-4.563955043071191, -4.5294274892608124, 1.1139333008427865,
-3.356095173880258]], [[-0.39784058429088365, 1.3572530126249651, 0.73921609667405086, -2.8036097598039502],
[-1.6466307808609693, -3.6730522383966999, -4.2815488732075613, -3.0943250956889665], [0.84471742986867238,
3.3304241697775492, -2.7207357502431542, -1.8257126717947059]]], [[[0.21030801293033274, 4.6379651350087698,
4.213456762528347, 4.0550184068364885], [-2.5755175539757227, 2.6713165204428986, 3.2808072440183729,
2.8475364996882107], [4.8503832880401561, -0.89396576884489498, 4.8726952699950328, 1.8570156992262419]],
[[-4.6778874236692944, 2.1109769293880465, 0.79097589510131172, -2.1112073984121893], [2.558958067688426,
2.8307096810380727, 0.012443144332241474, -3.7601222060065065], [-1.3755439053562823, 2.9800220614031678,
1.6579582033193425, 4.4427116407434362]]], [[[-0.86660146317817688, 1.3032310329697525, 3.0027070238303377,
-2.9114837729491319], [-3.4567748888099636, 3.3638086688271702, 4.1486162466002519, 2.0749122046757407],
[0.84439318528796647, -3.6592289308593697, 0.77430002321168345, 1.7927967246699836]], [[-1.1981415218608116,
2.3445312580391588, -1.5436298697897444, 1.6111465180751141], [1.6230738725320037, -1.3035089800291666,
-4.6787506207538687, 2.9155460797717678], [3.3315156088599238, -3.5200805068877128, -1.1181004173108544,
-2.2485916181204857]]]])
arg1=Symbol(shape=())
res=arg0+arg1
s1=numpy.array(3.43950171094)
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[[[8.4045088393544027, 6.8979158128444471, 2.4003397213113455, 7.9937980435917257],
[3.4378224936738357, 0.5341575544082926, 3.4652878195255834, 2.543953239326247], [7.9299101636769116,
2.5439652483570576, 7.3324466732916402, 0.56794097631134921]], [[-0.28787300806781335, 5.6950840494026815,
3.9733019011173134, 3.1446923061403496], [-0.19496507194445378, -1.410454278331466, 6.9737188514749224,
5.4270933045441234], [6.5038503159009711, 0.49120697278531011, 4.6967981517671227, -1.0204800490628809]]],
[[[-0.35949758918370645, 7.6401785426791786, 1.5054174653044021, 3.6924595166231891], [3.5925555235695814,
5.6239493255647357, 1.3799210624895868, 5.0591736261127398], [1.8890420084640027, 5.67231141694133, 0.1746030047470275,
1.6696492558943903]], [[0.33274027161532338, 7.0885358006194181, 7.7343620879881314, 0.0012077009723823195],
[1.6744286300615157, 6.0323948850111799, 5.6925607750057976, 6.204836692452635], [2.5518327510315268,
7.2839055234555872, 7.2678347103281471, 5.1356562306145443]]], [[[1.7453197817635084, -0.91125864227424369,
4.0257556952110924, -1.513935309050523], [7.7653415719601178, 8.1793189608048262, 3.1652516466454799,
2.6299211870438706], [3.7175031668842422, 2.7330354016725336, 2.0775817712385374, 3.2123763441032711]],
[[-0.29121608494058737, 3.2681426078221207, 2.1940756709047098, 5.289417244855918], [4.204775067338419,
-0.79238747904296858, 7.5785792129411611, 5.5481129764752986], [-1.0085484025864755, 7.7685530425028073,
-0.67030845144125362, 0.55554181390182933]]]])+(1.-msk_ref)*numpy.array([[[[7.3718730427060652, 7.8922443496774353,
5.2884244565877339, 5.7353401245031757], [1.8462785282940213, 3.3960184965830926, 6.1261578361435696,
2.1330336197273074], [-1.1244533321294004, -1.0899257783190217, 4.5534350117845772, 0.083406537061532671]],
[[3.041661126650907, 4.7967547235667558, 4.1787178076158416, 0.63589195113784047], [1.7928709300808214,
-0.23355052745490923, -0.84204716226577059, 0.34517661525282417], [4.2842191408104631, 6.7699258807193399,
0.71876596069863652, 1.6137890391470848]]], [[[3.6498097238721234, 8.0774668459505605, 7.6529584734701377,
7.4945201177782792], [0.86398415696606801, 6.1108182313846893, 6.7203089549601636, 6.2870382106300013],
[8.2898849989819468, 2.5455359420968957, 8.3121969809368235, 5.2965174101680326]], [[-1.2383857127275038,
5.5504786403298372, 4.2304776060431024, 1.3282943125296014], [5.9984597786302167, 6.2702113919798634,
3.4519448552740322, -0.32062049506471579], [2.0639578055855083, 6.4195237723449585, 5.0974599142611332,
7.8822133516852269]]], [[[2.5729002477636138, 4.7427327439115432, 6.4422087347721284, 0.52801793799265884],
[-0.017273177868172951, 6.8033103797689609, 7.5881179575420425, 5.5144139156175314], [4.2838948962297572,
-0.21972721991757904, 4.2138017341534741, 5.2322984356117743]], [[2.2413601890809791, 5.7840329689809495,
1.8958718411520463, 5.0506482290169048], [5.0625755834737944, 2.1359927309126241, -1.239248909812078,
6.3550477907135585], [6.7710173198017145, -0.080578795945922099, 2.3214012936309363, 1.190910092821305]]]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_expandedData_rank4_Symbol_rank4(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*numpy.array([[[[3.2510674404409041, 2.1171696862303406, 2.9610258759664267, -3.8373977579450456],
[0.75383244276133166, 2.4077943881602728, 3.873284406870285, 3.7937584009819574], [-4.6069898901399364,
-2.5452970249895754, 3.650830786457707, -0.56630176651201847]], [[3.6738989513815135, -1.1553536380556686,
4.303352195803182, 2.0201689947921695], [2.5110280594242029, 1.1178178456135743, 3.5722095880572251,
-3.0495901167648221], [-1.8161969765914288, -3.850369287459924, 1.8305771607495833, 3.8129356009276751]]],
[[[4.8159492177547296, -2.7259760165966638, -0.056119891503465524, 3.2320437499651025], [4.1412540490540568,
2.3145635424798332, 4.2298625240821792, -4.9326174629443722], [1.2505234798682396, 4.1728981653768358,
-1.4526511101284445, -0.73865645812869563]], [[-2.5027203270038956, -0.75821705726011146, -2.0074201432570495,
-0.20166798891695503], [1.7962444938241209, 4.9186635916785164, -3.3612255674731486, -3.1402103698143327],
[4.8100127068213077, -3.7003932729639377, -2.3809463861562454, 2.6337296431542621]]], [[[0.8461884816413443,
2.2850095300693116, 3.1039351776827235, 2.7358221987272575], [-1.331100327658973, -2.4718869003284438,
3.8392116060077814, 3.7886003252177218], [-2.740692362699221, -1.1104811343803189, 1.065443269317063,
-1.604926521206449]], [[3.1359320207935291, 2.4159415877072101, -2.9781841648177654, 0.4457695581762291],
[1.4022534028069558, 3.2181877465159641, 4.1561033889739196, -4.5314636502141923], [2.4896032954770373,
-1.6749755107952033, -4.2977752660345292, 4.3862296692093636]]]])+(1.-msk_arg0)*numpy.array([[[[3.8098232095134126,
-2.0180524002497693, 4.420784171182504, -2.4324750966542674], [2.4681882567616125, 3.0279649104786941,
2.2383665512055266, -0.091420157761364251], [4.7846856391630048, 0.45001495814867454, 2.8428137570111911,
3.6542996408716562]], [[-3.3832925941075711, -4.6684050424331947, 2.7145812310865534, 0.57489640415196952],
[3.2363298539062395, -0.28076205609599914, -2.1610563710523598, -3.9600308036480381], [4.1445091213012599,
0.23464603550937735, -4.9214532841127738, 3.7601288072640866]]], [[[4.5878923885513938, -2.7602444517968006,
-2.4823493575559641, -1.1998619544811917], [-1.0165322624110429, 4.8743114304602564, 3.0069704689379755,
2.0086372739622043], [-1.7482883016273565, 4.5233781656491008, 1.0481669308330579, 3.3780108680134457]],
[[-4.5351514069636076, -4.760484108729206, -1.7334568308716203, -4.3080131499917833], [4.0321976091043883,
-2.6576000312675063, 1.3372423488299923, -3.8949616711167625], [3.5793384711817051, 2.60693067621275,
1.8056256765125287, -3.9915454170699869]]], [[[0.39851532295995273, 2.2465287291059273, 0.64170560779626662,
-4.7331314705888738], [3.5329039709028898, -2.5311269573107662, 2.8367974744858193, -4.3457969220676684],
[-1.526677955424999, -2.5983211468943357, -1.3293797580217093, -3.1887378668078279]], [[3.1416335105809505,
0.35146012646543134, 2.428390004415637, 2.7813900205500861], [3.5228217461650111, -0.012304332300811183,
-3.1395042313107369, 4.8647351561551702], [2.2570133784920099, -1.7535240218446777, 0.38792070998653028,
-0.21839923153693785]]]])
arg1=Symbol(shape=(3, 2, 3, 4))
res=arg0+arg1
s1=numpy.array([[[[-0.55399336747432937, -3.6468486902030306, 2.4533567494215669, 4.8267547347789659],
[1.1480960590338416, 3.5599245920968787, -2.8247534868419724, -2.2031349101131505], [1.7520095897646017,
4.4293583295521266, -3.2046920932014888, -3.8760923163847472]], [[3.9288042477427645, 1.103593535294765,
0.62546922225950485, 2.5431633219905123], [2.5483588394973191, -0.82358610517599207, -0.47010674146441023,
2.7635563586840011], [3.5616440522317419, 2.2995934729430481, -3.501591556463012, 1.3778428754586027]]],
[[[-4.3918539920661051, 0.24976043236636869, -2.4847081470778463, 4.8636790550226792], [-4.2172400078729559,
-2.0316184192507647, -0.53464794178739794, -0.035422588600630966], [1.7049703562375615, 4.2019750499164399,
-3.7430217705554858, -3.4952387702082346]], [[-0.39925876875124189, 1.4505137462439404, -4.1941814051173072,
-1.844757872605356], [-3.4448187389632414, -3.5340944666273377, -3.178247383159305, -1.7824872241435519],
[-3.6843631882800798, -4.1186208792142187, 2.0636953370355959, -0.18717114434561122]]], [[[-2.4316812831173742,
0.39582208925882689, 1.4893695917228467, -3.1232026180567773], [2.1122901499636226, 4.9884613457151978,
-4.7793541216702149, -3.9541373136233391], [-4.8256481088328194, -0.10764491664526066, 2.9970513787255895,
-1.0443943611478437]], [[3.6491162738908258, 3.4225261399204765, -2.9600723325757849, 3.3422667802452324],
[-3.763493116056098, 4.6894908619506595, 2.532040050484988, 0.99028387045053101], [2.5962274887920085,
-0.2721955960411897, -4.7946284910477441, -0.96141278632713245]]]])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[[[2.6970740729665748, -1.52967900397269, 5.4143826253879936, 0.98935697683392032],
[1.9019285017951733, 5.9677189802571515, 1.0485309200283126, 1.5906234908688068], [-2.8549803003753347,
1.8840613045625512, 0.44613869325621813, -4.4423940828967652]], [[7.6027031991242779, -0.051760102760903592,
4.9288214180626868, 4.5633323167826818], [5.059386898921522, 0.29423174043758227, 3.1021028465928149,
-0.28603375808082099], [1.7454470756403131, -1.550775814516876, -1.6710143957134287, 5.1907784763862779]]],
[[[0.42409522568862457, -2.4762155842302951, -2.5408280385813118, 8.0957228049877816], [-0.075985958818899135,
0.28294512322906851, 3.6952145822947813, -4.9680400515450032], [2.9554938361058012, 8.3748732152932757,
-5.1956728806839303, -4.2338952283369302]], [[-2.9019790957551375, 0.6922966889838289, -6.2016015483743567,
-2.046425861522311], [-1.6485742451391205, 1.3845691250511787, -6.5394729506324536, -4.922697593957885],
[1.1256495185412279, -7.8190141521781564, -0.3172510491206495, 2.4465584988086508]]], [[[-1.5854928014760299,
2.6808316193281385, 4.5933047694055702, -0.38738041932951983], [0.78118982230464962, 2.516574445386754,
-0.94014251566243345, -0.16553698840561726], [-7.5663404715320404, -1.2181260510255796, 4.0624946480426525,
-2.6493208823542926]], [[6.7850482946843549, 5.8384677276276866, -5.9382564973935503, 3.7880363384214615],
[-2.3612397132491423, 7.9076786084666235, 6.6881434394589077, -3.5411797797636613], [5.0858307842690458,
-1.9471711068363931, -9.0924037570822733, 3.4248168828822312]]]])+(1.-msk_ref)*numpy.array([[[[3.2558298420390832,
-5.6649010904527994, 6.8741409206040709, 2.3942796381246985], [3.6162843157954541, 6.5878895025755728,
-0.58638693563644573, -2.2945550678745148], [6.5366952289276066, 4.8793732877008011, -0.36187833619029774,
-0.22179267551309101]], [[0.54551165363519338, -3.5648115071384296, 3.3400504533460582, 3.1180597261424818],
[5.7846886934035586, -1.1043481612719912, -2.63116311251677, -1.196474444964037], [7.7061531735330018,
2.5342395084524254, -8.4230448405757858, 5.1379716827226893]]], [[[0.19603839648528876, -2.5104840194304319,
-4.9670575046338108, 3.6638171005414875], [-5.2337722702839988, 2.8426930112094917, 2.4723225271505775,
1.9732146853615733], [-0.043317945389794943, 8.7253532155655407, -2.6948548397224279, -0.11722790219478885]],
[[-4.9344101757148495, -3.3099703624852657, -5.9276382359889279, -6.1527710225971397], [0.58737887014114687,
-6.1916944978948436, -1.8410050343293127, -5.6774488952603139], [-0.10502471709837469, -1.5116902030014687,
3.8693210135481246, -4.1787165614155981]]], [[[-2.0331659601574215, 2.6423508183647542, 2.1310751995191133,
-7.8563340886456512], [5.6451941208665124, 2.4573343884044316, -1.9425566471843956, -8.2999342356910084],
[-6.3523260642578183, -2.7059660635395963, 1.6676716207038802, -4.2331322279556716]], [[6.7907497844717764,
3.7739862663859078, -0.53168232816014793, 6.1236568007953185], [-0.24067136989108695, 4.6771865296498483,
-0.60746418082574882, 5.8550190266057012], [4.8532408672840184, -2.0257196178858674, -4.4067077810612139,
-1.1798120178640703]]]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
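# The test_sub_overloaded_constData_* methods below exercise the overloaded
# subtraction operator with a constant Data object on the left and a Symbol on
# the right. Each test builds the symbolic expression res=arg0-arg1, substitutes
# a concrete numpy value for the Symbol, and checks type, shape, and values:
# the result must match the precomputed reference to within the relative
# tolerance RES_TOL, measured in the Lsup (supremum) norm. Operand ranks are
# varied from 0 to 4.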
def test_sub_overloaded_constData_rank0_Symbol_rank0(self):
arg0=Data(1.30830371112,self.functionspace)
arg1=Symbol(shape=())
res=arg0-arg1
s1=numpy.array(0.0412291309402)
sub=res.substitute({arg1:s1})
ref=Data(1.26707458018,self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
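# In the rank0_Symbol_rankN tests the scalar Data is broadcast across the
# Symbol's shape, so the result takes its shape from the Symbol operand.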
def test_sub_overloaded_constData_rank0_Symbol_rank1(self):
arg0=Data(-4.2604726935,self.functionspace)
arg1=Symbol(shape=(2,))
res=arg0-arg1
s1=numpy.array([-3.8546037299533653, -1.305392606117024])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([-0.4058689635493371, -2.9550800873856784]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_constData_rank0_Symbol_rank2(self):
arg0=Data(0.902009664206,self.functionspace)
arg1=Symbol(shape=(4, 5))
res=arg0-arg1
s1=numpy.array([[-3.117681444740418, -3.2512793024980069, -3.7762244881344218, -0.50644943812549315,
3.066726444630655], [-2.6348956508380805, -0.90372740616696667, 0.5252271533586752, 2.0132741900533446,
2.0837322808099037], [0.088376617597372586, 0.67864487020517306, 3.7057383001711681, 1.0445042366908988,
-2.1093161712985955], [4.328915747720707, -0.73501622742024342, -0.088412628376807412, -3.0414953794209754,
1.610361274316344]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[4.0196911089468177, 4.1532889667044071, 4.6782341523408215, 1.4084591023318929,
-2.1647167804242553], [3.5369053150444802, 1.8057370703733664, 0.37678251084772452, -1.1112645258469449,
-1.181722616603504], [0.81363304660902713, 0.22336479400122666, -2.8037286359647684, -0.14249457248449904,
3.0113258355049952], [-3.4269060835143073, 1.6370258916266431, 0.99042229258320713, 3.9435050436273751,
-0.7083516101099443]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_constData_rank0_Symbol_rank3(self):
arg0=Data(4.30012329043,self.functionspace)
arg1=Symbol(shape=(6, 2, 2))
res=arg0-arg1
s1=numpy.array([[[-2.4328051948060772, 1.3096803933228829], [-1.9201038070201615, 2.2529209930562519]],
[[4.4911763191005498, -0.0070408039855616167], [-4.5070979412665588, 0.23394826644475319]], [[-2.0679275681214171,
4.7260141882743518], [-1.9530690972223672, 4.2165911161948344]], [[4.2340594486013217, 0.31531838157863668],
[1.2102543060708451, 4.5768051588147358]], [[4.9016533619135778, 1.0237157761801843], [-1.6198381225390657,
1.509534129406096]], [[-2.8351524725878399, -0.8712771035569391], [-1.2500793307427105, 0.52784760832550681]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[6.732928485237343, 2.990442897108383], [6.2202270974514278, 2.0472022973750139]],
[[-0.19105302866928398, 4.3071640944168275], [8.8072212316978238, 4.0661750239865126]], [[6.3680508585526834,
-0.42589089784308598], [6.2531923876536331, 0.083532174236431445]], [[0.066063841829944181, 3.9848049088526292],
[3.0898689843604208, -0.27668186838346998]], [[-0.60153007148231197, 3.2764075142510816], [5.9199614129703315,
2.7905891610251699]], [[7.1352757630191057, 5.1714003939882049], [5.5502026211739768,
3.772275682105759]]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_constData_rank0_Symbol_rank4(self):
arg0=Data(-3.5839426267,self.functionspace)
arg1=Symbol(shape=(3, 2, 3, 4))
res=arg0-arg1
s1=numpy.array([[[[-2.9729696451374421, 2.7845056200381855, 0.070436437102223692, 0.66836223796868044],
[0.40381761203578836, -1.7869220467261826, -4.3681167712065552, 1.0762008553734699], [-3.4293067325266744,
-3.8959384230092855, -4.2869773308861872, -3.5982581222849266]], [[3.8085384127848325, -4.9902013750126919,
1.7025140755302903, -1.8585391591273237], [-1.8948326373524536, 2.0874520505745666, -1.8647114753321095,
3.9665649921657007], [-2.6617432109425376, -0.043781338271665859, -4.3924469058705498, -4.6038566089651081]]],
[[[4.1612414942039617, -0.24691459950937489, 1.8801077349311939, -4.0607604598486082], [-0.48975931816079132,
4.776651055544292, 2.5892649853139229, 2.6300466396994988], [-0.6331493645323949, -4.8747858313906498,
2.5714462579440713, -0.12625615907892662]], [[1.8766405716198298, 0.97931619405259518, -1.2333119307639082,
3.632140408148242], [0.96979041799351151, -4.0819837173164526, 3.4625138677193164, -1.7431511130821575],
[-2.7530992377422381, -3.1495479306859906, 1.3466227111831488, -2.3016323722421128]]], [[[-2.8378224290103491,
-0.7230057223129247, 0.95865498114414649, 0.14297561114879365], [2.3319242484901492, 4.9972541799736234,
-1.7121650896762564, 1.6097551517446558], [2.7133813837524077, -3.1913323682416994, -0.39896207531318861,
-3.2753783571190107]], [[1.3158800827274399, -0.034075573686918936, 3.2707189112070392, -2.9118211235462041],
[4.362994678434946, -3.2771781302292515, 3.4919565479064456, 1.6061522420425254], [-1.8973785117347788,
-4.4461539342202174, -3.8132486661529263, -0.74231592463494511]]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[[-0.61097298156602342, -6.368448246741651, -3.6543790638056892, -4.252304864672146],
[-3.9877602387392539, -1.7970205799772829, 0.78417414450308964, -4.6601434820769354], [-0.15463589417679113,
0.31199579630581997, 0.70303470418272163, 0.014315495581461057]], [[-7.392481039488298, 1.4062587483092264,
-5.2864567022337559, -1.7254034675761418], [-1.689109989351012, -5.6713946772780321, -1.7192311513713561,
-7.5505076188691662], [-0.9221994157609279, -3.5401612884317997, 0.80850427916708423, 1.0199139822616425]]],
[[[-7.7451841209074272, -3.3370280271940906, -5.4640503616346594, 0.4768178331451427], [-3.0941833085426742,
-8.3605936822477567, -6.1732076120173884, -6.2139892664029643], [-2.9507932621710706, 1.2908432046871843,
-6.1553888846475369, -3.4576864676245389]], [[-5.4605831983232953, -4.5632588207560607, -2.3506306959395573,
-7.2160830348517075], [-4.553733044696977, 0.49804109061298707, -7.0464564944227819, -1.840791513621308],
[-0.83084338896122745, -0.43439469601747493, -4.9305653378866143, -1.2823102544613527]]], [[[-0.74612019769311644,
-2.8609369043905408, -4.542597607847612, -3.7269182378522592], [-5.9158668751936148, -8.5811968066770881,
-1.8717775370272092, -5.1936977784481213], [-6.2973240104558732, -0.39261025846176612, -3.1849805513902769,
-0.30856426958445482]], [[-4.8998227094309055, -3.5498670530165466, -6.8546615379105047, -0.67212150315726138],
[-7.9469373051384116, -0.306764496474214, -7.0758991746099111, -5.1900948687459909], [-1.6865641149686867,
0.8622113075167519, 0.22930603944946082, -2.8416267020685204]]]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
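# The remaining constData tests pair an array-valued Data with either a scalar
# Symbol (broadcast across the Data's shape) or a Symbol of identical shape
# (plain elementwise subtraction).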
def test_sub_overloaded_constData_rank1_Symbol_rank0(self):
arg0=Data(numpy.array([2.6649927252905226, 0.29496968217893382]),self.functionspace)
arg1=Symbol(shape=())
res=arg0-arg1
s1=numpy.array(1.03366663195)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([1.6313260933372291, -0.73869694977435962]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_constData_rank1_Symbol_rank1(self):
arg0=Data(numpy.array([3.9090880537794526, -3.9706193840215942]),self.functionspace)
arg1=Symbol(shape=(2,))
res=arg0-arg1
s1=numpy.array([-3.7233870114697742, 0.99043840493200186])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([7.6324750652492268, -4.9610577889535961]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_constData_rank2_Symbol_rank0(self):
arg0=Data(numpy.array([[2.8033126273843685, 0.51509190965393792, 3.931306976936968, -3.3823534090429486,
-2.3486719525293087], [-2.9837425664154784, -2.4457160287299686, 3.8981965382683743, -0.89609359902144714,
4.1620406111464288], [3.6868893591462246, -2.9993029597001462, 1.8283120616948665, -2.0195573949932277,
-2.1640627499057361], [-2.9723279323425489, -4.8559061533246624, -1.0130455282709172, -3.7833351321644395,
3.514692525422209]]),self.functionspace)
arg1=Symbol(shape=())
res=arg0-arg1
s1=numpy.array(4.86937457463)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[-2.0660619472497519, -4.3542826649801825, -0.93806759769715242, -8.2517279836770694,
-7.2180465271634286], [-7.8531171410495988, -7.315090603364089, -0.97117803636574607, -5.7654681736555675,
-0.70733396348769162], [-1.1824852154878958, -7.8686775343342665, -3.0410625129392539, -6.8889319696273486,
-7.0334373245398565], [-7.8417025069766693, -9.7252807279587827, -5.8824201029050371, -8.6527097067985608,
-1.3546820492119114]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_constData_rank2_Symbol_rank2(self):
arg0=Data(numpy.array([[-1.1140360715186182, -1.5235600156934481, 4.3075103934286023, 4.6800377743432158,
-3.2505150436972521], [0.39123458636258768, 0.41088806870879768, -2.9614108446790501, 1.1049238977643405,
0.92166667279843395], [0.54565864417397059, -4.8476249672143004, 4.9444652981547943, 4.0252126389168215,
-3.9123423425216322], [-3.6777596228844844, -3.4408972758983558, 2.7718180074050611, -0.3997152204895924,
-0.16573647825956073]]),self.functionspace)
arg1=Symbol(shape=(4, 5))
res=arg0-arg1
s1=numpy.array([[-2.4209487163246299, 1.3152643083131128, -0.71046464711788015, 0.21557543046364458,
-2.202065459251934], [-3.9101544501984198, -2.8682151089642827, 2.7125251197023488, 1.4173123031722534,
2.7246295240806209], [-1.5744991442525436, 3.0598215212654001, 0.63494427405471487, -4.906149376046594,
-1.6839564426436748], [4.0729555430880922, -0.83371622418680769, 0.46337987461630981, 4.0014755703742395,
-2.1103899940006032]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[1.3069126448060118, -2.8388243240065609, 5.0179750405464825, 4.4644623438795712,
-1.0484495844453181], [4.301389036561007, 3.2791031776730803, -5.6739359643813989, -0.31238840540791291,
-1.8029628512821869], [2.1201577884265141, -7.9074464884797004, 4.3095210241000794, 8.9313620149634154,
-2.2283858998779573], [-7.7507151659725766, -2.6071810517115481, 2.3084381327887513, -4.4011907908638319,
1.9446535157410425]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_constData_rank3_Symbol_rank0(self):
arg0=Data(numpy.array([[[-2.6064326776506652, 4.9989076052590633], [-3.0068821433777249, -3.1193113732509516]],
[[-1.3190483681618739, 3.9479827067009108], [1.0954417889014865, 4.6359051697534426]], [[-2.9778493741722056,
3.4845430816156977], [1.7569072943914552, 1.1616150547614428]], [[-0.91210869485198565, -1.3406976214361355],
[3.2217649968914159, -2.662260898242006]], [[4.1697693146337542, -1.1741423631833072], [-4.9803850608859115,
1.2700647554700222]], [[4.6074170359664368, 1.453706456526124], [0.20949339688511692,
3.0091215511346796]]]),self.functionspace)
arg1=Symbol(shape=())
res=arg0-arg1
s1=numpy.array(-1.04145599079)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[-1.5649766868561219, 6.0403635960536066], [-1.9654261525831815, -2.0778553824564083]],
[[-0.27759237736733056, 4.9894386974954541], [2.1368977796960298, 5.6773611605479859]], [[-1.9363933833776623,
4.525999072410241], [2.7983632851859985, 2.2030710455559861]], [[0.12934729594255767, -0.29924163064159215],
[4.2632209876859593, -1.6208049074474626]], [[5.2112253054282975, -0.13268637238876391], [-3.9389290700913682,
2.3115207462645655]], [[5.6488730267609801, 2.4951624473206673], [1.2509493876796602,
4.0505775419292229]]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_constData_rank3_Symbol_rank3(self):
arg0=Data(numpy.array([[[2.0075159970537113, 4.417162011434554], [0.71949384400506577, 1.0783048900035652]],
[[4.7614254606302335, -2.0888542276996978], [-3.5997702799671547, 4.2825487871951644]], [[-0.39389734575197544,
1.3283252585178928], [3.6919455158435834, -0.76277259642421402]], [[-4.4972180700076887, -3.7983795355307128],
[-0.26779668046970784, -0.79380221724008582]], [[-2.0572521505738273, -1.5154686544559368], [4.0972713376059851,
4.5986089620495108]], [[-1.3971821196462377, 0.16028646761807508], [-0.63755809097850857,
-3.3787710682197272]]]),self.functionspace)
arg1=Symbol(shape=(6, 2, 2))
res=arg0-arg1
s1=numpy.array([[[3.5103565349856751, 0.91526758558677379], [-3.7224124618951135, -0.27931399630195397]],
[[1.5813622936549105, 3.6172915696233972], [-1.2364412564258132, 0.16417768270487709]], [[0.64050559170122234,
4.6361361331624593], [-0.47839680540824325, -2.1615310941440589]], [[-0.85667930966756511, 1.669882578368358],
[0.22343162562157293, 0.80905790542025358]], [[-3.5873387244847543, 3.1163266795230058], [3.5553732672252671,
-4.6758779472194405]], [[3.6742958529176484, 0.58762359541383802], [1.5778519953325496, -0.39731537378910975]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[-1.5028405379319638, 3.5018944258477802], [4.4419063059001793, 1.3576188863055192]],
[[3.180063166975323, -5.7061457973230949], [-2.3633290235413416, 4.1183711044902873]], [[-1.0344029374531978,
-3.3078108746445665], [4.1703423212518267, 1.3987584977198448]], [[-3.6405387603401236, -5.4682621138990708],
[-0.49122830609128076, -1.6028601226603394]], [[1.5300865739109271, -4.6317953339789426], [0.54189807038071791,
9.2744869092689513]], [[-5.0714779725638861, -0.42733712779576294], [-2.2154100863110582,
-2.9814556944306174]]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_constData_rank4_Symbol_rank0(self):
arg0=Data(numpy.array([[[[0.66483074145605592, 2.9129070748039982, -1.8655842911981346, -1.098354904466996],
[1.7426470733136448, -2.4896761957460898, 4.3864323453867851, -4.0781460331955177], [-0.62183708580819008,
-2.6186592235582786, -1.8750164189422014, -3.9631241880095969]], [[4.0419620323350909, 0.15536839603964836,
1.9771157591398101, -2.6101097405194453], [-4.7364297803535704, 1.8318126417179714, 3.2354822684907454,
2.2507758179659376], [-4.8699934080808029, -0.35744120243411981, 4.0908957400805122, -3.8440017446794084]]],
[[[4.5466344627836612, -2.8174576749848423, -0.32339288977492142, -3.3368918944053516], [3.3311423168153738,
-1.2448667289851647, -0.66737673743075376, -3.9953617725851598], [-4.8878412407428931, 3.1347720870691358,
-2.4390985397355847, -3.5615840737730475]], [[-3.7978882365989697, 4.345238312451805, 2.8310129832366435,
2.8564779239624674], [-0.85025481289091864, -4.3757742754757345, 3.5451710843902031, -2.5068001174158816],
[2.6943798866386315, 2.2746017608025317, -4.2655778273063607, 0.97165631163417387]]], [[[-2.9330039029788955,
4.3910413333213238, 2.5513441899802833, -3.8678703253194402], [-2.6748516851594308, -3.8887038302549062,
1.2485088138696518, -3.9629424578182251], [-0.38166273681210328, 3.82781593241344, -4.1817331752844087,
4.682478964767725]], [[-0.85849290617372809, -0.49338756563096275, -1.0480256440941615, -0.51008618582467946],
[-0.26820315453886501, 4.8354933917592806, 2.9555158912003154, -2.4766421456452479], [2.5098219987182944,
3.6215601735655589, -4.4497307132070123, -3.9295385075107028]]]]),self.functionspace)
arg1=Symbol(shape=())
res=arg0-arg1
s1=numpy.array(-2.59361652138)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[[3.2584472628375467, 5.506523596185489, 0.72803223018335617, 1.4952616169144948],
[4.3362635946951356, 0.10394032563540101, 6.9800488667682759, -1.4845295118140269], [1.9717794355733007,
-0.025042702176787834, 0.7186001024392894, -1.3695076666281061]], [[6.6355785537165817, 2.7489849174211392,
4.5707322805213009, -0.01649321913795454], [-2.1428132589720796, 4.4254291630994622, 5.8290987898722362,
4.8443923393474284], [-2.2763768866993122, 2.236175318947371, 6.6845122614620029, -1.2503852232979176]]],
[[[7.140250984165152, -0.22384115360335155, 2.2702236316065694, -0.74327537302386082], [5.9247588381968646,
1.3487497923963261, 1.926239783950737, -1.401745251203669], [-2.2942247193614023, 5.7283886084506266,
0.15451798164590613, -0.96796755239155674]], [[-1.2042717152174789, 6.9388548338332958, 5.4246295046181343,
5.4500944453439581], [1.7433617084905721, -1.7821577540942437, 6.1387876057716939, 0.08681640396560919],
[5.2879964080201223, 4.8682182821840225, -1.6719613059248699, 3.5652728330156647]]], [[[-0.33938738159740467,
6.9846578547028146, 5.1449607113617741, -1.2742538039379494], [-0.081235163777940045, -1.2950873088734154,
3.8421253352511426, -1.3693259364367343], [2.2119537845693875, 6.4214324537949308, -1.5881166539029179,
7.2760954861492158]], [[1.7351236152077627, 2.100228955750528, 1.5455908772873292, 2.0835303355568113],
[2.3254133668426258, 7.4291099131407714, 5.5491324125818062, 0.11697437573624292], [5.1034385200997852,
6.2151766949470497, -1.8561141918255215, -1.335921986129212]]]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_constData_rank4_Symbol_rank4(self):
arg0=Data(numpy.array([[[[2.140332416756844, -4.5756565160935745, 1.0268217328307561, 1.594533973931731],
[4.1426026647673879, 0.1548614651600202, 3.351820863446946, 0.54777524679756073], [-4.6470169243406527,
-3.4101935702258368, 1.3604597013400213, -4.3236653508957374]], [[2.3543066928954612, 1.6355558219698443,
3.8590758340122093, 0.055467084597328409], [1.3949738751098479, -2.9042097100731445, 2.1331143130237962,
-0.45715627400394165], [3.9505052117900146, -4.8644226435153097, 0.13641466419900183, 0.92434447564323374]]],
[[[-4.2036478385109302, -2.2096856472681958, -3.309442061812593, -0.17761420723311439], [-4.5417481392819026,
3.354117107537796, 2.9925164896060084, 4.231145636082223], [-4.3165407391400308, -0.16204594013147311,
-1.5308101185053733, 3.7017204822457384]], [[2.4648028362561725, 0.43817614121240833, -4.4908194091317366,
-0.081928750874263656], [-3.4087689978816016, 4.259133980931324, -4.2850896710829334, 4.6395735766216326],
[-1.3584480043808989, -4.7738821023855085, -1.2617431337636842, -1.2598313032270116]]], [[[2.2708892792624855,
1.9132737394453327, -0.50215367058696003, 0.19108419265161469], [-2.0796597802531669, 1.1505151966811367,
1.2957662425378791, -1.5883201097665802], [-1.7035021892623838, 4.8639671345493021, 3.1243484697100534,
0.47610495992410051]], [[-4.0444287366693015, -1.3614006776767349, -0.18268931922481002, 4.8063591217845332],
[3.1407426206783704, 2.8940879164962441, -4.9664997014592807, 1.6951588068340158], [-3.895479459710558,
1.7220903215355694, -3.7165673657855267, 3.1903385713544257]]]]),self.functionspace)
arg1=Symbol(shape=(3, 2, 3, 4))
res=arg0-arg1
s1=numpy.array([[[[-4.3482304868754991, -1.2480666735558845, 0.43538858115159051, -2.0858236027245205],
[-2.442305699452354, 2.0213192586154003, -2.5262404161243679, -4.458062700052194], [0.26228138879138641,
-2.6430658161459242, -4.7246503759525602, 4.2538788761081854]], [[-1.6124403577544308, -1.8284497197976037,
-3.0160374139385002, 2.7523938918136759], [1.4437250527651582, -2.7814473787336489, 3.5116683735594361,
-3.9808640616716562], [1.7054962689298705, 4.7974185413341068, 1.9447068850818283, -1.2797130952071156]]],
[[[3.7642823106611107, 0.11145650212965919, -0.096799862214571597, 2.0215787533002523], [0.26390717935294816,
0.12612295721321498, 4.0275730341758482, -1.2268861937462172], [-2.947926663434548, -1.4514539315574626,
2.4550945474164232, -2.7897655841602651]], [[-1.5947829088079746, 0.80620330852535815, -4.5614285986030234,
-1.9102368071164841], [2.0807019362652692, -4.099640999530064, -1.8395330667711352, -4.6367501410986929],
[-2.5162327168837786, 4.6954385782651951, -2.1576821461704854, -1.62194811763983]]], [[[0.06729391952569852,
-0.57919376543293488, -3.1838952254737416, 1.7056529660452817], [3.6116233555564143, 0.81964000588296315,
-0.16440769780998377, 0.079355513141521783], [2.9805073823987431, 1.3188532056435962, 3.4153481616516537,
-2.5138710663982189]], [[2.8884594089569315, 1.1351683507610142, -0.68804270946144719, -4.7325886514124882],
[1.1204800401276476, 0.55566378590737031, 0.94240513232859335, 2.9610440134171334], [-2.6222587774463815,
-4.4048348584786705, -0.29650368246657699, -1.0078523107846902]]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[[6.4885629036323431, -3.32758984253769, 0.59143315167916555, 3.6803575766562515],
[6.5849083642197419, -1.8664577934553801, 5.8780612795713143, 5.0058379468497547], [-4.9092983131320391,
-0.76712775407991263, 6.0851100772925815, -8.5775442270039228]], [[3.9667470506498921, 3.464005541767448,
6.8751132479507095, -2.6969268072163475], [-0.048751177655310229, -0.12276233133949566, -1.3785540605356399,
3.5237077876677145], [2.2450089428601441, -9.6618411848494166, -1.8082922208828265, 2.2040575708503494]]],
[[[-7.9679301491720409, -2.321142149397855, -3.2126421995980214, -2.1991929605333667], [-4.8056553186348507,
3.227994150324581, -1.0350565445698399, 5.4580318298284407], [-1.3686140757054828, 1.2894079914259895,
-3.9859046659217965, 6.4914860664060035]], [[4.0595857450641475, -0.36802716731294982, 0.070609189471286804,
1.8283080562422205], [-5.4894709341468708, 8.3587749804613871, -2.4455566043117982, 9.2763237177203255],
[1.1577847125028797, -9.4693206806507035, 0.89593901240680118, 0.3621168144128184]]], [[[2.203595359736787,
2.4924675048782676, 2.6817415548867816, -1.514568773393667], [-5.6912831358095808, 0.33087519079817351,
1.4601739403478629, -1.667675622908102], [-4.684009571661127, 3.5451139289057059, -0.29099969194160025,
2.9899760263223194]], [[-6.932888145626233, -2.4965690284377491, 0.50535339023663717, 9.5389477731970214],
[2.0202625805507228, 2.3384241305888738, -5.908904833787874, -1.2658852065831177], [-1.2732206822641765,
6.1269251800142399, -3.4200636833189497, 4.1981908821391158]]]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
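# The test_sub_overloaded_taggedData_* methods repeat the same rank grid with
# tagged Data: arg0 carries a second value under tag 1 via setTaggedValue, and
# the reference carries a matching tagged value, so both the default and the
# tagged component of the result are verified.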
def test_sub_overloaded_taggedData_rank0_Symbol_rank0(self):
arg0=Data(-2.29417952191,self.functionspace)
arg0.setTaggedValue(1,-4.27612309963)
arg1=Symbol(shape=())
res=arg0-arg1
s1=numpy.array(-2.86386679086)
sub=res.substitute({arg1:s1})
ref=Data(0.569687268944,self.functionspace)
ref.setTaggedValue(1,-1.41225630877)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_taggedData_rank0_Symbol_rank1(self):
arg0=Data(-4.72691427991,self.functionspace)
arg0.setTaggedValue(1,0.483106242273)
arg1=Symbol(shape=(2,))
res=arg0-arg1
s1=numpy.array([-0.58516003749737244, 2.93231182282255])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([-4.1417542424175267, -7.6592261027374491]),self.functionspace)
ref.setTaggedValue(1,numpy.array([1.0682662797700972, -2.4492055805498252]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_taggedData_rank0_Symbol_rank2(self):
arg0=Data(4.84060376911,self.functionspace)
arg0.setTaggedValue(1,-3.32867505476)
arg1=Symbol(shape=(4, 5))
res=arg0-arg1
s1=numpy.array([[3.5332516865172998, 4.2256878903288939, -4.6404295927681405, 4.9721874322243114,
-1.5545932240349902], [0.40603544670242542, -2.879718425724147, -2.1385047584627337, 4.6127992237598132,
0.57646645021785048], [-2.6334801212800754, -2.3655947826469701, 0.48086858542515643, 1.0360291664664301,
-3.4378490059536082], [-0.23853194944872236, -2.0363663305583768, -2.3289186751171798, 3.5102407359843486,
4.1303419895739388]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[1.3073520825884426, 0.6149158787768485, 9.4810333618738838, -0.13158366311856895,
6.3951969931407326], [4.434568322403317, 7.7203221948298895, 6.9791085275684761, 0.2278045453459292,
4.2641373188878919], [7.4740838903858178, 7.2061985517527125, 4.359735183680586, 3.8045746026393124,
8.2784527750593497], [5.0791357185544648, 6.8769700996641188, 7.1695224442229222, 1.3303630331213938,
0.71026177953180358]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[-6.8619267412736988, -7.5543629450852929, 1.3117545380117415,
-8.3008624869807104, -1.7740818307214088], [-3.7347105014588244, -0.44895662903225197, -1.1901702962936653,
-7.9414742785162122, -3.9051415049742495], [-0.69519493347632366, -0.96308027210942893, -3.8095436401815554,
-4.3647042212228291, 0.10917395119720918], [-3.0901431053076767, -1.2923087241980222, -0.99975637963921926,
-6.8389157907407476, -7.4590170443303379]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_taggedData_rank0_Symbol_rank3(self):
arg0=Data(-3.20552188916,self.functionspace)
arg0.setTaggedValue(1,-0.473083670166)
arg1=Symbol(shape=(6, 2, 2))
res=arg0-arg1
s1=numpy.array([[[0.71230320805011704, -3.008236723891188], [0.81066003773158002, -3.6043239509733382]],
[[3.691034498943317, -3.3919882986743777], [0.84551364067512935, 3.3207859438709946]], [[0.41963337446652105,
-3.6038224020133991], [-2.3537235378574151, -3.7120927558232997]], [[-3.4588851001838727, -0.31880183563871789],
[-1.3379489058063267, -3.9118810181560226]], [[4.4984539881701195, -3.2158956295350851], [1.5013508852420685,
2.8717656529358955]], [[-0.13701019263353231, -3.1176264463626078], [-1.67955120335195, 4.317481449568719]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[-3.917825097207726, -0.19728516526642093], [-4.016181926889189, 0.3988020618157293]],
[[-6.896556388100926, 0.18646640951676874], [-4.0510355298327383, -6.5263078330286035]], [[-3.62515526362413,
0.39830051285579016], [-0.85179835130019388, 0.50657086666569073]], [[0.2533632110262638, -2.886720053518891],
[-1.8675729833512822, 0.70635912899841369]], [[-7.7039758773277285, 0.010373740377476182], [-4.7068727743996774,
-6.0772875420935044]], [[-3.0685116965240766, -0.087895442795001166], [-1.525970685805659,
-7.523003338726328]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[-1.1853868782160886, 2.5351530537252165], [-1.2837437078975515,
3.1312402808073667]], [[-4.1641181691092886, 2.9189046285084062], [-1.3185973108411009, -3.7938696140369661]],
[[-0.89271704463249257, 3.1307387318474276], [1.8806398676914435, 3.2390090856573281]], [[2.9858014300179012,
-0.15428183452725364], [0.86486523564035522, 3.4387973479900511]], [[-4.9715376583360911, 2.7428119593691136],
[-1.97443455540804, -3.344849323101867]], [[-0.33607347753243921, 2.6445427761966362], [1.2064675331859784,
-4.7905651197346906]]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_taggedData_rank0_Symbol_rank4(self):
arg0=Data(-0.215341183726,self.functionspace)
arg0.setTaggedValue(1,-3.01917111711)
arg1=Symbol(shape=(3, 2, 3, 4))
res=arg0-arg1
s1=numpy.array([[[[3.1718058337950783, -4.3218518167555349, 4.7360170033398816, 2.6415781893387447],
[1.7953624357215787, 0.37239845986582054, 0.85595953231170441, -4.2093909477304852], [-4.0724848735753412,
-2.3789549933876364, 3.8266481046469991, -4.4686983670793881]], [[-1.3807814097985793, -0.9345570079736385,
3.2111606830229267, 2.5248569160832579], [-0.19847478717542089, 3.6200277417416071, -1.3367301493578787,
-1.9914051287776093], [4.2384277387383236, -3.1625190831895669, -4.8267032630177118, -3.7590986361039294]]],
[[[-0.96721285038350846, 0.23717549644533698, -2.0558971771798862, -2.1889488119398925], [2.1163450477817447,
-4.308535473047935, 0.96468545582662735, 0.58036767508710252], [-0.26889479983427034, -4.6749066439752021,
-2.6908936581627731, 3.3090528029139286]], [[1.0683391958055246, -4.3705975019062535, 4.6959723711804546,
-0.58815635047014858], [-1.7921642772643898, 2.8079866307247423, 4.5837878995413348, -3.6656523242301429],
[2.1083853748587442, -0.44280454111162726, -2.5427523262585563, 3.9551312168955626]]], [[[4.0479839543530591,
1.694708528108122, -1.8081650371476021, 2.5627212563151982], [2.9443513555348222, -3.4330381296191126,
-2.3471872352829837, 2.9291777099369405], [0.92208424820838264, -1.7857214370413055, 3.2638247404414695,
3.3713981402987798]], [[-2.3853121535462418, 2.1417428055374232, 3.1558224539661612, -4.4802179321245248],
[-3.0197245205703069, 2.7624146301708477, -4.6790033997765104, -4.0453165901737584], [4.8295161047601614,
-3.5764718373510842, 4.356981591617421, -4.7034098127513264]]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[[-3.3871470175211567, 4.1065106330294565, -4.95135818706596, -2.856919373064823],
[-2.0107036194476571, -0.5877396435918989, -1.0713007160377828, 3.9940497640044068], [3.8571436898492628,
2.163613809661558, -4.0419892883730775, 4.2533571833533097]], [[1.165440226072501, 0.71921582424756014,
-3.426501866749005, -2.7401980998093363], [-0.01686639655065747, -3.8353689254676855, 1.1213889656318003,
1.776063945051531], [-4.4537689224644019, 2.9471778994634885, 4.6113620792916334, 3.543757452377851]]],
[[[0.7518716666574301, -0.45251668017141533, 1.8405559934538078, 1.9736076282138142], [-2.3316862315078231,
4.0931942893218567, -1.1800266395527057, -0.79570885881318087], [0.053553616108191981, 4.4595654602491237,
2.4755524744366948, -3.5243939866400069]], [[-1.283680379531603, 4.1552563181801752, -4.911313554906533,
0.37281516674407023], [1.5768230935383114, -3.0233278144508207, -4.7991290832674132, 3.4503111405040645],
[-2.3237265585848226, 0.2274633573855489, 2.3274111425324779, -4.1704724006216409]]], [[[-4.2633251380791375,
-1.9100497118342004, 1.5928238534215238, -2.7780624400412766], [-3.1596925392609005, 3.2176969458930342,
2.1318460515569053, -3.1445188936630188], [-1.137425431934461, 1.5703802533152271, -3.4791659241675479,
-3.5867393240248582]], [[2.1699709698201635, -2.3570839892635016, -3.3711636376922396, 4.2648767483984464],
[2.8043833368442286, -2.977755813896926, 4.463662216050432, 3.8299754064476801], [-5.0448572884862397,
3.3611306536250058, -4.5723227753434994, 4.4880686290252481]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[-6.1909769509085075, 1.3026806996421056, -7.7551881204533109,
-5.6607493064521739], [-4.8145335528350079, -3.3915695769792498, -3.8751306494251336, 1.1902198306170559],
[1.0533137564619119, -0.64021612372579284, -6.8458192217604283, 1.4495272499659588]], [[-1.6383897073148499,
-2.0846141091397907, -6.2303318001363559, -5.5440280331966871], [-2.8206963299380083, -6.6391988588550364,
-1.6824409677555505, -1.0277659883358199], [-7.2575988558517528, 0.14334796607613765, 1.8075321459042826,
0.73992751899050013]]], [[[-2.0519582667299208, -3.2563466135587662, -0.96327393993354304, -0.83022230517353668],
[-5.1355161648951739, 1.2893643559345058, -3.9838565729400566, -3.5995387922005317], [-2.7502763172791589,
1.6557355268617728, -0.32827745895065608, -6.3282239200273578]], [[-4.0875103129189538, 1.3514263847928243,
-7.7151434882938839, -2.4310147666432806], [-1.2270068398490395, -5.8271577478381715, -7.602959016654764,
0.64648120711671364], [-5.1275564919721734, -2.576366576001802, -0.47641879085487293, -6.9743023340089918]]],
[[[-7.0671550714664884, -4.7138796452215512, -1.2110060799658271, -5.5818923734286274], [-5.9635224726482514,
0.41386701250568336, -0.67198388183044555, -5.9483488270503697], [-3.9412553653218119, -1.2334496800721237,
-6.2829958575548988, -6.390569257412209]], [[-0.63385896356718741, -5.1609139226508525, -6.1749935710795905,
1.4610468150110956], [0.0005534034568777102, -5.7815857472842769, 1.6598322826630811, 1.0261454730603292],
[-7.8486872218735906, 0.55730072023765498, -7.3761527087308503, 1.6842386956378972]]]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
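# From here on the tagged Data is itself array-valued; the default and tagged
# arrays propagate through the same symbolic expression independently.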
def test_sub_overloaded_taggedData_rank1_Symbol_rank0(self):
arg0=Data(numpy.array([3.3101673523710691, 0.048409361416743124]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([0.70887806236646611, -0.73932065177372408]))
arg1=Symbol(shape=())
res=arg0-arg1
s1=numpy.array(1.15960287006)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([2.1505644823090515, -1.1111935086452744]),self.functionspace)
ref.setTaggedValue(1,numpy.array([-0.45072480769555145, -1.8989235218357416]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_taggedData_rank1_Symbol_rank1(self):
arg0=Data(numpy.array([-2.0708546339036071, 2.2714034647505121]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([-0.16265022615439584, -0.29272834777410406]))
arg1=Symbol(shape=(2,))
res=arg0-arg1
s1=numpy.array([1.8495632665872739, -2.2808524667130694])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([-3.920417900490881, 4.5522559314635815]),self.functionspace)
ref.setTaggedValue(1,numpy.array([-2.0122134927416697, 1.9881241189389653]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_taggedData_rank2_Symbol_rank0(self):
arg0=Data(numpy.array([[4.703380807076492, -4.2567944639019304, -2.0784707905046593, 0.18023637488621791,
1.1164321428411501], [3.3809585074696322, 1.5795463086222137, 1.5300027430790495, -1.6695215658775489,
-4.9671698822372887], [-0.56875186129757704, -0.88988163011215704, 1.0953422249288387, 1.2629450835517639,
1.9829321534877584], [-2.3470243950738103, -1.5345245349366401, 1.7913793425402638, 3.2778179482022125,
3.2743088989127749]]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([[2.1331140495285128, 4.902243346193929, -3.8569193535703947,
-1.2051025219030698, 4.8526791592750644], [-1.9285295160668192, -2.2715983725035862, -1.6280809153232632,
0.63571110979312273, -4.5616322454088643], [1.1933837591252878, -2.4657544917793928, 3.8511059475300904,
-3.0018611957635444, 3.560382804940847], [-4.284584247208282, -4.3366343606789348, 3.6048395763720524,
-2.2301793774115106, 4.6397261587379131]]))
arg1=Symbol(shape=())
res=arg0-arg1
s1=numpy.array(0.0560012612314)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[4.6473795458450571, -4.3127957251333653, -2.1344720517360942, 0.12423511365478301,
1.0604308816097152], [3.3249572462381973, 1.5235450473907788, 1.4740014818476146, -1.7255228271089837,
-5.0231711434687236], [-0.62475312252901194, -0.94588289134359194, 1.0393409636974038, 1.206943822320329,
1.9269308922563235], [-2.4030256563052452, -1.590525796168075, 1.7353780813088289, 3.2218166869707776,
3.21830763768134]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[2.0771127882970779, 4.8462420849624941, -3.9129206148018296,
-1.2611037831345047, 4.7966778980436295], [-1.9845307772982541, -2.3275996337350211, -1.6840821765546981,
0.57970984856168783, -4.6176335066402991], [1.1373824978938529, -2.5217557530108277, 3.7951046862986555,
-3.0578624569949793, 3.5043815437094121], [-4.3405855084397169, -4.3926356219103697, 3.5488383151406175,
-2.2861806386429455, 4.5837248975064782]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_taggedData_rank2_Symbol_rank2(self):
arg0=Data(numpy.array([[0.044613582775737015, -0.22965054883260905, -3.3954728255423361, -0.043404784226975579,
-0.81018025865095922], [4.0980455142640473, 3.3299876326958326, 4.4694158188546833, 0.047800124529065791,
-4.1128886475115927], [-0.86793714814288414, 3.7852706993586231, 2.8168181178475837, -2.6081900317073039,
1.795227525921204], [-2.7964436060814792, 2.46599228887926, -4.3894587372918519, -3.0809581135280197,
4.5629513161933648]]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([[0.18467263707487369, -2.906541382403959, -4.2471361917218733,
1.7478696798949915, -2.0555035204044225], [-4.1703824796767011, -0.58145273211245829, -1.3034416354534684,
-4.4238643252257699, -3.0019960418182654], [-0.011560599410600503, 4.5614736908410478, -4.1865499712522745,
0.41611035316936196, 1.4719370557053075], [3.3285499812876207, 4.2147545548351992, 3.8796865015190463,
-2.8665673368928459, 3.8754754018195001]]))
arg1=Symbol(shape=(4, 5))
res=arg0-arg1
s1=numpy.array([[-0.34040680852948757, 0.51480179015857086, 2.6579250902566542, -3.8908104282358877,
-1.0766494604779266], [-1.7785348143550985, 1.7875285221080928, -0.26464821727786259, 3.7856697734154743,
0.14935084548977784], [1.6454427368239299, -3.0878902261983701, 2.1577262475041596, -3.540342914142153,
2.8529020416879671], [2.8849125795379305, -3.1409630887157123, -0.30215664293811351, 3.5493007526176896,
0.27226779139430857]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[0.38502039130522459, -0.74445233899117991, -6.0533979157989908, 3.8474056440089122,
0.26646920182696743], [5.8765803286191458, 1.5424591105877399, 4.7340640361325459, -3.7378696488864085,
-4.2622394930013705], [-2.5133798849668141, 6.8731609255569932, 0.65909187034342409, 0.93215288243484906,
-1.0576745157667631], [-5.6813561856194097, 5.6069553775949723, -4.0873020943537384, -6.6302588661457094,
4.2906835247990562]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[0.52507944560436126, -3.4213431725625298, -6.9050612819785275,
5.6386801081308793, -0.97885405992649588], [-2.3918476653216025, -2.3689812542205511, -1.0387934181756058,
-8.2095340986412442, -3.1513468873080432], [-1.6570033362345304, 7.6493639170394179, -6.3442762187564341,
3.9564532673115149, -1.3809649859826596], [0.44363740174969024, 7.3557176435509115, 4.1818431444571598,
-6.4158680895105356, 3.6032076104251916]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_taggedData_rank3_Symbol_rank0(self):
arg0=Data(numpy.array([[[-0.70323441272603926, -1.4205742401701604], [-3.6004008923276585, 4.1739347100888349]],
[[-2.7687391296703767, -0.96114141211843496], [0.45711266950319906, 0.36713165606152121]], [[3.8726070188081287,
2.6611494194452137], [-0.28060302358441547, 1.0399275995737964]], [[2.5912385881777, -0.12172669528696911],
[1.831517522951442, -4.9891623764024926]], [[3.8572507842255241, 2.9719918728052663], [0.42882676434271261,
-1.4826468418372341]], [[0.16110396579090835, 4.8052378752678955], [2.4890225545274554,
-1.4594734254395068]]]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([[[3.4601998637619467, 3.5105292543746671], [-1.9715134513187751,
1.6897677346566677]], [[0.99895689216195205, 3.7908023259957879], [-2.9811497902134496, 0.46336396583979944]],
[[-2.0979181014824011, 0.68992077008736707], [4.5817275596392033, 3.1112543881649586]], [[-1.0666850119171398,
-3.7136243224538679], [-2.1842168128700248, -0.60998709362389292]], [[-1.0817587775668578, 1.1357523207967555],
[0.72114300996433212, 2.0871085948686607]], [[2.6196090777455074, -4.8403131105182826], [4.4462612480444346,
2.6275786734235638]]]))
arg1=Symbol(shape=())
res=arg0-arg1
s1=numpy.array(3.40075496466)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[-4.1039893773891789, -4.8213292048333001], [-7.0011558569907981, 0.77317974542569523]],
[[-6.1694940943335164, -4.3618963767815746], [-2.9436422951599406, -3.0336233086016184]], [[0.4718520541449891,
-0.73960554521792599], [-3.6813579882475551, -2.3608273650893432]], [[-0.80951637648543961, -3.5224816599501088],
[-1.5692374417116977, -8.3899173410656331]], [[0.4564958195623845, -0.42876309185787331], [-2.971928200320427,
-4.8834018065003733]], [[-3.2396509988722313, 1.4044829106047558], [-0.91173241013568429,
-4.8602283901026464]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[0.059444899098807014, 0.10977428971152747], [-5.3722684159819147,
-1.7109872300064719]], [[-2.4017980725011876, 0.39004736133264828], [-6.3819047548765893, -2.9373909988233402]],
[[-5.4986730661455407, -2.7108341945757726], [1.1809725949760637, -0.28950057649818106]], [[-4.4674399765802795,
-7.1143792871170071], [-5.5849717775331644, -4.0107420582870326]], [[-4.4825137422299974, -2.2650026438663842],
[-2.6796119546988075, -1.3136463697944789]], [[-0.7811458869176322, -8.2410680751814223], [1.0455062833812949,
-0.77317629123957587]]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_taggedData_rank3_Symbol_rank3(self):
arg0=Data(numpy.array([[[-2.8893927498914151, -3.9495986710021471], [2.0674301637688552, -4.9323681378020368]],
[[-3.9365223323164567, -3.9166796931279513], [-2.1295831296849688, 0.049270642730291137]], [[1.1604521699930164,
-4.7263968957110194], [0.18403419227820805, -3.9919770732677948]], [[-4.4683480884742268, 3.1077188243660192],
[0.090355977211302729, -0.013539049772621325]], [[1.2239143556433882, 4.66468811676115], [4.6443599318212119,
2.902664355759085]], [[3.1499666861977964, 3.5678517696258449], [0.73557701807290599,
-4.1703133219986768]]]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([[[0.62745401025262382, 0.69024538347902542], [4.3685303267738433,
2.2109723240557235]], [[-0.7348498808881363, -2.7513236139357309], [2.5887407011037489, 4.1931952710033542]],
[[2.1336250254996258, -2.1610465999144091], [-4.054796877122568, 0.054975312915938268]], [[2.8778982280083021,
0.031841424972327559], [-1.6040852288365626, -0.14653197703489251]], [[1.0241081083490533, 2.0236436389548764],
[-4.7683548819587331, 0.81201234013234735]], [[-3.2923450240347405, 2.2531528995219965], [-3.594199051432386,
-1.9523442452177875]]]))
arg1=Symbol(shape=(6, 2, 2))
res=arg0-arg1
s1=numpy.array([[[0.67454553417657603, 2.9833990689244789], [-3.9375622829117427, 0.0094498156860893801]],
[[2.1574617938010734, -0.48892733726965609], [0.62118276066421352, 0.99065918564407696]], [[1.7968244154456219,
-1.6314349433046926], [1.8612952961850224, 4.6630470176393288]], [[0.43763307675500052, 4.0271951272236688],
[-1.1711764825930993, -4.5547560714878275]], [[2.514477748308436, 3.7600620047710827], [1.5805136896170069,
2.4948517124974012]], [[-0.74781838229224817, -2.9876928953003903], [4.1339271192034222, 4.4719827170790509]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[-3.5639382840679912, -6.932997739926626], [6.004992446680598, -4.9418179534881261]],
[[-6.0939841261175296, -3.4277523558582952], [-2.7507658903491823, -0.94138854291378582]], [[-0.63637224545260551,
-3.0949619524063268], [-1.6772611039068144, -8.6550240909071228]], [[-4.9059811652292273, -0.91947630285764959],
[1.261532459804402, 4.5412170217152061]], [[-1.2905633926650477, 0.90462611199006737], [3.063846242204205,
0.40781264326168376]], [[3.8977850684900446, 6.5555446649262352], [-3.3983501011305162,
-8.6422960390777277]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[-0.047091523923952217, -2.2931536854454535], [8.3060926096855852,
2.2015225083696341]], [[-2.8923116746892097, -2.2623962766660748], [1.9675579404395354, 3.2025360853592773]],
[[0.33680061005400397, -0.52961165660971643], [-5.9160921733075904, -4.6080717047233906]], [[2.4402651512533016,
-3.9953537022513412], [-0.43290874624346332, 4.4082240944529349]], [[-1.4903696399593827, -1.7364183658162062],
[-6.34886857157574, -1.6828393723650539]], [[-2.5445266417424923, 5.2408457948223868], [-7.7281261706358082,
-6.4243269622968384]]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_taggedData_rank4_Symbol_rank0(self):
arg0=Data(numpy.array([[[[3.1002455029763922, 2.6515488300516923, -0.77582358496211956, -3.4443694355246803],
[-2.6599091620789581, -0.70044327546902529, -4.3223485855396966, 4.9338402947088049], [-4.5546987200991147,
-4.159833516760548, -1.2113818643763619, 1.341501344402797]], [[-0.99132126989665803, -3.81966827017445,
-1.5631671743562592, -2.9170370396917167], [0.94015514336519956, -4.5328623228274036, 2.5469993786586862,
4.5298447080413311], [-1.8826808741220304, -0.21100480137345734, -1.7750931594239239, -3.5343470478632764]]],
[[[-3.4624410933639691, 3.7419877938482422, -4.1641241285521557, -2.8763768520849711], [4.3838179808162643,
-0.076650368742670949, -2.2790272387608601, 1.4407514353417152], [-0.58059366739859364, 3.0282179950037378,
4.3946428646333242, -3.9361840734571896]], [[-0.40769305246403231, -0.93123230765280152, -3.5500981163613665,
-1.4382421516555786], [0.18862577968690264, 3.8234595158976035, 1.2783334948832605, -0.84599833008897818],
[-1.5452449895609535, -2.1285283532469434, 2.9517034908101669, -1.043778516582341]]], [[[2.5188074736534176,
4.926760464276164, -1.2494158315784532, -4.1847607799981805], [1.764772573553314, 4.6090994448443769,
-3.7864884573437072, 2.5743244083963681], [-0.44624416686502322, -0.44288726525437028, -2.5180469174818598,
-4.8009656021603]], [[-1.0967276921708047, -1.5639987059537273, -3.3122649580537331, -3.947879272385495],
[4.1267460589959857, -4.5801997177900287, 0.85366271506547697, -3.5573421152778972], [-4.7127368302025108,
-4.5592524679039892, -1.8586387462495613, -3.2614675219884837]]]]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([[[[-3.6140016210408508, -4.1545999292001445, 4.9863169403898908,
-2.2007289242442383], [-2.3634275248295822, 1.4929955627211893, 1.1905831175627091, -3.1298255396253936],
[-0.78867439130174599, -2.5664248245819756, -1.882393556334109, -2.3300345925878529]], [[3.7578772846055983,
-1.9632657478837121, -1.3792653830852455, -0.23840250166856869], [-1.650781665029756, -3.2744446113480907,
-1.2541229166086589, -2.3471598629273149], [-1.939332795628903, 0.81542234976851624, 0.52422540705571663,
0.91808367692950554]]], [[[-3.0689349511345867, -4.8032602579819264, 3.769084882991141, -1.5864959564378189],
[-3.2063200431555905, -0.3347729502698602, 1.763270929850381, 0.65936335478094321], [-3.6143633139881959,
0.15424644431103118, 3.7156782910709154, -3.2826914978804203]], [[-0.091940996157960697, 2.5331247115220021,
3.4383904670893202, 0.77887041122794898], [4.2850997491436988, 3.3877021574758341, 3.9303516193668084,
0.97217787674818279], [-1.8219977615256742, 3.7582967180633755, -3.967674705101544, 3.2183851949652524]]],
[[[3.8000102844693906, -2.9266220460152672, 0.11901081743168795, -0.70455205529677301], [4.6787843021952913,
-3.2637583894745239, 4.6693989140352041, 2.042172937625808], [-2.9445501417858964, 0.36254085518902812,
2.8333171427728354, -2.7757509476245721]], [[3.8180860212706147, -3.4817247466262815, -3.2683613783585006,
-2.0706219843820262], [4.8065072235822566, 2.2788211866672707, 3.8562835841415382, -1.1633706258500731],
[2.652336823163191, -2.6060953909144513, 0.62089818312127321, -1.6242126976534612]]]]))
arg1=Symbol(shape=())
res=arg0-arg1
s1=numpy.array(-4.55573857649)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[[7.6559840794689205, 7.2072874065442205, 3.7799149915304087, 1.1113691409678479],
[1.8958294144135701, 3.8552953010235029, 0.23338999095283164, 9.4895788712013331], [0.0010398563934135296,
0.3959050597319802, 3.3443567121161664, 5.8972399208953252]], [[3.5644173065958702, 0.73607030631807824,
2.992571402136269, 1.6387015368008115], [5.4958937198577278, 0.02287625366512458, 7.1027379551512144,
9.0855832845338593], [2.6730577023704978, 4.3447337751190709, 2.7806454170686044, 1.0213915286292519]]],
[[[1.0932974831285591, 8.2977263703407704, 0.39161444794037248, 1.6793617244075572], [8.9395565573087925,
4.4790882077498573, 2.2767113377316681, 5.9964900118342435], [3.9751449090939346, 7.583956571496266,
8.9503814411258524, 0.61955450303533866]], [[4.1480455240284959, 3.6245062688397267, 1.0056404601311617,
3.1174964248369497], [4.7443643561794309, 8.3791980923901317, 5.8340720713757888, 3.70974024640355],
[3.0104935869315748, 2.4272102232455848, 7.5074420673026951, 3.5119600599101872]]], [[[7.0745460501459458,
9.4824990407686922, 3.3063227449140751, 0.3709777964943477], [6.3205111500458422, 9.1648380213369052,
0.76925011914882102, 7.1300629848888963], [4.109494409627505, 4.1128513112381579, 2.0376916590106684,
-0.24522702566777177]], [[3.4590108843217235, 2.991739870538801, 1.2434736184387951, 0.60785930410703326],
[8.6824846354885139, -0.024461141297500433, 5.4094012915580052, 0.99839646121463099], [-0.15699825370998255,
-0.0035138914114609676, 2.697099830242967, 1.2942710545040446]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[0.94173695545167746, 0.40113864729238369, 9.542055516882419,
2.35500965224829], [2.192311051662946, 6.0487341392137175, 5.7463216940552373, 1.4259130368671347],
[3.7670641851907822, 1.9893137519105526, 2.6733450201584192, 2.2257039839046753]], [[8.3136158610981266,
2.5924728286088161, 3.1764731934072827, 4.3173360748239595], [2.9049569114627722, 1.2812939651444375,
3.3016156598838693, 2.2085787135652133], [2.6164057808636252, 5.3711609262610445, 5.0799639835482449,
5.4738222534220338]]], [[[1.4868036253579415, -0.24752168148939813, 8.3248234594836692, 2.9692426200547093],
[1.3494185333369377, 4.220965626222668, 6.3190095063429093, 5.2151019312734714], [0.94137526250433234,
4.7099850208035594, 8.2714168675634436, 1.273047078612108]], [[4.4637975803345675, 7.0888632880145304,
7.9941290435818484, 5.3346089877204772], [8.8408383256362271, 7.9434407339683624, 8.4860901958593367,
5.527916453240711], [2.7337408149668541, 8.3140352945559037, 0.58806387139098426, 7.7741237714577807]]],
[[[8.3557488609619188, 1.629116530477261, 4.6747493939242162, 3.8511865211957552], [9.2345228786878195,
1.2919801870180043, 9.2251374905277324, 6.5979115141183362], [1.6111884347066319, 4.9182794316815563,
7.3890557192653636, 1.7799876288679561]], [[8.3738245977631429, 1.0740138298662467, 1.2873771981340276,
2.4851165921105021], [9.3622458000747848, 6.834559763159799, 8.4120221606340664, 3.3923679506424551],
[7.2080753996557192, 1.9496431855780769, 5.1766367596138014, 2.931525878839067]]]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_taggedData_rank4_Symbol_rank4(self):
arg0=Data(numpy.array([[[[2.1869721643026576, 0.35542091272423715, 2.5099944114031967, 4.7276012581949995],
[-0.23596027111215712, 3.2557128306673206, -2.4174678213407566, 4.9025765849007588], [3.4987602616867228,
-2.3969967727517094, 2.614715035832643, -3.9538109091356577]], [[0.54151166641114745, 4.3433313907072311,
-3.9824411189395126, 0.11193040884063787], [-4.3326960505433521, -2.6555021449849603, -1.6650005107909016,
-0.21278258756168267], [2.9438726263016104, 4.614591333740627, -1.4283352855346321, 4.195747529596801]]],
[[[0.4129039465707498, 0.25218586208094607, 4.2227877593235625, -3.8395686827717723], [-4.246422814789943,
-4.2708029152046789, -4.4791253262093615, 2.3703854064691221], [-0.32074671911367325, -4.0633264555676574,
-4.8034904727622223, 0.101245496731595]], [[3.3860052077100544, 4.4048456672981686, 3.3258905421337257,
-0.60591078242426555], [2.9574702297232829, 2.9390786518156196, 3.0627580449874809, -2.1902821038190523],
[1.2765769390449559, 4.5442832941192819, 0.47031486471564055, -3.2094801674304509]]], [[[1.4972627407797212,
-2.7514173987810633, 0.19744444113354387, 1.3720920976100972], [-3.147124860705004, -3.6707691951555885,
1.1521564952279704, -0.12493802519996233], [1.3717811158015873, -1.737983464544548, -2.5919544001996897,
-4.4195022009129206]], [[-3.5078213357756582, 1.5909514876001909, 3.932618549290213, 0.32844467348406869],
[-0.037083415286228494, 2.358949404615915, -3.7082781631298478, -4.9441324919087766], [1.219588665287433,
-2.1155364750524797, 2.3443039764677165, 4.1618790582351313]]]]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([[[[3.8216987557975131, -0.59039813916696193, -1.9474433412604117,
4.1666345075852202], [1.0033840403657788, -1.8365638623400207, -1.1472895447555285, 0.49043998461267968],
[1.525782098623524, 0.98710575843395354, 1.9521603305269073, 1.4982217977497818]], [[4.8105014981222372,
0.18255767851204219, 0.10092997041413909, 2.3610713615733667], [3.8639541584797801, 1.8455276769077198,
3.9278199867001007, 2.5501176762845867], [3.2925051662999447, 0.78129602184334157, -0.73105877010655362,
2.9378923845982694]]], [[[1.3162347911484948, -1.7534583809398363, -4.4745574675152744, 0.84388146264593455],
[-2.1398633576757309, 1.6224556269216279, 4.0151064679341637, 0.81646760002277574], [0.95506629968888479,
-3.384786519820715, 2.08961451298733, 1.4802214615087061]], [[2.5752388025402837, -2.7094797245847468,
-2.6808155024703106, -1.7780191613070642], [-0.58755728186204248, -4.3097624692690948, 3.6757907841395685,
-1.8312242243207608], [-3.7229135985460826, -1.5786991892133564, 2.6894504757052617, -0.48567336902160463]]],
[[[3.4562176552233623, -1.5291903913231595, 4.9276217294297595, -1.4641622460496571], [-3.9633150641051529,
-1.3895475276782743, -2.0928641563143735, 4.286214622292805], [-0.016872120519226819, -0.86571000346058913,
4.2635805792181465, 4.0351866281897113]], [[-1.973695982407413, -4.452260246087465, -2.5681734906597109,
3.0954829513656215], [2.6526834215550927, -4.3976717675273207, 2.0111485813735106, 2.7969396373439324],
[-0.72100288848623784, 1.4868693846138363, 2.3876845459322045, -3.759851286518614]]]]))
arg1=Symbol(shape=(3, 2, 3, 4))
res=arg0-arg1
s1=numpy.array([[[[-1.2326165508314046, 0.019536700697927678, 3.3313535404093759, -2.4782775769684271],
[3.9342491756801525, 1.2904741959913864, -2.7701975380199206, 2.4757520771582744], [2.5202328466158281,
-1.3683915774027189, 3.4678638218372768, -2.2884507446983129]], [[-4.9275394706777931, 4.7975831194456333,
1.7829898690658723, -0.96339421834763073], [-2.7923805247323799, -0.026981154987572253, 2.5136604629187271,
0.14658337947380495], [1.1254475424349959, 4.8000437885357261, 3.3479331374253167, 1.6298765760037002]]],
[[[-0.46473842692243572, 1.2430212762010644, -0.23618382206216726, -1.2230171932711418], [2.0127498669810855,
-0.31475870950595031, -0.20645609212011973, -4.9825089187683691], [-4.6108703987985988, -0.47963035537661725,
-3.1919702863790422, -3.9993603357626117]], [[3.8402219409685951, 3.04406815317755, 4.7640360318949195,
1.5279973254325983], [-4.9716807317737235, -3.4706635767559693, -1.2581696190523903, -2.591452040312936],
[1.6191001515432157, -3.5419762128533741, 0.92904425652178801, 4.6966930122512043]]], [[[-2.4787875268428614,
4.8717538415307775, 3.6264063974305554, 2.0645154974740256], [-4.5070489852671329, 2.3540394703493703,
3.2007816723140134, -0.44359603196672026], [2.5406621078154732, 3.6651768892659895, -2.7039262200534422,
-1.9309627063916244]], [[-0.037762488646412962, -4.6825147640959859, -3.1180187992817956, -0.3407644296025687],
[-1.6601757648009907, -1.0174825465103088, 0.060955158106047236, 1.2341204474061849], [-0.24621306712976931,
-1.3620636349151272, -0.12322079758969373, 2.3717593913603183]]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[[3.4195887151340623, 0.33588421202630947, -0.82135912900617924, 7.2058788351634266],
[-4.1702094467923096, 1.9652386346759343, 0.35272971667916408, 2.4268245077424844], [0.97852741507089469,
-1.0286051953489905, -0.85314878600463384, -1.6653601644373448]], [[5.4690511370889405, -0.45425172873840225,
-5.7654309880053844, 1.0753246271882686], [-1.5403155258109722, -2.628520989997388, -4.1786609737096292,
-0.35936596703548762], [1.8184250838666145, -0.18545245479509909, -4.7762684229599488, 2.5658709535931008]]],
[[[0.87764237349318552, -0.99083541412011833, 4.4589715813857298, -2.6165514895006305], [-6.2591726817710285,
-3.9560442056987286, -4.2726692340892418, 7.3528943252374912], [4.2901236796849256, -3.5836961001910401,
-1.6115201863831801, 4.1006058324942067]], [[-0.45421673325854073, 1.3607775141206186, -1.4381454897611938,
-2.1339081078568638], [7.9291509614970064, 6.4097422285715888, 4.3209276640398713, 0.40116993649388366],
[-0.34252321249825979, 8.0862595069726559, -0.45872939180614747, -7.9061731796816552]]], [[[3.9760502676225826,
-7.6231712403118408, -3.4289619562970115, -0.69242339986392842], [1.359924124562129, -6.0248086655049589,
-2.0486251770860431, 0.31865800676675793], [-1.1688809920138858, -5.4031603538105379, 0.11197181985375249,
-2.4885394945212962]], [[-3.4700588471292453, 6.2734662516961768, 7.0506373485720086, 0.66920910308663739],
[1.6230923495147622, 3.3764319511262237, -3.7692333212358951, -6.1782529393149614], [1.4658017324172024,
-0.7534728401373525, 2.4675247740574102, 1.7901196668748129]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[5.0543153066289177, -0.60993483986488961, -5.2787968816697877,
6.6449120845536473], [-2.9308651353143738, -3.127038058331407, 1.6229079932643922, -1.9853120925455947],
[-0.99445074799230415, 2.3554973358366724, -1.5157034913103695, 3.7866725424480947]], [[9.7380409688000302,
-4.6150254409335911, -1.6820598986517332, 3.3244655799209974], [6.6563346832121599, 1.872508831895292,
1.4141595237813736, 2.4035342968107818], [2.1670576238649488, -4.0187477666923845, -4.0789919075318704,
1.3080158085945692]]], [[[1.7809732180709306, -2.9964796571409007, -4.2383736454531071, 2.0668986559170763],
[-4.1526132246568164, 1.9372143364275782, 4.2215625600542834, 5.7989765187911448], [5.5659366984874836,
-2.9051561644440977, 5.2815847993663727, 5.4795817972713179]], [[-1.2649831384283114, -5.7535478777622968,
-7.4448515343652302, -3.3060164867396624], [4.384123449911681, -0.83909889251312553, 4.9339604031919588,
0.76022781599217515], [-5.3420137500892988, 1.9632770236400177, 1.7604062191834737, -5.1823663812728089]]],
[[[5.9350051820662237, -6.400944232853937, 1.3012153319992041, -3.5286777435236827], [0.54373392116198005,
-3.7435869980276446, -5.293645828628387, 4.7298106542595253], [-2.5575342283347, -4.5308868927265786,
6.9675067992715887, 5.9661493345813357]], [[-1.935933493761, 0.23025451800852093, 0.54984530862208469,
3.4362473809681902], [4.3128591863560839, -3.3801892210170119, 1.9501934232674634, 1.5628191899377475],
[-0.47478982135646852, 2.8489330195289635, 2.5109053435218982, -6.1316106778789319]]]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
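#  The expandedData subtraction cases below all follow one scheme; a short
#  orientation sketch (names as in the generated code, values A, B and the
#  Symbol shape arbitrary):
#
#    msk_arg0 = whereNegative(self.functionspace.getX()[0]-0.5)  # 1 where x[0]<0.5
#    arg0 = msk_arg0*A + (1.-msk_arg0)*B   # piecewise expanded Data
#    arg1 = Symbol(shape=...)              # symbolic placeholder
#    res  = arg0 - arg1                    # lazy Symbol expression
#    sub  = res.substitute({arg1: s1})     # evaluate with a concrete s1
#
#  Each case passes iff Lsup(sub-ref) <= RES_TOL*Lsup(ref), where ref is
#  assembled from the reference mask 1.-whereZero(x[0],1.e-8).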
def test_sub_overloaded_expandedData_rank0_Symbol_rank0(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*(2.42413566075)+(1.-msk_arg0)*(2.73592046896)
arg1=Symbol(shape=())
res=arg0-arg1
s1=numpy.array(0.0730314190245)
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*(2.35110424173)+(1.-msk_ref)*(2.66288904994)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_expandedData_rank0_Symbol_rank1(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*(-2.38585027921)+(1.-msk_arg0)*(-2.14546935212)
arg1=Symbol(shape=(2,))
res=arg0-arg1
s1=numpy.array([1.0449404678521192, -2.9654578889240057])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([-3.4307907470591283,
0.57960760971699665])+(1.-msk_ref)*numpy.array([-3.1904098199744872, 0.81998853680163775])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_expandedData_rank0_Symbol_rank2(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*(2.15276640076)+(1.-msk_arg0)*(-2.04284766814)
arg1=Symbol(shape=(4, 5))
res=arg0-arg1
s1=numpy.array([[-2.5429314638433684, 2.0318827224945402, -2.3636856893688076, 3.4855417570765717,
0.44952339669472341], [2.5403509140391156, 2.3524971436536095, 3.9461465487262188, 2.6955339698780154,
-0.45702899742654868], [-1.0602022717036155, 0.74771157767510843, 1.6452939357358289, -3.0322095528230921,
1.6787335078454735], [-4.263078102519902, 3.2046384335109863, 4.0147512257312048, 3.3998288702285713,
-0.56118778404289138]])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[4.6956978646047602, 0.12088367826685165, 4.5164520901301994, -1.3327753563151798,
1.7032430040666684], [-0.38758451327772381, -0.19973074289221771, -1.793380147964827, -0.5427675691166236,
2.6097953981879405], [3.2129686724650073, 1.4050548230862834, 0.50747246502556287, 5.1849759535844839,
0.47403289291591832], [6.4158445032812939, -1.0518720327495945, -1.861984824969813, -1.2470624694671795,
2.7139541848042832]])+(1.-msk_ref)*numpy.array([[0.50008379570506278, -4.0747303906328458, 0.32083802123050198,
-5.5283894252148773, -2.4923710648330291], [-4.5831985821774213, -4.3953448117919152, -5.9889942168645245,
-4.7383816380163211, -1.585818670711757], [-0.98264539643469018, -2.7905592458134141, -3.6881416038741346,
0.98936188468478647, -3.7215811759837791], [2.2202304343815964, -5.247486101649292, -6.0575988938695104,
-5.4426765383668769, -1.4816598840954143]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_expandedData_rank0_Symbol_rank3(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*(3.30825297654)+(1.-msk_arg0)*(-3.92076322418)
arg1=Symbol(shape=(6, 2, 2))
res=arg0-arg1
s1=numpy.array([[[-0.52002332126128437, 4.3478222442071139], [3.3434922005534364, 2.8013302606159396]],
[[-2.3200079969586795, -3.0556917667690642], [-2.7103276420969582, 4.1511200748037105]], [[-0.92404095393396624,
2.6484690327098859], [-2.1529217611726503, 4.4602897709717144]], [[0.58271708006920253, 1.9322598870751975],
[-3.5184596230462182, -4.4222029485403436]], [[-4.3953168785776278, -4.450145776704125], [4.2137072146995536,
3.8966485797913304]], [[3.1838339108927798, -3.6438064267677328], [1.3789445362861974, -2.9975552731311272]]])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[[3.8282762978010325, -1.0395692676673658], [-0.035239224013688286,
0.50692271592380855]], [[5.6282609734984277, 6.3639447433088119], [6.0185806186367063, -0.84286709826396233]],
[[4.2322939304737144, 0.65978394382986227], [5.4611747377123985, -1.1520367944319663]], [[2.7255358964705456,
1.3759930894645507], [6.8267125995859663, 7.7304559250800917]], [[7.7035698551173759, 7.7583987532438732],
[-0.90545423815980541, -0.58839560325158224]], [[0.12441906564696836, 6.952059403307481], [1.9293084402535507,
6.3058082496708749]]])+(1.-msk_ref)*numpy.array([[[-3.4007399029178136, -8.2685854683862114], [-7.2642554247325339,
-6.7220934847950371]], [[-1.6007552272204184, -0.86507145741003377], [-1.2104355820821397, -8.071883298982808]],
[[-2.9967222702451317, -6.5692322568889843], [-1.7678414630064476, -8.3810529951508119]], [[-4.5034803042483009,
-5.853023111254295], [-0.40230360113287977, 0.50143972436124562]], [[0.47455365439852981, 0.52938255252502708],
[-8.1344704388786511, -7.8174118039704279]], [[-7.1045971350718773, -0.27695679741136514], [-5.2997077604652958,
-0.92320795104797071]]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_expandedData_rank0_Symbol_rank4(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*(4.28115160685)+(1.-msk_arg0)*(-2.99624588284)
arg1=Symbol(shape=(3, 2, 3, 4))
res=arg0-arg1
s1=numpy.array([[[[-3.7034845683259832, -3.2006988280486115, 4.6850337347787345, -1.5431340070704103],
[-3.9508001556883876, 4.7231128873762902, -3.0051096732527691, -0.071944916970104522], [3.2109725637398565,
4.0910170733379978, -3.7166755556626772, -4.402146700420734]], [[-1.5273991623031669, -1.4865381526416344,
3.902360473786171, -1.3538484671917517], [0.38707743115008331, 4.3855048056490773, 1.9022231675241139,
1.397387379628614], [1.0431068102446126, 3.0934379513218886, 2.0138255231319624, 4.2870052231295865]]],
[[[-4.2737086360299941, 4.2752748398653857, -3.7092106416006629, 1.417380944080846], [-2.4275128587779737,
-2.879911926405645, -4.23153844815229, -0.30555854124221682], [-2.6571106905165331, 2.6754859746804112,
-4.5544081791240201, -0.020082609244357563]], [[1.0570642052363857, -1.7647078574502792, 2.6330635742775668,
3.717540829723692], [4.9220552078075279, -3.9060168420798869, 1.4799017868437296, 2.7842835488914588],
[-2.0839669385912343, -4.8850626605172867, 1.7595980725429907, 3.0026383083452117]]], [[[-0.83195539201513036,
-1.2109400306251725, 2.0638657571201078, -0.86905066581365009], [-0.54092453152611775, 3.4954317917180884,
3.7826658876966359, -2.5779636206330894], [1.6720368874738147, 0.42564364358069096, -4.9027760864384096,
0.66861897918883617]], [[-4.1302737255553801, -3.2949127465748109, 1.5706320204575341, -2.2912291830881903],
[-2.19574275564025, 3.983182476523945, 2.032922034582441, -2.7459308093848711], [4.6025690264891459,
3.7012963844874829, 0.1748188819614116, 4.2002322255258893]]]])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[[[7.984636175171131, 7.4818504348937598, -0.40388212793358669, 5.8242856139155581],
[8.2319517625335354, -0.44196128053114236, 7.286261280097917, 4.3530965238152524], [1.0701790431052913,
0.19013453350715004, 7.997827162507825, 8.6832983072658827]], [[5.8085507691483151, 5.7676897594867818,
0.37879113305897683, 5.6350000740368991], [3.8940741756950645, -0.10435319880392946, 2.3789284393210339,
2.8837642272165338], [3.2380447966005352, 1.1877136555232592, 2.2673260837131854, -0.0058536162844387007]]],
[[[8.5548602428751419, 0.005876766979762138, 7.9903622484458108, 2.8637706627643018], [6.7086644656231211,
7.1610635332507933, 8.5126900549974387, 4.5867101480873647], [6.938262297361681, 1.6056656321647367, 8.835559785969167,
4.3012342160895054]], [[3.2240874016087622, 6.0458594642954271, 1.6480880325675811, 0.5636107771214558],
[-0.64090360096238008, 8.1871684489250356, 2.8012498200014182, 1.496868057953689], [6.3651185454363821,
9.1662142673624345, 2.5215535343021571, 1.2785132984999361]]], [[[5.1131069988602782, 5.4920916374703204,
2.2172858497250401, 5.1502022726587979], [4.8220761383712656, 0.78571981512705946, 0.49848571914851192,
6.8591152274782372], [2.6091147193713331, 3.8555079632644569, 9.1839276932835574, 3.6125326276563117]],
[[8.4114253324005279, 7.5760643534199588, 2.7105195863876137, 6.5723807899333382], [6.4768943624853978,
0.29796913032120287, 2.2482295722627068, 7.0270824162300194], [-0.32141741964399806, 0.57985522235766496,
4.1063327248837362, 0.080919381319258576]]]])+(1.-msk_ref)*numpy.array([[[[0.70723868548106505, 0.20445294520369339,
-7.6812796176236526, -1.4531118757745078], [0.9545542728434695, -7.7193587702212083, 0.0088637904078510132,
-2.9243009658748136], [-6.2072184465847746, -7.0872629561829159, 0.72042967281775905, 1.4059008175758159]],
[[-1.4688467205417512, -1.5097077302032837, -6.8986063566310891, -1.6423974156531664], [-3.3833233139950014,
-7.3817506884939954, -4.898469050369032, -4.3936332624735321], [-4.0393526930895307, -6.0896838341668067,
-5.0100714059768805, -7.2832511059745046]]], [[[1.277462753185076, -7.2715207227103038, 0.71296475875574483,
-4.4136268269257641], [-0.56873302406694437, -0.11633395643927313, 1.2352925653073719, -2.6906873416027013],
[-0.33913519232838496, -5.6717318575253293, 1.558162296279102, -2.9761632736005605]], [[-4.0533100880813038,
-1.2315380253946389, -5.6293094571224849, -6.7137867125686101], [-7.918301090652446, 0.90977095923496876,
-4.4761476696886477, -5.7805294317363769], [-0.91227894425368383, 1.8888167776723686, -4.7558439553879088,
-5.9988841911901298]]], [[[-2.1642904908297877, -1.7853058522197456, -5.0601116399650259, -2.127195217031268],
[-2.4553213513188004, -6.4916776745630065, -6.778911770541554, -0.41828226221182874], [-4.6682827703187328,
-3.4218895264256091, 1.9065302035934915, -3.6648648620337543]], [[1.134027842710462, 0.29866686372989282,
-4.5668779033024522, -0.70501669975672776], [-0.8005031272046681, -6.9794283593688631, -5.0291679174273591,
-0.250315073460047], [-7.598814909334064, -6.697542267332401, -3.1710647648063297, -7.1964781083708074]]]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
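#  In the remaining subtraction cases arg0 itself carries rank >= 1; each
#  rank is paired only with a rank-0 or a matching-rank Symbol, and when
#  arg1 has rank 0 the result inherits arg0's shape (see the getShape()
#  assertions).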
def test_sub_overloaded_expandedData_rank1_Symbol_rank0(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*numpy.array([0.57185536765716005,
-4.5016440600070959])+(1.-msk_arg0)*numpy.array([-0.4418100919929735, 1.7838290839713755])
arg1=Symbol(shape=())
res=arg0-arg1
s1=numpy.array(4.01685432532)
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([-3.4449989576654145,
-8.5184983853296714])+(1.-msk_ref)*numpy.array([-4.4586644173155481, -2.2330252413511991])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_expandedData_rank1_Symbol_rank1(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*numpy.array([-4.1734209340603439,
4.5527582003296185])+(1.-msk_arg0)*numpy.array([-1.7000682822887789, 0.76683988376374757])
arg1=Symbol(shape=(2,))
res=arg0-arg1
s1=numpy.array([-1.5016152385157842, 0.80809700227400683])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([-2.6718056955445597,
3.7446611980556117])+(1.-msk_ref)*numpy.array([-0.19845304377299477, -0.041257118510259261])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_expandedData_rank2_Symbol_rank0(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*numpy.array([[3.3500126396534871, 3.4943903203535527, -1.7005861531401179, 1.4952347206139418,
-4.5979578172283739], [-2.3055331093587372, -3.6474162865795225, -3.0632961186256935, 4.7258384683418715,
-0.58388337502415943], [4.7641302227265427, -0.11182220465882864, 2.8628458472454756, 1.6967713595739653,
2.8474759788446562], [2.5863322473986914, 1.6349340161801535, -2.9934700314340712, 3.4068691472223609,
-0.97913156666695667]])+(1.-msk_arg0)*numpy.array([[-0.34407378508566389, 2.6789454460601672, -3.3795587578901665,
-4.1659261688389009, 2.3147542825953309], [-2.0615148857755603, -2.1181768528675784, 4.7855957803525566,
2.4248630846228734, 4.4597452365342818], [4.5985091304874572, 2.9992334161018466, 0.73974708846994552,
-0.24440017509511858, -0.49166350583553875], [1.5878740787090537, 3.0210382196579779, 3.6343442933400869,
1.5494651243470852, -3.3635312675197349]])
arg1=Symbol(shape=())
res=arg0-arg1
s1=numpy.array(-3.53998589595)
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[6.8899985356048354, 7.0343762163049011, 1.8393997428112305, 5.0352206165652902,
-1.0579719212770256], [1.2344527865926112, -0.10743039062817417, 0.47668977732565487, 8.2658243642932199,
2.9561025209271889], [8.3041161186778911, 3.4281636912925197, 6.402831743196824, 5.2367572555253137,
6.3874618747960046], [6.1263181433500398, 5.1749199121315019, 0.54651586451727718, 6.9468550431737093,
2.5608543292843917]])+(1.-msk_ref)*numpy.array([[3.1959121108656845, 6.2189313420115155, 0.16042713806118192,
-0.6259402728875525, 5.8547401785466793], [1.4784710101757881, 1.42180904308377, 8.325581676303905, 5.9648489805742217,
7.9997311324856302], [8.1384950264388056, 6.539219312053195, 4.2797329844212939, 3.2955857208562298,
3.0483223901158096], [5.1278599746604021, 6.5610241156093263, 7.1743301892914353, 5.0894510202984335,
0.17645462843161352]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_expandedData_rank2_Symbol_rank2(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*numpy.array([[2.0867998826855514, 2.311725929216629, -3.4719596731221403, 1.7817832811577139,
1.5141982978301929], [3.1010865709749673, -2.1704923524391537, 3.7204405507466163, 4.629811066660821,
1.6635344950905893], [-2.574527711983543, -1.6203338172344193, 3.7119433126415871, -4.2495237660622687,
-2.1154248806831588], [0.14708606411584846, -4.3739162090051034, 0.28212084215683131, -3.2454357930486841,
4.0490170686662843]])+(1.-msk_arg0)*numpy.array([[2.5664289274057825, -3.8876267007915413, 3.0606117231617365,
0.45673258502894409, -2.4098041248367421], [2.4831763479900646, -4.4003484897067748, -3.6829879581883054,
-4.4939546625771341, 0.095882545889256932], [4.8837698588887477, 4.688682977288769, -1.7221295814057069,
-2.8466915452782313, -1.7320653753684723], [0.9117963691890596, -0.77307239329958843, -4.179217925450148,
-2.8549317288801146, 2.3840070557997581]])
arg1=Symbol(shape=(4, 5))
res=arg0-arg1
s1=numpy.array([[3.235357796595256, -1.480870361715898, 4.6623764990451662, -0.75539625063053251,
1.764045470290668], [0.076611613190003425, -4.3375283365611939, -0.16726979393963415, 3.2199460507232871,
-3.1622960810628884], [0.33570324662007156, 1.8340432936621101, 3.3105489280357343, -4.5476113052695135,
1.6510039686145541], [1.9731991965232831, -1.2055959073743616, 3.1802938969891557, 1.6969195493915894,
4.7202410276309497]])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[-1.1485579139097046, 3.792596290932527, -8.1343361721673055, 2.5371795317882464,
-0.24984717246047516], [3.0244749577849639, 2.1670359841220401, 3.8877103446862504, 1.4098650159375339,
4.8258305761534777], [-2.9102309586036146, -3.4543771108965293, 0.40139438460585275, 0.29808753920724484,
-3.7664288492977129], [-1.8261131324074347, -3.1683203016307417, -2.8981730548323243, -4.9423553424402735,
-0.67122395896466536]])+(1.-msk_ref)*numpy.array([[-0.66892886918947347, -2.4067563390756432, -1.6017647758834297,
1.2121288356594766, -4.1738495951274102], [2.4065647348000612, -0.062820153145580981, -3.5157181642486712,
-7.7139007133004212, 3.2581786269521453], [4.5480666122686761, 2.854639683626659, -5.0326785094414408,
1.7009197599912822, -3.3830693439830264], [-1.0614028273342235, 0.43252351407477319, -7.3595118224393037,
-4.5518512782717035, -2.3362339718311915]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_expandedData_rank3_Symbol_rank0(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*numpy.array([[[-4.4842044121065818, 4.4928465492805714], [-0.8488426741388917,
-4.6899835951885827]], [[-3.8520901512994188, -4.2130527577972394], [-1.9136871018997637, -1.6373131250552273]],
[[-4.193784678832186, -0.65999502399047039], [-0.57202165743856082, -2.5346604397115016]], [[-3.7518661140733869,
-4.2333127035460327], [-1.185325910878734, 0.27013359391463077]], [[4.7175093778110231, -2.5123249429723304],
[3.8200721640724424, 4.5930030128887935]], [[-4.7065467532244636, -1.7055937731234607], [-1.331474083968188,
-4.3474657122786562]]])+(1.-msk_arg0)*numpy.array([[[3.7753100517410338, 3.7909180149825001], [0.99488027773324017,
-3.0286171370440904]], [[3.6374742681576677, 1.145681069564648], [-0.002113196116643401, -0.3884450840387661]],
[[-2.5595107128502024, -3.4455619811895488], [-4.5771852308072871, 1.3642969267838581]], [[-0.18445690526205638,
0.49675060587106934], [-3.8670923300147821, -4.1547783162827496]], [[-3.9389053222472947, 1.3272580810242838],
[3.5990473792265725, 2.2029039321052881]], [[-4.5403710311302428, -0.87738273585574866], [3.383530992594503,
3.353845130538776]]])
arg1=Symbol(shape=())
res=arg0-arg1
s1=numpy.array(-4.30522721091)
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[[-0.17897720120150851, 8.7980737601856447], [3.4563845367661816,
-0.38475638428350933]], [[0.45313705960565454, 0.092174453107833898], [2.3915401090053097, 2.667914085849846]],
[[0.11144253207288735, 3.6452321869146029], [3.7332055534665125, 1.7705667711935718]], [[0.55336109683168644,
0.071914507359040591], [3.1199013000263394, 4.5753608048197041]], [[9.0227365887160964, 1.7929022679327429],
[8.1252993749775158, 8.8982302237938669]], [[-0.40131954231939027, 2.5996334377816126], [2.9737531269368853,
-0.042238501373582871]]])+(1.-msk_ref)*numpy.array([[[8.0805372626461072, 8.0961452258875735], [5.3001074886383135,
1.2766100738609829]], [[7.942701479062741, 5.4509082804697213], [4.3031140147884299, 3.9167821268663072]],
[[1.7457164980548709, 0.8596652297155245], [-0.2719580199022138, 5.6695241376889314]], [[4.1207703056430169,
4.8019778167761427], [0.43813488089029118, 0.15044889462232369]], [[0.36632188865777859, 5.6324852919293571],
[7.9042745901316458, 6.5081311430103614]], [[-0.23514382022516944, 3.4278444750493247], [7.6887582034995763,
7.6590723414438493]]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_expandedData_rank3_Symbol_rank3(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*numpy.array([[[2.0747863514072602, 3.5162387004014164], [0.60187542749819123,
-1.4464372144424988]], [[-0.79722414010768361, -3.2668034589151995], [3.6997621801618905, -2.3676159172001379]],
[[-4.2988464154229238, 2.9969641407457024], [2.6364527127840898, 1.1108844479920323]], [[-1.1644293703177389,
4.7657018637717314], [3.954999187366635, -3.1405571932690379]], [[1.9169748083366978, 3.5980880196573022],
[4.6505164496107305, -3.4906561986190576]], [[-4.799933282554286, 3.4274402953401566], [-3.2690807817244751,
3.3152263479112811]]])+(1.-msk_arg0)*numpy.array([[[-0.19327777478115316, 1.1303927856512574], [0.070720562815962396,
0.2691348667587512]], [[-2.2293468476711373, 4.4261476420776269], [-2.1677478226049174, 3.9963032240053238]],
[[-3.0163006244468358, 0.039611843610902753], [-1.7062014469652445, -0.85393662791937786]], [[2.3270289055995583,
1.3694144393292564], [1.7400166142803206, 1.0276232159123833]], [[0.34573570990013103, -3.0575470737366093],
[-0.16902625990476849, -2.0541180978179363]], [[1.4322948650591076, 1.3766358910177399], [-1.2248059444270067,
3.8606015627846109]]])
arg1=Symbol(shape=(6, 2, 2))
res=arg0-arg1
s1=numpy.array([[[-0.065448649070245501, -1.0082288113108073], [2.940556380714975, 2.6943538184190166]],
[[4.4012174975531551, 2.1536331083101858], [0.42620647125632161, -3.3632985397458972]], [[0.7816306898353016,
-4.1519556164933835], [4.587159382474109, 3.7392943755961223]], [[1.0899221203445091, -3.3294088440228276],
[4.0864323956812836, -1.4550107947057112]], [[3.7465493005246273, 3.1852429656365171], [-3.8082443104157484,
-2.7860725384289688]], [[-4.7267420836233232, 0.9545260667209563], [-4.6866116848499395, 0.18931611034152862]]])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[[2.1402350004775057, 4.5244675117122242], [-2.3386809532167838, -4.1407910328615154]],
[[-5.1984416376608387, -5.4204365672253854], [3.2735557089055689, 0.99568262254575934]], [[-5.0804771052582254,
7.1489197572390859], [-1.9507066696900193, -2.62840992760409]], [[-2.2543514906622479, 8.095110707794559],
[-0.13143320831464855, -1.6855463985633268]], [[-1.8295744921879296, 0.41284505402078508], [8.4587607600264789,
-0.70458366019008878]], [[-0.07319119893096282, 2.4729142286192003], [1.4175309031254644,
3.1259102375697525]]])+(1.-msk_ref)*numpy.array([[[-0.12782912571090765, 2.1386215969620648], [-2.8698358178990127,
-2.4252189516602654]], [[-6.6305643452242924, 2.2725145337674411], [-2.593954293861239, 7.359601763751221]],
[[-3.7979313142821374, 4.1915674601042863], [-6.2933608294393535, -4.5932310035155002]], [[1.2371067852550492,
4.698823283352084], [-2.346415781400963, 2.4826340106180944]], [[-3.4008135906244963, -6.2427900393731264],
[3.6392180505109799, 0.73195444061103254]], [[6.1590369486824308, 0.42210982429678356], [3.4618057404229328,
3.6712854524430822]]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_expandedData_rank4_Symbol_rank0(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*numpy.array([[[[-2.3915780501354655, -1.5713847750997409, -2.0965030994469647,
-2.1551686098873679], [3.3436095870548073, 3.0694003316976595, -0.81504636610563619, -2.4693923873770118],
[-2.319226534159692, -3.4872612814422221, -0.52829274680925575, 0.30144897282215677]], [[-4.8403842893778295,
2.5434758702608882, -0.65369410807785577, 2.38720324090216], [-4.436083252026509, -1.9729315005424199,
2.1429634466708958, -4.4793507786187812], [-4.6789538924900809, -1.2258958461641156, 4.6704546977809684,
-4.1766942543918724]]], [[[-0.99705682132654605, 0.63139391938552247, 3.648090293364568, 0.9177900253507385],
[-2.6400178360936186, -4.9693099498708015, -2.0831389743339668, -4.8827387011810046], [0.92728596167706101,
-0.70240094092455596, -4.2968994749977032, -1.8662843872126853]], [[4.4680529533242801, -0.62442225881816338,
3.3149308543703011, -4.9724764458198436], [-2.5558868654124858, 0.54392866523319405, -4.0956874823606899,
-3.1974475782258649], [-1.1195425378333166, 1.4289844369376148, -1.2628376022909036, -2.0331849636301413]]],
[[[0.15561155815826755, 1.0566531457111576, -1.6429274914523804, -3.1609988875067607], [-3.9169786589191657,
-0.0862972748194899, -2.2849500782464229, -3.593674280376046], [4.5374768908066745, 1.9914470295786906,
-2.6711697877422749, -4.6476473568202383]], [[4.7467779338409635, 1.4258515187294085, 1.3579719892502204,
3.8840821581627232], [-0.74410865296963191, -4.1369032807050301, 0.10505268333553275, 0.20183690831668066],
[3.2258506139241625, -0.19145900822910011, -1.9876999864961387,
-1.118642852906222]]]])+(1.-msk_arg0)*numpy.array([[[[-4.1006205365267823, 0.98436034016399709, -2.0659912052394889,
-4.829130675843226], [-3.2904658009023189, 1.8958877020661227, -4.4668040074389035, 4.7329845357071445],
[-1.7781671949524789, -0.77841298536212911, -2.7909279205249824, -3.9295744077558559]], [[-1.3638768752460773,
4.4638784666445588, 2.5119633402011221, 3.4658456524464523], [-2.3171958056993116, -2.6568253529176222,
2.2419660036743707, -1.9675169728612851], [-1.1143935824519682, 1.186871291556292, 1.8459225649295181,
3.4239497313955702]]], [[[3.2057908235968178, 1.0054526017670806, 2.8530443499731035, 2.9117538931912392],
[3.7690204040343271, -0.2740720613800427, -1.6755030321386211, -4.0975186439069002], [1.5575983081276483,
-1.4138189638564769, -4.5799310621318954, -2.1831715499767892]], [[4.2307751851678876, -4.574159194957832,
3.6142529396906227, 2.9695212799242778], [1.1073212833019772, 3.5565535386979832, 4.1163170572300576,
-2.6051576587226011], [-2.4885332002171778, 2.7193644049811461, 3.6891018981647203, 2.2279362582877482]]],
[[[3.8371440925068896, 0.098907118846149444, -4.8484985466419248, -3.2646614116360695], [-1.3166337092696869,
3.8989945382792683, 2.4522596196795661, 4.8579102565531542], [2.8065577922030176, -2.6140964300168221,
0.26485552888380326, -3.2455906809923696]], [[3.4179005303801677, -4.9507538637080364, -3.9286015574556798,
0.67686821786057827], [1.2296342635912527, -1.0149250475477691, 1.9729311750755993, -4.6474538783990385],
[-1.2276432760256037, 2.5170369379074629, 0.97453825295943464, 3.8596939709877667]]]])
arg1=Symbol(shape=())
res=arg0-arg1
s1=numpy.array(3.09580908291)
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[[[-5.4873871330441668, -4.6671938580084422, -5.1923121823556659, -5.2509776927960692],
[0.24780050414610599, -0.026408751211041803, -3.9108554490143375, -5.5652014702857127], [-5.4150356170683933,
-6.5830703643509239, -3.624101829717957, -2.7943601100865445]], [[-7.9361933722865308, -0.5523332126478131,
-3.749503190986557, -0.70860584200654131], [-7.5318923349352103, -5.0687405834511212, -0.95284563623780549,
-7.5751598615274824], [-7.7747629753987821, -4.3217049290728173, 1.5746456148722672, -7.2725033373005736]]],
[[[-4.0928659042352473, -2.4644151635231788, 0.55228121045586676, -2.1780190575579628], [-5.7358269190023199,
-8.0651190327795028, -5.1789480572426676, -7.9785477840897059], [-2.1685231212316403, -3.7982100238332572,
-7.3927085579064045, -4.9620934701213866]], [[1.3722438704155788, -3.7202313417268646, 0.21912177146159983,
-8.0682855287285449], [-5.651695948321187, -2.5518804176755072, -7.1914965652693912, -6.2932566611345662],
[-4.2153516207420179, -1.6668246459710865, -4.3586466851996049, -5.1289940465388426]]], [[[-2.9401975247504337,
-2.0391559371975436, -4.7387365743610816, -6.256807970415462], [-7.012787741827867, -3.1821063577281912,
-5.3807591611551242, -6.6894833632847472], [1.4416678078979732, -1.1043620533300107, -5.7669788706509761,
-7.7434564397289396]], [[1.6509688509322622, -1.6699575641792928, -1.7378370936584808, 0.7882730752540219],
[-3.8399177358783332, -7.2327123636137314, -2.9907563995731685, -2.8939721745920206], [0.13004153101546123,
-3.2872680911378014, -5.0835090694048404, -4.2144519358149228]]]])+(1.-msk_ref)*numpy.array([[[[-7.1964296194354835,
-2.1114487427447042, -5.1618002881481901, -7.9249397587519272], [-6.3862748838110202, -1.1999213808425786,
-7.5626130903476048, 1.6371754527984432], [-4.8739762778611802, -3.8742220682708304, -5.8867370034336837,
-7.0253834906645576]], [[-4.459685958154779, 1.3680693837358575, -0.58384574270757916, 0.37003656953775099],
[-5.4130048886080129, -5.7526344358263231, -0.85384307923433056, -5.063326055769986], [-4.2102026653606694,
-1.9089377913524093, -1.2498865179791832, 0.32814064848686897]]], [[[0.1099817406881165, -2.0903564811416206,
-0.24276473293559775, -0.18405518971746204], [0.67321132112562587, -3.369881144288744, -4.7713121150473228,
-7.1933277268156015], [-1.5382107747810529, -4.5096280467651777, -7.6757401450405967, -5.2789806328854905]],
[[1.1349661022591864, -7.6699682778665332, 0.5184438567819214, -0.12628780298442344], [-1.9884877996067241,
0.46074445578928191, 1.0205079743213563, -5.7009667416313023], [-5.5843422831258795, -0.3764446779275552,
0.59329281525601907, -0.86787282462095305]]], [[[0.74133500959818832, -2.9969019640625518, -7.9443076295506261,
-6.3604704945447708], [-4.4124427921783882, 0.80318545537056707, -0.6435494632291352, 1.762101173644453],
[-0.28925129070568367, -5.7099055129255234, -2.830953554024898, -6.3413997639010713]], [[0.32209144747146645,
-8.0465629466167385, -7.0244106403643816, -2.418940865048123], [-1.8661748193174486, -4.1107341304564704,
-1.1228779078331019, -7.7432629613077397], [-4.323452358934305, -0.57877214500123841, -2.1212708299492666,
0.76388488807906541]]]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_expandedData_rank4_Symbol_rank4(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*numpy.array([[[[4.7227008802641297, 2.8863279383915197, -2.1896969198324809, 4.4302312776864046],
[-2.0197165879770251, -4.9702841021278301, 4.7902292515365534, 4.5956841306054539], [2.93934146754248,
1.646059013328439, 0.99083597111362476, 3.3910212199665182]], [[1.2787313823171234, 1.3787912835332463,
-0.071420926539018659, -3.2388329800378415], [3.349975825307892, -1.7816480803027677, 2.2965490165680036,
-1.9018094630451774], [-1.1200280993595113, -0.66137198885848481, 0.21843601647543931, -1.4087126883569634]]],
[[[-1.9454924644698748, 0.7634003631723596, 4.1323447887625271, 1.443298202567032], [-3.7655189569283309,
0.99421618018940361, 4.6046246816957677, 0.74421638400845325], [-3.5622774770791485, 1.2397714154717114,
-1.0582462113989424, 1.2971002709503896]], [[1.3379013865706302, -0.39532127021252883, 4.7111583084640927,
-0.88268548014728054], [-4.8134271904014936, -3.1339427094271266, 2.1759548242117912, -1.5082214108365442],
[-3.2684776044041417, -1.1500337944886265, -4.7387724227769104, -0.87161000308836023]]], [[[-0.77862807704750203,
4.0600646358304022, -0.60854424079166769, -2.1879855865994302], [-3.1756330451114421, 2.3997197078889831,
3.48971886859092, 1.3336201149028941], [-3.1489389998309738, -3.126335897832373, 4.4951625025311497,
4.8070472911835367]], [[-0.48575660505239959, 2.6019068715787999, 2.0846243088005867, -4.1696787529733781],
[4.548381761398181, 2.8671236182352331, -2.4623488980316131, -2.7420314450035965], [-2.6608024266156169,
1.9100775920001078, -4.7415568715485605, 4.335606352714283]]]])+(1.-msk_arg0)*numpy.array([[[[-0.066813883761502879,
2.3352842907581648, 1.9822821047204409, 2.2488083940398429], [-3.5461097023132315, 3.9394596449218593,
-3.1392993492194812, -1.195903516669877], [-2.4035485197244641, -1.2564828050011667, 4.2913267573861269,
3.3717435374804552]], [[2.0596631449037996, 1.766648199074103, 3.7051853214349446, 2.3190764926321314],
[2.0765914923551589, -3.0883600344375619, 3.3829116246346622, -0.77543432130851642], [-1.0720358718139797,
1.757742252759785, 0.37504357425332913, 2.5917331896567894]]], [[[-4.2390464570087687, 0.22513794491672012,
2.4848124269039893, 0.40807584912367822], [3.4683686407549388, 0.16480295091112396, 2.9378502257107231,
-4.5549662855752659], [-3.9215318218115671, -0.029245135802563382, -2.553738608483358, 2.451028936659565]],
[[-3.607109515954888, -3.993893382807582, 0.073725334847121182, -2.1709804092290019], [0.33344114744651687,
-0.58055248058879805, -2.3870396661749513, 4.2208499406342153], [2.5521039977169968, 0.99728976648084799,
-2.2262756306598854, -1.1032924331956737]]], [[[4.966005471135805, -2.2223123277417969, 3.9211131181159882,
1.2020059492898092], [-1.9322416802383922, 1.9213246599314813, -4.9244981020217091, 3.9198631295221293],
[0.441626539964318, -4.8403835386932315, 0.067761940060105275, -3.3198352912434692]], [[3.7289362256010783,
4.4966066758919183, 4.7606849535179343, -4.2473703353049705], [-4.1395576165491477, 1.394341710895656,
1.1616039705755199, 3.875922408907579], [3.2845443210135308, 4.7114056834057489, 1.3775615753886044,
-3.9913085015805105]]]])
arg1=Symbol(shape=(3, 2, 3, 4))
res=arg0-arg1
s1=numpy.array([[[[-1.3801587913318047, -1.0654264899468902, 0.63456255053258914, 0.019001978473544234],
[2.9683175293448718, 3.6853848061948291, -3.378954186340446, 0.041808892524110242], [-0.43021730340079323,
3.8311628728062068, -2.1793361115232388, 3.1444277139479642]], [[4.3924534836876852, 4.6258828194696662,
-1.337885612990688, 4.1259618924749724], [-4.4897704263159222, 0.15369009386017485, 2.6962829695946411,
2.0341337355139855], [-2.3651800760864128, 2.5627722574362446, 4.595047097795911, -4.3290029697233141]]],
[[[4.178160967558556, -4.7760981608958755, -1.9186656311572339, 1.2206352911034513], [-0.085772738154105355,
-1.3074976307569877, -1.956712770838227, 3.2327542991796854], [1.1038448851539249, 3.3037436446982742,
-0.27900134960503031, -0.91401651221828928]], [[-2.3182986051061674, -1.849076104179066, 1.7255801749968178,
-4.800626013966669], [-1.065938515473257, -1.7027357609246607, -4.5450782032387052, 4.2110109611691939],
[0.42483398312984555, -2.0300778021420198, -1.8600131931716146, -0.65565335271777681]]], [[[1.5409796879232402,
3.8526060616448135, 4.5046749363240419, -2.3425879056067043], [-2.0020629040094438, 4.0201378578644089,
-0.91649358299543415, -4.6048272916989044], [2.9771669103984353, -0.18712141889004474, 4.7604346415721732,
3.1208517541066438]], [[-2.0397258049700016, 4.8476984980922424, -0.83670507076672873, -0.65429392708528322],
[1.8648243434781451, -3.0663012456143979, -0.46932892583376429, 1.7337690374943984], [-2.830192293285001,
2.4237706377557764, -4.5545542759999593, -1.4623880986500994]]]])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[[[6.1028596715959349, 3.9517544283384098, -2.8242594703650701, 4.4112292992128603],
[-4.9880341173218969, -8.6556689083226601, 8.169183437876999, 4.5538752380813436], [3.3695587709432733,
-2.1851038594777679, 3.1701720826368636, 0.24659350601855401]], [[-3.1137221013705618, -3.2470915359364199,
1.2664646864516693, -7.3647948725128138], [7.8397462516238141, -1.9353381741629425, -0.3997339530266375,
-3.9359431985591629], [1.2451519767269015, -3.2241442462947294, -4.3766110813204717, 2.9202902813663507]]],
[[[-6.1236534320284308, 5.5394985240682351, 6.051010419919761, 0.22266291146358075], [-3.6797462187742256,
2.3017138109463913, 6.5613374525339943, -2.4885379151712321], [-4.6661223622330734, -2.0639722292265628,
-0.77924486179391206, 2.2111167831686789]], [[3.6561999916767975, 1.4537548339665372, 2.9855781334672749,
3.9179405338193884], [-3.7474886749282366, -1.4312069485024659, 6.7210330274504964, -5.7192323720057381],
[-3.6933115875339873, 0.8800440076533933, -2.8787592296052957, -0.21595665037058343]]], [[[-2.3196077649707423,
0.20745857418558877, -5.1132191771157096, 0.15460231900727406], [-1.1735701411019983, -1.6204181499754258,
4.4062124515863541, 5.9384474066017985], [-6.1261059102294091, -2.9392144789423282, -0.26527213904102354,
1.6861955370768928]], [[1.553969199917602, -2.2457916265134426, 2.9213293795673154, -3.5153848258880949],
[2.6835574179200359, 5.9334248638496305, -1.9930199721978488, -4.4758004824979949], [0.16938986666938405,
-0.51369304575566854, -0.18700259554860121, 5.7979944513643824]]]])+(1.-msk_ref)*numpy.array([[[[1.3133449075703019,
3.4007107807050549, 1.3477195541878517, 2.2298064155662987], [-6.5144272316581038, 0.25407483872703018,
0.2396548371209648, -1.2377124091939873], [-1.9733312163236709, -5.0876456778073731, 6.4706628689093657,
0.22731582353249102]], [[-2.3327903387838855, -2.8592346203955632, 5.0430709344256321, -1.8068853998428409],
[6.5663619186710811, -3.2420501282977368, 0.68662865504002113, -2.8095680568225019], [1.293144204272433,
-0.80503000467645958, -4.2200035235425819, 6.9207361593801036]]], [[[-8.4172074245673247, 5.0012361058125956,
4.4034780580612232, -0.81255944197977303], [3.5541413789090441, 1.4723005816681116, 4.8945629965489506,
-7.7877205847549513], [-5.025376706965492, -3.3329887805008376, -2.2747372588783277, 3.3650454488778543]],
[[-1.2888109108487207, -2.144817278628516, -1.6518548401496966, 2.6296456047376671], [1.3993796629197739,
1.1221832803358627, 2.1580385370637538, 0.0098389794650213247], [2.1272700145871513, 3.0273675686228678,
-0.36626243748827081, -0.44763908047789691]]], [[[3.4250257832125648, -6.0749183893866103, -0.58356181820805375,
3.5445938548965135], [0.069821223771051599, -2.0988131979329276, -4.008004519026275, 8.5246904212210346],
[-2.5355403704341173, -4.6532621198031867, -4.6926727015120679, -6.4406870453501135]], [[5.7686620305710798,
-0.35109182220032409, 5.597390024284663, -3.5930764082196873], [-6.0043819600272927, 4.4606429565100534,
1.6309328964092842, 2.1421533714131806], [6.1147366142985318, 2.2876350456499726, 5.9321158513885637,
-2.5289204029304111]]]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
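#  Multiplication counterparts of the subtraction tests above: res=arg0*arg1
#  with arg0 a constant Data object and arg1 a Symbol.  The references are
#  the element-wise products, broadcast in the rank-0/rank-N pairings, and
#  the same Lsup tolerance check applies.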
def test_mult_overloaded_constData_rank0_Symbol_rank0(self):
arg0=Data(4.62465145684,self.functionspace)
arg1=Symbol(shape=())
res=arg0*arg1
s1=numpy.array(-4.83968289402)
sub=res.substitute({arg1:s1})
ref=Data(-22.3818465465,self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_constData_rank0_Symbol_rank1(self):
arg0=Data(3.61282962415,self.functionspace)
arg1=Symbol(shape=(2,))
res=arg0*arg1
s1=numpy.array([-1.5808963858957537, 2.6509696096021864])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([-5.7115092956749542, 9.5775015382906385]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_constData_rank0_Symbol_rank2(self):
arg0=Data(1.49025589385,self.functionspace)
arg1=Symbol(shape=(4, 5))
res=arg0*arg1
s1=numpy.array([[4.1668790679299175, -2.8153908003971773, 3.9030969110369043, -0.17962827030748763,
2.1351246321738504], [-3.6496148227091672, 1.387147904486504, -1.1823464175604426, 1.1752089388333173,
-1.7608073918316345], [1.1775740286573697, 1.5139307880954602, -4.5926484431964676, -1.3565377974967943,
2.5163302873137585], [-4.9066894975281929, -0.71152046468118701, 3.1503673258369584, -1.582645414456433,
4.0919813724626888]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[6.2097160899280084, -4.1956527337729987, 5.8166131759266637, -0.26769208852717874,
3.1818820671938379], [-5.4388599998117435, 2.0672053402977806, -1.7619987173376985, 1.7513620475973966,
-2.6240535936055083], [1.7548866366471667, 2.2561442798348774, -6.8442214108383101, -2.0215884479450947,
3.7499760415336878], [-7.3122229429659207, -1.0603475660835118, 4.6948534751098423, -2.3585466567627744,
6.0980993578224529]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_constData_rank0_Symbol_rank3(self):
arg0=Data(-4.8189372207,self.functionspace)
arg1=Symbol(shape=(6, 2, 2))
res=arg0*arg1
s1=numpy.array([[[-2.1082618280451539, -1.3955382894516588], [-3.4395290563777836, 4.8101517504101992]],
[[-2.0990702249254642, -0.19805771431177988], [3.5892783984855807, 2.3466998684539329]], [[1.0198837335276068,
1.3499161645518498], [2.1694219720539518, -4.6399343230104897]], [[0.39865646301668001, -4.6889954108117458],
[-1.8077039797673278, -2.0729006873515732]], [[-1.4351307436857752, -0.27135779329323384], [-0.11640854123168598,
3.5462285443877146]], [[-4.7284739075521287, -2.9968208233684992], [-0.63458679250348737, -0.3430241153835798]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[10.159581394141497, 6.7250114059464261], [16.574874591447742, -23.179819307252547]],
[[10.11528763575015, 0.95442769134320948], [-17.296507269905895, -11.308599341897418]], [[-4.9147556842796076,
-6.5051612501794489], [-10.454308288528681, 22.359552210744798]], [[-1.9211004679024974, 22.595974512838154],
[8.7112119921028786, 9.9891782770868964]], [[6.9158049573139522, 1.307656170226968], [0.56096545214841331,
-17.089052725648113]], [[22.786218910197551, 14.441491409490299], [3.058033914157781,
1.6530116772185961]]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_constData_rank0_Symbol_rank4(self):
arg0=Data(-0.0961090764765,self.functionspace)
arg1=Symbol(shape=(3, 2, 3, 4))
res=arg0*arg1
s1=numpy.array([[[[-1.3508458008250446, -1.7001494994927766, -1.1781435318758104, 0.94544141796815939],
[0.48819490611616523, -1.7587019774587032, -3.7925425828521075, 4.7932646338579481], [0.58005604545753187,
-1.1056764808385546, -0.51753380988709718, 0.78769257503235046]], [[2.2158543714442409, 2.8083791437737293,
4.2821497276046312, 3.557157624087198], [1.3939326777613186, 1.1701609581270382, 0.2011137853628675,
2.0268107124192767], [4.9657490768337222, -0.36506867019444478, 1.9579179042357691, 2.3678413818675184]]],
[[[4.6392916421533705, 4.4042259724661363, -0.16399061756687949, -2.3651374585729892], [-3.2439831086523521,
-0.75751658111162179, 0.90117784493079078, 3.4349562957397168], [2.5698732564560274, 2.3597838208145365,
-4.9703254679114597, 3.3611480514859409]], [[3.5875371307296735, 3.7296897049486226, 0.57748787491421005,
-3.5628770219331427], [-2.1948129576712505, 4.1072354163079758, 3.4869674239178412, 1.087096416125072],
[-1.8854495462953502, -0.47642374646276409, -4.0405955013288795, 2.2924875059879755]]], [[[4.5285297494026793,
-2.5139281223909449, 2.208483948920561, -2.7970423581490991], [-1.7523437586040416, 1.8001610169726279,
-2.0370614441012389, -1.7571467006380495], [-4.7379091083725093, -1.7199833882948159, -1.9229770651511577,
1.983783387985067]], [[3.6886437141901496, 1.3947955105400336, -1.5553692753039892, -3.1732704417249646],
[-3.1233013224428783, -0.18349036348379588, -2.6033177681431141, -4.8475794865627444], [0.44359684613617745,
2.4458348840139719, -4.4593547045866213, 2.8568083531481321]]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[[0.12982854237943392, 0.16339979826821033, 0.11323028680532936, -0.090865501543539021],
[-0.046919961567349337, 0.16902722285092533, 0.36449776513566223, -0.4606762372674883], [-0.055748650833525863,
0.10626554545516434, 0.049739696513606095, -0.075704405933744209]], [[-0.21296371724588994, -0.26991072590391707,
-0.41155345565411638, -0.34187513413231108], [-0.13396958233003523, -0.11246308901442943, -0.019328860177915387,
-0.19479490576326494], [-0.47725355778845102, 0.035086412742886867, -0.18817368159287634, -0.22757104845409318]]],
[[[-0.44587803523243974, -0.42328609080747298, 0.015760986805161359, 0.2273111768833927], [0.31177622067789706,
0.072804219026262926, -0.086611370417368036, -0.33013047732063483], [-0.24698814533960797, -0.22679664370263819,
0.4776933905085276, -0.32303683512905357]], [[-0.34479488045953066, -0.35845703308646898, -0.055501826334373089,
0.34242482017728643], [0.21094144640040824, -0.39474260273287426, -0.33512921881633451, -0.10447983259467819],
[0.18120881463745528, 0.045788646284003677, 0.38833790204776181, -0.22032885703438657]]], [[[-0.43523281201138325,
0.2416113101712597, -0.21225535274389759, 0.26882115790732197], [0.16841614030876842, -0.17301181285021075,
0.19578009411842659, 0.16887774663202701], [0.45535606883521218, 0.16530601500391165, 0.18481554981714085,
-0.19065958934863894]], [[-0.35451214082160976, -0.13405250839155117, 0.1494851046293674, 0.30498009156431655],
[0.30017760565777185, 0.017635089376762319, 0.25020246647106092, 0.46589638759990259], [-0.042633683210029791,
-0.23506693191655562, 0.42858446233889236, -0.27456521249137733]]]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_constData_rank1_Symbol_rank0(self):
arg0=Data(numpy.array([0.27659058494851418, 4.1541405281225892]),self.functionspace)
arg1=Symbol(shape=())
res=arg0*arg1
s1=numpy.array(3.8719303416)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([1.0709394780619239, 16.08454275409127]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_constData_rank1_Symbol_rank1(self):
arg0=Data(numpy.array([3.7426337922420245, 3.9291817340183464]),self.functionspace)
arg1=Symbol(shape=(2,))
res=arg0*arg1
s1=numpy.array([0.24137850302835329, -1.7566967446888535])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([0.90339134215470884, -6.9023807614409334]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_constData_rank2_Symbol_rank0(self):
arg0=Data(numpy.array([[-4.5985565783651303, 1.9500350746471993, 4.9713294753194841, -4.1975843272554876,
-2.7203349447146974], [0.6454726334120906, 4.0369461924736108, -0.05835204361358759, -4.4624467830414529,
-3.7659400185729366], [2.0977327530498648, 0.21380148281940414, -2.0069737126558609, 2.3624658088422841,
4.2025541873980536], [3.414052245462365, -0.88493614463302261, -1.2061553540091854, 4.49473416391168,
2.5601548526811913]]),self.functionspace)
arg1=Symbol(shape=())
res=arg0*arg1
s1=numpy.array(1.33330352327)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[-6.1312716879061586, 2.5999886355337587, 6.6282911047962196, -5.5966539727668181,
-3.6270321662717726], [0.86061093630488039, 5.3824745816900785, -0.077800985340197651, -5.9497960182490823,
-5.021141095199475], [2.7969144705273297, 0.2850622703241823, -2.6759051222010943, 3.1498839865422879,
5.603280304805077], [4.5519678875143299, -1.179888479511161, -1.608171183115485, 5.9928448969206256,
3.4134634852052463]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_constData_rank2_Symbol_rank2(self):
arg0=Data(numpy.array([[-0.80021427988594684, -1.1537754790930244, 2.8892977581295964, 3.7382841198075329,
2.3764815828521835], [0.29503112693550992, -2.1167484120073929, -3.8919266120092, 1.3224523748721193,
2.9631976530881987], [-2.8104707825838995, -2.0632038633268568, -3.0589018293825343, -1.678440560478224,
-4.5543583893018766], [-0.82692491278534597, 3.2768573560743448, 3.9160327845735843, 2.3191381875549339,
4.4585962073227758]]),self.functionspace)
arg1=Symbol(shape=(4, 5))
res=arg0*arg1
s1=numpy.array([[0.84019739061084842, -0.26041818053762533, -4.1862385457734819, 2.5897362481178323,
1.3358121377857461], [-2.9413855411668366, -0.72774497299829122, -3.5296778480396993, -2.4701281082949977,
1.8361878086654535], [2.766871279727253, -4.8815007323834605, -2.5917825490500368, -4.9262737607479554,
-2.1516904953173679], [1.8198771262398861, -3.8256760862124253, 4.2065485224111754, 2.1262330288097564,
0.59770635028954899]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[-0.67233794988971163, 0.3004641110143324, -12.095289645299022, 9.6811698908288335,
3.174532943598229], [-0.86780029096226652, 1.5404530159404959, 13.737247148605071, -3.2666267830530953,
5.4409874052666343], [-7.7762108908439682, 10.071531169886436, 7.9280083806508852, 8.2684576920589663,
9.799569658529764], [-1.5049017338959638, -12.536194825062895, 16.472981923661731, 4.9310282127532963,
2.6649312664937215]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_constData_rank3_Symbol_rank0(self):
arg0=Data(numpy.array([[[-0.93968623179119604, 3.5541982898616844], [-4.4072293043240576, -3.6076482066197646]],
[[1.7484917853830488, 4.9003868861327007], [4.7568448118670297, -2.9711396711199276]], [[3.8141950057192862,
1.8792050658033421], [-3.9338669245816726, 1.8697569231208337]], [[2.7319382834348493, 4.8419678958442489],
[1.6265368297291136, 2.7028283208639312]], [[-2.4918167983349662, 0.66893189862375824], [-0.98140319773957252,
-4.1758241797866216]], [[2.3073753559238792, -1.9496085178777891], [-0.68687199404203181,
-1.8466377636332689]]]),self.functionspace)
arg1=Symbol(shape=())
res=arg0*arg1
s1=numpy.array(-4.7863046684)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[4.497624598049291, -17.011475867168713], [21.094342193976221, 17.267303453273268]],
[[-8.3688143950302809, -23.454744630041624], [-22.767708529872653, 14.22077967833674]], [[-18.255899362045483,
-8.9944479793272372], [18.828685625972383, -8.9492262898982791]], [[-13.075888959772939, -23.175133544101051],
[-7.7851008214498698, -12.936559810022931]], [[11.926594374657277, -3.2017118692216404], [4.6972947069193065,
19.986766766112023]], [[-11.04380143779947, 9.331420350662329], [3.2875786316735858,
8.8385709489135635]]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_constData_rank3_Symbol_rank3(self):
arg0=Data(numpy.array([[[-3.7703901363085679, -1.2567782536949124], [-4.6867702962008151, -4.0264117118029379]],
[[4.9065305059289148, -2.9124024019871833], [2.1575041693932544, -2.447359134212459]], [[-4.2014241124001597,
2.1567970243772496], [-0.21332683825097298, -4.1649402688134085]], [[2.4269573018600399, 1.862212593201459],
[-0.8497946023648062, -0.85421311729337468]], [[-3.9387175807922148, 2.8138330952953154], [-3.5224045840887532,
2.3328843983658345]], [[-4.1835084349073455, 1.5103476384019734], [3.5299839973193272,
0.52130047189201001]]]),self.functionspace)
arg1=Symbol(shape=(6, 2, 2))
res=arg0*arg1
s1=numpy.array([[[3.7025504227899226, 0.94126871779523835], [0.2494407260336402, -1.4801201322289135]],
[[2.2922758002791603, -0.52083778427225891], [-3.8446937528722511, 4.7302904730413342]], [[4.5317616448777596,
4.7075623194640848], [3.6913207941396351, -0.94890681756314521]], [[0.10066689821055874, -0.2379214158604368],
[3.5670839439496831, 1.12875998069849]], [[-3.2904800400470879, 0.6205159319494804], [3.0069046365957437,
-0.47921917980699202]], [[-2.06878107963567, -2.1195493051668968], [-1.6911454119040039, 1.930100136733504]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[-13.960059593272241, -1.182966055408349], [-1.1690713854372303, 5.9595730352818101]],
[[11.247121142072317, 1.5168892139602093], [-8.2949428018620797, -11.576719596675883]], [[-19.039852646439627,
10.153256402690602], [-0.78745779398387905, 3.952140215820322]], [[0.24431426366771691, -0.44306025680762678],
[-3.0312886817506057, -0.9642015817884666]], [[12.960271582979336, 1.7460282654774637], [-10.591534675862574,
-1.1179629479694033]], [[8.6547630966325499, -3.2012562875353661], [-5.9697162411611355,
1.0061621120780087]]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_constData_rank4_Symbol_rank0(self):
arg0=Data(numpy.array([[[[0.62243391611320931, 2.0870921150140633, -1.966483871868502, 2.0460162713266978],
[-4.0678614422698969, -1.0483170813789053, -3.3875965478845105, -0.56278682367596566], [-3.1888119343249111,
1.1466037700100813, -0.08422750542817603, 1.334817640609244]], [[0.94229536045356443, -0.46607285357294259,
3.2913807414193705, 4.4571709007297127], [-4.8728928412169248, 1.2037128407211304, 1.7363887141819356,
-1.8638393114984897], [0.2688971714658468, -1.0619718642775746, -1.5898347593643845, 4.0497830306245604]]],
[[[-3.0176256202980776, 4.6481698965552418, 2.9546667018974437, -0.80203271850496982], [0.920018392214903,
2.5683755474820238, -0.39881454964042007, -1.8042576364273657], [-2.3945875160036332, 4.1111523243175156,
0.78082260002804471, -4.7775799631740909]], [[2.8847051100624723, 3.801654962351078, 1.7703173227715148,
4.1273910274214991], [-0.89823810002448035, -0.13134866838888204, -2.9139897570290261, 3.0266778096414111],
[-4.6565287368895367, 4.2608689122302597, 4.4884714229987637, 0.39909756290562726]]], [[[-3.0170682335317931,
3.5429625779103553, -1.5481240066779933, 2.2741378666795491], [0.99402869902853208, -2.7818018223223495,
-4.1871147291249109, -3.2256430704428585], [-1.1555881857999983, -1.3659488684092769, -3.6647624059065964,
-4.1746014766224038]], [[-0.16412546559365726, 2.0500472712024926, 0.70515501953914139, 4.9173519977774696],
[4.8984165381421718, 2.4090796502395673, -4.3641606786463338, -2.9169347489558826], [-2.5705157992905558,
-2.1873999149378887, 0.30099797008132612, -0.40586512460845547]]]]),self.functionspace)
arg1=Symbol(shape=())
res=arg0*arg1
s1=numpy.array(-4.38997505647)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[[-2.7324693660399006, -9.1622823254735639, 8.6328151464594995, -8.9819603962624388],
[17.857810264753809, 4.6020858385281453, 14.87146434660761, 2.470620118049252], [13.998804851470311,
-5.033561950002353, 0.36975664789865109, -5.8598161472149455]], [[-4.13665312822154, 2.046048201684489,
-14.449079356187196, -19.566869076641478], [21.39187802580896, -5.2842693459222208, -7.6227031436002131,
8.1822080867524907], [-1.1804518754912556, 4.6620299948548602, 6.9793349375236682, -17.778446488570129]]],
[[[13.247301202882928, -20.405349904126975, -12.970913121521617, 3.5209036287121829], [-4.0388577933199716,
-11.2751045891017, 1.7507859250800237, 7.9206460193673678], [10.51217946579799, -18.047856157115579,
-3.4277917376536409, 20.973456868640252]], [[-12.663783478454937, -16.689170458038671, -7.7716488890093069,
-18.119143658691538], [3.9432428538813187, 0.5766173779281577, 12.79234234817573, -13.287040088306636],
[20.442045004695569, -18.705108243592797, -19.704277588657195, -1.7520283462549344]]], [[[13.244854288882133,
-15.55351734304433, 6.7962257736436893, -9.9834085097043221], [-4.3637611941537289, 12.212040612046716,
18.381329219449761, 14.160492620329668], [5.0730033112170725, 5.9964814607344739, 16.088215549830551,
18.326396353088406]], [[0.72050670008820039, -8.9996563851698248, -3.0956129467236795, -21.587052614141587],
[-21.503926418659777, -10.575799573608803, 19.158556521698415, 12.805270789276145], [11.284500241155726,
9.6026310651088593, -1.3213735807060782, 1.7817377733234934]]]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_constData_rank4_Symbol_rank4(self):
arg0=Data(numpy.array([[[[0.92475193690629176, -2.8751640037436377, 2.3253579475929946, -1.4883270341411139],
[0.99308590109939665, -4.571048057187971, 0.29582761355193554, 0.13789701080785477], [-4.8513899543422578,
-3.6257472257559966, -0.35978952486470916, 3.2517553292871213]], [[1.4829500707237964, -2.4100404084555849,
-1.3007935048236199, -1.9592333033911915], [-3.2571300156448268, 3.5809336502177818, 4.572845912197149,
0.11594932874247199], [4.1148495529245857, -4.948814678027361, 0.20924193900359267, -1.1213985895158896]]],
[[[-2.5470666325037494, 3.6152403556802923, 3.973428980280806, -2.5374743265932209], [-1.5104931521079923,
1.9242528516494346, 1.2141691608491234, 4.0577747879052453], [3.8342739616484334, -3.0938013228729275,
-0.013278763548063566, -0.42321566529779808]], [[2.6294282632341996, 3.3952741627917256, 2.4458864889971466,
4.2648559736340506], [-3.7590978982471257, 1.9398108969769856, -0.72053544714256734, -2.2852607861733709],
[-3.2806396751883415, 0.51560443280332802, 3.3391590838269085, -2.5163633463539634]]], [[[4.01150830599126,
-0.63639401929707073, -3.1327871719882161, 3.2114179123865725], [2.4499611723235173, 2.7753873236361759,
-4.685579988073755, -2.2124483061230249], [-1.592101861637012, -4.6991157531893437, 1.8862972499478818,
-2.4723033101868044]], [[1.6629673331527997, -3.797885972234567, -2.7734235709739954, -0.64462102158809298],
[-0.88743325197640388, 2.5092803298320216, 1.525489125478364, 2.2445055975567962], [-1.160115769838137,
-1.3355312013577905, -1.0067006671660481, 3.6058946626271364]]]]),self.functionspace)
arg1=Symbol(shape=(3, 2, 3, 4))
res=arg0*arg1
s1=numpy.array([[[[1.1454670721774276, -0.011324883951625786, -3.6679325603974888, 0.094693748129400568],
[-0.94166879518392843, -4.1783274612573384, -0.26913364710175003, -3.1060599945156477], [2.1413905443235972,
3.4882436840724846, 2.2658723445074678, 2.1819015356595139]], [[-1.9800856525493749, 2.7480580905119893,
-3.1758715478375219, 4.1124829257304629], [2.0216531202224841, 4.0515397505305888, 4.044920456333422,
-2.3883049937748946], [1.5444958016374528, -1.2594632181245204, 3.5895692628257976, -1.8412492131846401]]],
[[[-0.79280116164120962, -1.2215336421582137, -4.9961696612945055, -2.7749169673973384], [-0.23664202961414915,
-3.5042031492270143, -4.0093512501017203, 0.23193992235689542], [3.1633334993556197, 1.6180559532355963,
2.5015418045872986, 4.5068540065663516]], [[-4.1132185738946214, 4.1318631987586905, -4.2991786205876377,
0.23269781735957817], [1.6926075754432439, -3.007700180988361, 3.1039787293459948, 3.355203306995362],
[-4.1457163793408522, -1.2887202396242636, -2.7544192723911931, -4.0173861352686675]]], [[[1.4375090967135709,
0.047062319770922123, -0.75634196908244178, -0.17805460405137996], [2.5278812537929483, -3.9412959574596886,
-0.1516720332141368, -2.385490768218621], [2.0634420046107511, 3.7303717046166387, -3.7967716979561583,
-0.36579638919369373]], [[-0.89677692789005903, -0.33159623329473931, -2.0783805922287799, 3.3237758621528677],
[1.8764406996924805, 3.8567013916314448, 2.4876054261100879, -3.122046411865298], [-3.9505368448428069,
-3.9474451391708176, 0.76222063661286921, 2.065165407462576]]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[[1.0592728936584552, 0.032560898684288464, -8.5292561305554226, -0.14093526530513639],
[-0.93515800400241478, 19.099335624075504, -0.079617164548639499, -0.42831638863356958], [-10.388720575054998,
-12.647489860286688, -0.81523713423442645, 7.0950099465605785]], [[-2.9363681584872698, -6.6229310429171893,
4.1311530815811848, -8.0573135077187672], [-6.584787059098673, 14.508295027869941, 18.496797973906915,
-0.27692236086049277], [6.3553678588617721, 6.2328500602902022, 0.75108843274136661, 2.0647742706124967]]],
[[[2.0193173850265365, -4.4161377189515036, -19.851925322587327, 7.0412805631986641], [0.35744616523310901,
-6.7429729026590115, -4.8680306428853894, 0.94115996924851042], [12.129087268589474, -5.0059436486027034,
-0.033217382128710969, -1.9073712167890253]], [[-10.815413171058387, 14.028808362935354, -10.515302901880693,
0.99242267641760229], [-6.3626775794058616, -5.8343695859208742, -2.2365267016703343, -7.6675145471157151],
[13.600601636143761, -0.6644698681936374, -9.1974441340729562, 10.10920321894068]]], [[[5.7665796814044832,
-0.029950178836461127, 2.3694584183777816, -0.57180774483350039], [6.1932109200372141, -10.938622839032124,
0.71067144357861733, 5.2777750094174012], [-3.2852098569207846, -17.529448442415831, -7.1618400125346513,
0.90435962385794955]], [[-1.4913107362062921, 1.2593646828759113, 5.76422972394219, -2.1425757917908261],
[-1.6652158722689767, 9.6775449400565687, 3.7948150260119111, -7.0074506472637719], [4.5830800930287374,
5.2719361490107728, -0.76732802340590534, 7.4467689202114986]]]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
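# The taggedData variants below repeat the same pattern with tagged Data:
# the constructor value is the default component and setTaggedValue(1,...)
# attaches a second component under tag 1. The reference ref carries a
# matching tagged value, so the Lsup check covers both the default and the
# tagged component of the product.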
def test_mult_overloaded_taggedData_rank0_Symbol_rank0(self):
arg0=Data(3.142013671,self.functionspace)
arg0.setTaggedValue(1,-2.04077395087)
arg1=Symbol(shape=())
res=arg0*arg1
s1=numpy.array(-0.905206509275)
sub=res.substitute({arg1:s1})
ref=Data(-2.84417122722,self.functionspace)
ref.setTaggedValue(1,1.84732186428)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_taggedData_rank0_Symbol_rank1(self):
arg0=Data(1.54368119889,self.functionspace)
arg0.setTaggedValue(1,-0.973182859739)
arg1=Symbol(shape=(2,))
res=arg0*arg1
s1=numpy.array([-0.97051483006179051, -4.8243289242685101])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([-1.4981654964063673, -7.4472258576349226]),self.functionspace)
ref.setTaggedValue(1,numpy.array([0.94448839773843207, 4.6949542188401541]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_taggedData_rank0_Symbol_rank2(self):
arg0=Data(0.576275206322,self.functionspace)
arg0.setTaggedValue(1,-0.446417285252)
arg1=Symbol(shape=(4, 5))
res=arg0*arg1
s1=numpy.array([[-3.7798020411794298, 2.8125776443752777, -0.74593700484018655, 4.9042983986909512,
-1.1062378936297144], [1.3747147971013396, 1.7312150406230939, -2.4865059810459189, -3.9444781957615138,
-4.8713070674060148], [4.7677542872819085, -0.65669250050514094, -2.2966507465733335, 4.6331137703181184,
-4.2587467390331817], [-2.2122452558031123, -0.89486317692759698, -2.7263171047505361, 1.4136050574112167,
1.5057522304514919]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[-2.1782062011384205, 1.6208187623100703, -0.42986500136777495, 2.8262255715721345,
-0.63749747039309623], [0.79221405333400008, 0.99765630472347699, -1.4329117472490631, -2.2731049860965866,
-2.8072134853290578], [2.7475385855977827, -0.37843560621895822, -1.3235028828319906, 2.6699485939051231,
-2.4542101557111002], [-1.2748620912236397, -0.5156874619142493, -1.5711089520403427, 0.81462554611800553,
0.86772767727381395]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[1.6873689660148408, -1.255583276563631, 0.33299917267007023,
-2.189363577211358, 0.49384371731752186], [-0.61369644771829823, -0.77284431862310299, 1.1100192498224006,
1.7608832478891809, 2.1746356766622736], [-2.1284079256789306, 0.29315888332112217, 1.0252645914581893,
-2.0683020716109741, 1.9011781578167486], [0.98758452140814967, 0.39948239011636527, 1.2170750806399457,
-0.63105773214859318, -0.6721938229809169]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_taggedData_rank0_Symbol_rank3(self):
arg0=Data(4.6954921918,self.functionspace)
arg0.setTaggedValue(1,3.80656545201)
arg1=Symbol(shape=(6, 2, 2))
res=arg0*arg1
s1=numpy.array([[[3.3283491776040073, -3.9068878738203718], [-2.1695978858355423, -2.2223735496995447]],
[[-2.3211489651914921, 4.272069872491878], [1.636342469753175, -4.2787938517786497]], [[2.7410635950334186,
-3.5668158773147507], [-1.0064480730166228, 1.389332564769]], [[0.77463712529690731, -0.94041585508240466],
[3.6978341544417166, -2.6780892355753592]], [[-4.4954676727861065, -4.2409706282499835], [2.3785224394198679,
-4.1039517994892138]], [[-2.0175257524312817, 0.85038925666007348], [-3.2277420742959917, -3.855794844823607]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[15.628237575034362, -18.344761505774329], [-10.187329932293613, -10.43513764988427]],
[[-10.89893684206883, 20.05947072912349], [7.6834332898420676, -20.091043121362375]], [[12.870642707715545,
-16.747956101531248], [-4.7257690683049729, 6.5236002096908132]], [[3.6373025733125388, -4.4157153045874011],
[17.36315139876443, -12.574947094596416]], [[-21.10843335607106, -19.913444470614692], [11.168333542324792,
-19.270073630038475]], [[-9.4732764173030475, 3.9929961146407393], [-15.155837707011633,
-18.104854587064445]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[12.669578991701924, -14.871824405371543], [-8.258716356981127,
-8.4596103757528684]], [[-8.8356054598727027, 16.261913585211406], [6.2288447130233893, -16.28750885246448]],
[[10.434037982623636, -13.5773180922763], [-3.8311104639897189, 5.2885853424057174]], [[2.948706919001554,
-3.5797545044815582], [14.07604773957002, -10.194321961547963]], [[-17.112291933867024, -16.143532276496508],
[9.0540013447323737, -15.621961136660534]], [[-7.6798438277506191, 3.2370623651649013], [-12.286611468022571,
-14.677335446353782]]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_taggedData_rank0_Symbol_rank4(self):
arg0=Data(-1.72281700023,self.functionspace)
arg0.setTaggedValue(1,1.23448641864)
arg1=Symbol(shape=(3, 2, 3, 4))
res=arg0*arg1
s1=numpy.array([[[[-4.3265612601968471, 4.9346403281714331, 2.8024200919119817, -3.9056671936664311],
[0.98069732637570617, -2.2852413242790757, 3.2075463029671312, -2.6966078512789116], [-2.1601129611240619,
-2.6503532815304762, -4.675750160645002, 3.2350739199006568]], [[-4.1760984392537086, 4.3345400423125842,
2.3141216950646779, 0.60673873055732486], [3.6895192429599195, -1.0430965175426432, 0.33936966744652075,
-4.9652989404647769], [0.016939166262534222, -3.5215478761207564, 0.96881594277756378, 2.4707123930500092]]],
[[[-4.0598585401879825, 0.32726568454206451, -3.8317591404661555, -4.8432615549786364], [-1.8707032325346216,
0.11786029243200069, -1.2644962697725761, 4.5016381310909193], [1.0052891428203132, 3.5573702004465542,
0.94853515124922705, -3.266716026917611]], [[4.4268917686602247, 1.7045644573811822, -4.2672635941058026,
4.4735466129490451], [-3.3659634968161098, -3.7740307778271154, -0.23936175808445981, 1.638694221507726],
[-2.6562820856857803, -1.8386899346245853, -3.8721446565337256, 2.2142808663189424]]], [[[-4.9689140219050429,
3.0036100506068504, 1.7161971518176031, 1.2296325439044953], [-4.2017528414854652, -1.8394187611478952,
-4.4722717389932569, -2.3151891625454821], [1.0583223957426515, 4.9808003293003509, -0.20896133794562566,
-3.9944246041361611]], [[-3.3354149131160451, 1.5689046088326091, 1.0657585673339192, -2.4003243575280555],
[0.12124021598431511, -1.1303400850693057, -1.9271523374197388, -1.7678094654193863], [1.3900959283471721,
1.5973269294693555, 3.1820328180383193, 0.020208485606988624]]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[[7.4538732915852082, -8.5014822473732448, -4.8280569761201289, 6.7287498384724653],
[-1.6895620259559927, 3.9370526030863635, -5.5260152997629799, 4.6457618491254991], [3.7214793318324864,
4.5660736900247683, 8.0554618655674215, -5.5734403461917594]], [[7.1946533857624484, -7.4676192730552966,
-3.9868081968486213, -1.045299799699541], [-6.3563664744313337, 1.7970644132987275, -0.58467183243782017,
8.554301425835547], [-0.029183083606744185, 6.06698254808967, -1.6690925763069098, -4.2565853134149645]]],
[[[6.9943933115474914, -0.56381888491958132, 6.601419787965443, 8.344053343456924], [3.2228793313878823,
-0.20305171545342732, 2.1784956702862215, -7.7554987011078413], [-1.7319292253931926, -6.1286978574257542,
-1.6341524838838573, 5.6279539060835297]], [[-7.626724397207207, -2.9366526251568561, 7.3517142643698472,
-7.7071021560908708], [5.7989391344540557, 6.5019643834157081, 0.41237650603182713, -2.8231702629851867],
[4.5762879346145349, 3.1677262775151811, 6.6709966416095421, -3.8148007197688423]]], [[[8.5605295495980389,
-5.1746704572343623, -2.9566936288903536, -2.1184318506694821], [7.2388512260579478, 3.1689819122397545,
7.7049057815666941, 3.988647247971739], [-1.823295815105068, -8.5810074820485838, 0.36000214540263886,
6.8816626141257302]], [[5.7463095151227659, -2.702935531829326, -1.8361069779390997, 4.1353196092052489],
[-0.20887470520881798, 1.9473691145940035, 3.3201308089314878, 3.0456122001844874], [-2.3948808973010833,
-2.7519019890081795, -5.4820602341926197, -0.034815522552537087]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[-5.3410811151064834, 6.0917464659774998, 3.4595495427759313,
-4.8214931062905171], [1.2106575302027247, -2.8210993781265854, 3.9596723481566771, -3.3289257687890399],
[-2.66663011322566, -3.2718251306347788, -5.7721500702479123, 3.9936548173985087]], [[-5.1553368061426292,
5.3509308132656104, 2.8567518036265622, 0.74901072253301881], [4.554661396727484, -1.2876884842321341,
0.41894724535949618, -6.1295941064678363], [0.020911170694103174, -4.3473030256449352, 1.1959901235162222,
3.0500608935740701]]], [[[-5.0118402294424564, 0.40400504285255151, -4.7302546183870042, -5.9789406115194401],
[-2.3093577338610776, 0.14549693030368552, -1.5610034714491989, 5.5572111344423751], [1.2410157936131636,
4.39152519850908, 1.1709537618153174, -4.0327165687679427]], [[5.4649377651792932, 2.104261672325459,
-5.2678789516603199, 5.5225325368173852], [-4.1552362224415518, -4.6589897387390575, -0.29548883949592541,
2.0229457607473758], [-3.2791441588432502, -2.2698377523754134, -4.7801099894819963, 2.7334996565146623]]],
[[[-6.1340568754081053, 3.7079158143505366, 2.1186220756193546, 1.5179646753620302], [-5.1870068172759387,
-2.2707374788199806, -5.5209587222334928, -2.85806957773395], [1.3064846240818391, 6.1487303604553372,
-0.25795993371372866, -4.9310629240686339]], [[-4.1175244107552791, 1.9367914317381283, 1.3156644769179004,
-2.963167819687802], [0.14966940002504314, -1.3953894834570877, -2.3790433871858725, -2.1823367757951],
[1.7160545441447639, 1.9718784005502887, 3.9281762975200034, 0.024947101023013681]]]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_taggedData_rank1_Symbol_rank0(self):
arg0=Data(numpy.array([-0.099233059085104713, 4.771977048069223]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([3.9729085267773208, 4.512809517509826]))
arg1=Symbol(shape=())
res=arg0*arg1
s1=numpy.array(0.26176969234)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([-0.025976207346631468, 1.249158963725014]),self.functionspace)
ref.setTaggedValue(1,numpy.array([1.0399870427481233, 1.1813167589860505]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_taggedData_rank1_Symbol_rank1(self):
arg0=Data(numpy.array([0.84702689091359229, -3.3372769586299422]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([-2.152707415414048, 1.9005183627662312]))
arg1=Symbol(shape=(2,))
res=arg0*arg1
s1=numpy.array([0.22148437875716098, 4.0581595354793194])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([0.18760322472460655, -13.543202312199522]),self.functionspace)
ref.setTaggedValue(1,numpy.array([-0.47679106454891407, 7.7126067162133252]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_taggedData_rank2_Symbol_rank0(self):
arg0=Data(numpy.array([[-4.9957974390113735, 4.9127660926149055, -0.033400607153987849, -4.5745875540405283,
-3.40034229393263], [4.6311740546030773, 4.0795994583149682, 4.8540687237153293, 4.9306788508967045,
3.1060981817064288], [-1.3242874361820456, -3.3839454855009707, 0.088505407790738566, -4.4328915815516297,
-3.0958370529970693], [-2.3333608177639089, -4.3040231210385214, 4.1339174077369343, -4.5703847879440351,
-3.3406709387044389]]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([[-0.78277733029185015, 4.3160080804422201, -3.0818130197239957,
1.7055665928942068, -3.3364799279772583], [4.5669273627829092, 3.6704066897391243, 2.6983979089447621,
-1.2237350853460538, -0.1257348607090929], [2.1891872096029914, -0.7503980382583979, 1.9746042593444724,
-2.0584330310875232, -0.7673935307397155], [-0.23746062225782705, 2.8663010003293437, 3.8185722602896526,
4.8671017855990222, -1.9042813962136051]]))
arg1=Symbol(shape=())
res=arg0*arg1
s1=numpy.array(-0.123633480243)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[0.61764782397173934, -0.60738236964752024, 0.0041294333046613622, 0.56557217998023424,
0.4203961518147894], [-0.57256816597952853, -0.50437507902705625, -0.60012540964938543, -0.60959698629464243,
-0.38401772817938795], [0.16372626457665548, 0.41836895732351315, -0.01094223158545413, 0.54805381376508067,
0.38274910912583321], [0.28848151856172877, 0.53212135749834688, -0.51109059615373209, 0.56505257738107939,
0.41301877449713931]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[0.096777485598944848, -0.53360309973999165, 0.38101526908524591,
-0.21086513366492038, 0.41250062525520415], [-0.56462512387573127, -0.4537851529579448, -0.33361232456203066,
0.15129462749623307, 0.015545038417276023], [-0.27065683362567039, 0.092774321037059626, -0.24412719668449809,
0.25449123947954888, 0.09487553292096082], [0.029358083150294065, -0.35437076809338752, -0.47210337809722808,
-0.60173673244828274, 0.23543293637500923]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_taggedData_rank2_Symbol_rank2(self):
arg0=Data(numpy.array([[-2.5363493699555439, 1.9315826441262116, -4.797663921800063, -2.3131658171459835,
-1.4174075473244754], [-0.4937783451212221, -4.7652740781432534, 1.5781017135273068, -4.2362357361072114,
-3.918073606586808], [2.094919785395116, 1.3684348598821918, -4.2376402301126852, -1.6050592311847534,
3.151025223042982], [-2.6417620339476366, 0.27296872857386667, -1.4323869283247213, -4.6402797342361799,
-3.9199666124863741]]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([[-0.69802384928264605, 0.87453186333400357, 0.81014803662176149,
-4.214756633799734, 0.78685864942817574], [0.48682400140861759, -2.7486583171634758, 0.40554914153896249,
-1.7609786982015061, -0.39145780725801416], [1.2068445571926318, 0.18236245525374706, 4.017808328373075,
-1.0950851034750277, 0.12173582687690843], [0.22180579191198468, -4.2110674925319236, 2.9122016067639365,
4.5406571257464297, 3.0637655540581346]]))
arg1=Symbol(shape=(4, 5))
res=arg0*arg1
s1=numpy.array([[-2.7914057802277092, -3.2955192040483841, -0.4909883356152811, -4.414815259808397,
1.1535659837090115], [0.30062418712185313, 4.6879078677821262, 2.641934893458421, -4.6213986057014331,
2.2307025160830776], [4.0559589148649486, -0.33010334091372151, -1.5019795108463163, 1.5894091005782052,
4.3064711265533191], [2.9888346593766579, -4.5884630123207506, 2.4921626108815289, -3.5186629218511625,
-1.0861727773454932]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[7.0799802919708137, -6.3655676979244857, 2.3555970238060953, 10.212199748003247,
-1.6350731316459357], [-0.14844171362044126, -22.339165843065977, 4.1692419823943165, 19.577333924268451,
-8.7400566524118908], [8.4969085795002854, -0.45172491906991191, 6.364848799967322, -2.5510957490121045,
13.569799142075835], [-7.895789928888072, -1.2525069145814096, -3.5697411470863107, 16.327580247874213,
4.2577610225859299]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[1.9484678076243735, -2.8820365501694258, -0.39777323610290649,
18.607371903277738, 0.90769337196755784], [0.14635106969487355, -12.885456950875437, 1.071434428043893,
8.1381845005383653, -0.87322591559081664], [4.8949119406016965, -0.060198455736490949, -6.0346657877240473,
-1.7405382293708347, 0.52425182351249966], [0.66294083851702656, 19.32232743186902, 7.2576799597261958,
-15.977041869203234, -3.3277787409867781]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_taggedData_rank3_Symbol_rank0(self):
arg0=Data(numpy.array([[[0.20286433747766175, 2.066180723397264], [-3.1327754856901735, -1.1194293005675835]],
[[-1.7914925359739922, 1.275772548969373], [-2.3842819867614953, -3.1968139299234077]], [[-3.5171630188865488,
-1.4300055611015186], [4.2854751694367756, 1.7799374077309524]], [[-4.2108803597952917, -1.5964309596888695],
[2.7414856168787471, 1.1873651110226469]], [[-3.5913507733928229, -1.3017853623346696], [-0.13258097661378798,
-3.1022689591044426]], [[4.9076894073951749, 2.8964781538465161], [2.969217301725779,
1.8197050412291595]]]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([[[3.7913962593423918, 4.4765688935993317], [1.4500345756448763,
4.8443295010220275]], [[1.7391435441206662, 4.4187921026078829], [-4.2876409136689784, -4.1427096121048841]],
[[0.11488507950500271, 2.1339510129273167], [-2.8155795121378926, -4.6369329094888991]], [[0.67434242728218052,
4.9095299484356563], [0.94463745614236405, 3.557499141589803]], [[0.038621679734069048, -0.10332111066950311],
[-2.0403842705827979, 1.0573287011436552]], [[-2.1400599935190945, 4.2642563454671869], [3.9163707220927186,
-2.5416950274474726]]]))
arg1=Symbol(shape=())
res=arg0*arg1
s1=numpy.array(3.22032001333)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[0.65328808596966281, 6.6537631347069341], [-10.088539593828681, -3.6049205801225788]],
[[-5.7691792673232181, 4.108395871899436], [-7.6781509993834112, -10.294763877415299]], [[-11.326390459854416,
-4.6050755275843107], [13.800601454753718, 5.7319680565855959]], [[-13.560382296374918, -5.1410185693811012],
[8.8284609982830986, 3.8236956301526179]], [[-11.565298770434771, -4.1921654553826446], [-0.42695317237583746,
-9.9902988157275558]], [[15.804330417828238, 9.3275865669967502], [9.5618299006647014,
5.8600325626225302]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[12.209509252413918, 14.41598439909561], [4.6695753639655058,
15.60029124329226]], [[5.6005987611804535, 14.229924642760029], [-13.807575844248428, -13.340850673264026]],
[[0.36996672076263859, 6.8720051543895364], [-9.0670670520514882, -14.932407848882294]], [[2.1715984144123999,
15.810257548976251], [3.0420349053536828, 11.456285683055723]], [[0.12437416819593333, -0.33272704048818802],
[-6.5706903014356595, 3.4049367769580914]], [[-6.8916780268502658, 13.732270051265186], [12.6119670159636,
-8.1850713646631412]]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_taggedData_rank3_Symbol_rank3(self):
arg0=Data(numpy.array([[[-3.0362430129414495, -1.6103362752021533], [3.0322276873410949, 2.889681860828258]],
[[3.0356323227798061, 3.8849951306068178], [3.1682099248345352, 1.252560279633526]], [[-2.422448371330125,
-4.3051891736441767], [3.2099062879412248, 3.0454833071508354]], [[-1.1376898513557334, 0.97676409380038631],
[1.0009530341765513, -3.085670706338802]], [[3.7338110619145226, -3.4624334476005911], [-1.9009045069833541,
0.020021974502883566]], [[2.2281987737323306, -2.9210437430011229], [-1.3860392623437132,
0.463839486811219]]]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([[[4.1305451146381422, 2.3814888644055499], [-0.89220558616836776,
3.8418880701208664]], [[3.3492033714884197, 2.1415021426686387], [1.4787086763681101, 0.38743271004052637]],
[[-4.99502836998282, 4.4948912080047858], [-3.7653670133834769, -4.0800035996907447]], [[-3.1718016142315641,
2.320405525041048], [-2.8237839197556944, 3.5858545025811086]], [[3.0016852702625556, -2.8784349824436584],
[-3.7323763876968008, 0.63313826152617381]], [[1.7585155020491481, -3.655987828892715], [0.54081193002197825,
4.9685421412273278]]]))
arg1=Symbol(shape=(6, 2, 2))
res=arg0*arg1
s1=numpy.array([[[-0.93345150384204523, -3.5420417816351479], [-3.5444663998891226, 2.3971219306415996]],
[[0.8384895676298747, -4.3543886014540583], [1.9437605664303446, -3.0820690979415186]], [[4.9690708795309,
-2.1112283314766356], [2.37080840790198, 4.7216280449696395]], [[-3.3735779321675219, 3.0438054138693342],
[-0.47550686063032277, 2.7155331110677619]], [[-1.3378665576363566, -2.6741065199531286], [-0.22177834028631249,
0.61204525154245371]], [[4.0531432724462295, -4.0695297515588145], [3.928681336032259, -4.8729434946660577]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[2.8341856064600983, 5.7038783692487431], [-10.74762915459401, 6.9269197611686435]],
[[2.5453460338109117, -16.916778513418848], [6.1582415180666157, -3.8604773311674778]], [[-12.03731765914358,
9.0892373557640713], [7.6100728160284898, 14.379639393530271]], [[3.8380853761846505, 2.97307983678279],
[-0.47596003491968808, -8.3792409729148645]], [[-4.9953409522681316, 9.2589158571325303], [0.4215794466015394,
0.012254354420993967]], [[9.0312088694261341, 11.88727441774779], [-5.4453065809776664,
-2.2602636098259725]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[-3.8556635489463869, -8.4353330602232983], [3.162392721967159,
9.2094741479570601]], [[2.8082720868638438, -9.3249325200257633], [2.8742556143627427, -1.1940943831476432]],
[[-24.820650015712328, -9.4897416652449422], [-8.9269637741663139, -19.264259419876904]], [[10.700319930984929,
7.0628628994922567], [1.3427286267814176, 9.737506633230419]], [[-4.0158543396339219, 7.6972417538137563],
[0.82776024058721887, 0.3875092665369389]], [[7.1275152766229084, 14.87815124101582], [2.1246777357809301,
-24.211425105067871]]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_taggedData_rank4_Symbol_rank0(self):
arg0=Data(numpy.array([[[[0.083529595185620309, 4.372823949648037, 4.0009796310147365, 0.68196650279571092],
[-3.6991633277760947, -2.7548332509536966, -1.0695165519831527, -4.9760591951223629], [2.4321680119150511,
3.8212450023110254, 0.8004705884830452, -2.0444757685121964]], [[-3.4279885095627494, 3.5526615517407674,
-0.37278949561560104, 1.8208812052515633], [-1.3542092169449638, 4.6164533745863388, 0.70466827998486359,
4.8882907830017537], [-1.0014606132552197, 0.027094091914280583, -0.50731223435619732, 1.3858925949581025]]],
[[[-0.92819420758339621, -0.63572501286400218, 1.5822275384230347, -4.1213856389411898], [0.019204126745418826,
1.2601369851282858, 3.1282675256554278, -1.2005085063042245], [0.31394545189514567, 4.3373088139034053,
-4.2967881792088045, -3.2133421015779429]], [[-4.6394850485838566, 2.7899856599199682, 3.4214279590576524,
0.75816457866836107], [2.6716538166314328, -0.78329465377730312, 0.9411007620209233, -4.3621528303217216],
[-0.21060811641931387, -1.0884058600082236, 3.3643361086045402, -0.59658223764974405]]], [[[-2.7722966748649869,
1.3359537198967564, 3.3994221388292836, 0.89269410005117322], [-2.5434807639867083, -2.2312407449400631,
2.1964509494368221, -3.8483462591031992], [-4.617770174657271, -4.0164566401957895, -2.0915606068178807,
1.3098480489351907]], [[-4.000475916402392, 3.4797401237531425, 4.727298203954307, -1.3658950385993265],
[4.3822054513768176, 4.7641649434095044, 2.2480529159500593, -3.370947660818576], [-0.12763750951483388,
-0.56331578609421484, 1.1108900947641267, 2.3086655633422826]]]]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([[[[-0.99642554217400114, -3.1989491729058739, -0.40653827121759534,
-3.7494299986207311], [-4.962891998625155, 0.65199311462416087, -1.5646394746401904, -0.58561931277306201],
[-1.9266349397638294, -2.1350741612611923, -1.3141253220586746, 3.0489459946325113]], [[-2.4730670458068271,
-1.0264935833023214, -2.4514436230760808, -2.4564197681665032], [-0.48365426443540827, -0.92587219714034585,
-4.1751007096042496, -4.5526966630634966], [-2.3782862353872791, -2.5275939067663735, -2.6938709700385766,
-4.8403251647207153]]], [[[3.2867188889910004, -1.4242104085047957, 2.0936224809849646, -1.4601757832869966],
[-0.21399139868108641, -0.98887005738367506, -2.261387321435977, -3.6513388135428149], [2.9334655428664806,
-3.9524701563078288, 3.4584296338361185, 4.5958550113660674]], [[0.37781815561656451, -3.0593937474827717,
-2.0739947527751279, -2.4165789597896401], [4.5330104520530448, 4.9794431912053145, 1.7661478112868867,
3.078941742057026], [4.9858586211966696, -3.1080213069928195, -4.2716128061474183, -1.5876111863601041]]],
[[[0.90451414172461853, 0.1162545327223361, 3.2911315914907693, -1.4337863404739979], [2.0405912462551932,
4.8936580709384394, -1.1291930809589745, 2.111861338433255], [-2.0913683111797732, -1.55247331778124,
4.9769696268492716, -0.24856367420835213]], [[-2.1381113867577026, 1.6110287228762354, 0.19273167692598125,
-1.1320874579780638], [1.2876584378472149, 0.79519349199575018, -3.7974642196600819, -4.2341641389677163],
[-4.3323767453858073, -0.80301234928598664, 4.344905698376083, -0.27642913101571054]]]]))
arg1=Symbol(shape=())
res=arg0*arg1
s1=numpy.array(-1.40149736096)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[[-0.11706650721432915, -6.1285012253547855, -5.6073623941039141, -0.955774253928249],
[5.1843676416216082, 3.8608915310841012, 1.4989246251026278, 6.9739338299224034], [-3.4086770500993597,
-5.3554647863033438, -1.1218574172814939, 2.8653273941073367]], [[4.8043168495380142, -4.9790457891328019,
0.52246349429720151, -2.5519602037735591], [1.897920643729998, -6.4699472214569242, -0.98759073474784653,
-6.8509266319601139], [1.4035444065780798, -0.037972298315350331, 0.71099675763065506, -1.9423248144015677]]],
[[[1.3008617323823231, 0.89096692782230835, -2.2174877195310199, 5.7761110964560265], [-0.02691453295315966,
-1.7660786590997284, -4.3842586815689133, 1.6825095033900148], [-0.43999372231503248, -6.0787268563347068,
6.0219372937459816, 4.5034904752087277]], [[6.5022260517828334, -3.9101575394815598, -4.7951222553187058,
-1.0625656561736561], [-3.7443157733956758, 1.0977853901194523, -1.3189502343655459, 6.113545679780513],
[0.29516671935747973, 1.5253979404500404, -4.7151081775766022, 0.83610843165905069]]], [[[3.8853664736090217,
-1.8723356127940041, -4.7642811563429852, -1.2511084253622755], [3.5646815783684827, 3.1270780156899103,
-3.0783202091039361, 5.3934471261761532], [6.4717927132812418, 5.6290533816266439, 2.9313166707337701,
-1.8357485838353969]], [[5.6066564394040377, -4.8768466002510378, -6.6252959572916961, 1.914298291939184],
[-6.1416493752694707, -6.676964595345189, -3.1506402289923527, 4.7243742505563926], [0.17888363274397381,
0.78948558759562215, -1.5569095361235428, -3.2355886943530807]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[1.3964877677455285, 4.4833188236584016, 0.56976231423887624,
5.2548162481543859], [6.9554800387803839, -0.91376662950692644, 2.1928380945550465, 0.82074392137602725],
[2.7001737836036899, 2.9923008024518656, 1.8417431708300456, -4.2730897651733448]], [[3.4659969381643139,
1.438628048035971, 3.4356917682723473, 3.4426658224843063], [0.67784017522110429, 1.2976074408742768,
5.851392626233844, 6.3805923585144591], [3.3331618824920928, 3.5424161899003064, 3.7754530552637315,
6.7837029445226396]]], [[[-4.6063278491235211, 1.9960271289648437, -2.9342063819375608, 2.0464325068078741],
[0.29990838051872154, 1.3858987757511469, 3.1693283630907771, 5.1173417111346877], [-4.1112442167813024,
5.5393764933208445, -4.8469800048716598, -6.4410786697637112]], [[-0.52951114801769439, 4.2877322632209003,
2.9066981726499224, 3.3868290346857983], [-6.3530021857360977, -6.978676491502144, -2.4752514965759262,
-4.3151287260286901], [-6.9876676997044234, 4.3558836595439399, 5.9866540748393779, 2.2250328879071364]]],
[[[-1.2676741825739934, -0.16293042080947032, -4.612512240031629, 2.0094477723483699], [-2.8598832464155737,
-6.858448871838867, 1.5825611229732273, -2.9597680925181833], [2.9310471689044393, 2.1757872578242567,
-6.9752097975849967, 0.34836133343241132]], [[2.9965574659698517, -2.2578525035345738, -0.2701129365842932,
1.5866175847270911], [-1.8046499024549696, -1.1144615804810425, 5.3221360821765895, 5.9341698666157248],
[6.0718145753232413, 1.1254196883389966, -6.0893738698746489, 0.38741469760974395]]]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_taggedData_rank4_Symbol_rank4(self):
arg0=Data(numpy.array([[[[0.94895783352211716, -0.46506050687609246, -3.4973315286311935, -3.8305662019128084],
[-0.067011552625391602, -2.0053313702353224, -4.2991620654185292, 1.0093244189118113], [0.87798040910337161,
-3.1557405362751467, -2.7931609613797503, -4.9110293393501543]], [[-0.43377674574157687, 2.704453693295501,
-2.5592027474361601, 2.6750943602438779], [3.4305357783522936, 1.1310936272574317, 2.5896451684195103,
-2.5539402760310126], [4.5085098664567802, 0.11349764632152315, -1.3234736581612081, 2.9677015132450526]]],
[[[-1.1465164843195783, 2.7375150894033169, -0.10912903641971639, -3.2522173068497242], [4.7360277602148102,
-1.6692169008049049, 1.233494102169499, -1.3392318230236588], [4.3885022964524385, 4.7515889636089952,
2.7117463429070652, 2.1998096737486339]], [[-2.5131848249547706, 0.77921677989624794, -3.8934505794744254,
0.24453982436947097], [-3.4599112442822841, 0.95455762656251686, -2.8118915894259002, 0.78333576699382768],
[3.9748218483200848, 2.0642803168106862, -3.4033833226648325, 1.1533845398716842]]], [[[0.49409552443598681,
4.2995599265190414, 1.6569894487618955, -3.0328627613000969], [-4.8050094221736783, -1.8954288851770262,
-0.65657001508748358, -4.2826450941652627], [-0.74170635020795395, -0.84409250409421333, 3.6365860981881823,
1.6100686197509306]], [[2.2396538156436314, 4.3413750637744482, -4.4044991729772871, -2.290449250092359],
[3.1707282757549784, 3.5745559429744826, 1.7312931654177177, 3.6613171235792787], [-0.8687074400525221,
-3.7812204587391882, 0.42585417549408167, -2.1167068471762196]]]]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([[[[-1.6280796155619202, 3.2372833297254093, 1.0760799245326114,
-0.6791544053399603], [-4.7288229986341772, 3.0364132365453695, -2.7881857643052821, -4.280550845421411],
[-3.2543576320948486, -0.55086025606976641, 2.7326721039171273, -4.2151748027883897]], [[-0.090786768277823171,
2.6835858500949072, -4.3650061828137812, 3.7514613040027918], [4.6631405077073573, -1.4292251728385805,
-1.3449901199338612, 1.8541088776332506], [-0.75859253853348552, 3.0321616898786914, -0.78197759748368689,
-0.88789890229887547]]], [[[-2.062426285540877, 0.30001445848584307, -0.83075755338807511, 3.1138362566950235],
[-2.3733111917258274, -2.9574688279506556, -1.7570853300314857, 4.3659538409493486], [-1.018783824996695,
0.89420512031921273, -1.8230865992410106, 3.1994892045316963]], [[0.3233781997620504, -1.3905319280411477,
4.9629769739530829, 1.1091529164244776], [2.7351448192501895, 2.0660484456167358, -4.0171925239278465,
4.3911761581077471], [2.1258515256453734, 1.5150132919784713, 2.3610249459029262, -4.5111733078663327]]],
[[[-2.4852355463920439, 2.8830764355289409, 1.862859073381494, -2.1509811646634249], [-2.6325170393160802,
-1.3580306364602079, 3.8152036772116542, -1.8416692569043969], [-3.1659556710428727, -2.074597485798928,
0.35812962834444573, 4.8712028428189775]], [[-3.0119309329000288, 0.64957909215262699, -1.0255988706704269,
4.4011917781031578], [3.4155148745532635, 0.92333362783500483, -4.4110812159393742, 3.9318582439553591],
[-0.47136877672690947, 2.9648508900721211, 4.2958677743999178, -3.5373956917354246]]]]))
arg1=Symbol(shape=(3, 2, 3, 4))
res=arg0*arg1
s1=numpy.array([[[[-0.89769175763822151, 3.1729441493671864, -4.6066637580891037, 4.9743040690579683],
[-2.6788549386226723, 4.5100260370904159, -1.4037388326608391, -1.3338419564469453], [-1.1381168569459419,
-3.1452061025455782, -1.5160789869753852, 4.1456610012343731]], [[-2.5938250596030543, -0.93202167450854922,
-0.77103273701479047, -2.7062810722150674], [-1.105689822011354, -1.8461034185063041, -2.0068461224928411,
-4.7319124228070422], [-4.5218149557274483, 4.8904398898663093, -3.825654173472147, 0.88914553957591913]]],
[[[2.5862303105862106, -2.0807587439433863, 2.3052217753786719, 4.1346828351021188], [0.68224921552396101,
-2.8879458749312326, 0.54057865399082239, -0.0023583593457887631], [1.9450000139899339, 2.3035289835776487,
0.43188049217124025, 2.7210621624224993]], [[-0.54240954949206355, 2.4426743469776238, -0.12851500556720108,
2.8020215668130284], [0.52130307899069095, -2.6755785781541075, 0.43102203402703765, 2.8334377967823654],
[-0.76961602631277248, -0.42105873518056569, -2.6486982753852963, 2.2122858679665658]]], [[[-3.480147294108944,
3.6715573223019664, 1.7695636383465487, 4.490486044765845], [-2.4274939795683825, 4.5761414724005753,
-3.0218037175811974, 3.7714386247368186], [1.1400240440382881, -1.8970197325492588, 3.6790698826844643,
0.066641356768850635]], [[-2.2050658457593322, 2.6656171339669479, -3.3829610334860827, -3.4782484246680303],
[-1.0395696415089963, 2.3867408856698438, -0.23958153915918956, 4.5415496456746567], [-4.7338880436477284,
0.94198626201791846, 4.0511716726168139, -3.3273461069241539]]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[[-0.85187162549902817, -1.4756110143942358, 16.111030402967682, -19.05440104497081],
[0.1795142286953034, -9.0440966927555042, 6.034900739130368, -1.3462792576110065], [-0.99924430366884154,
9.9254543927430472, 4.2346526407878047, -20.359462808061743]], [[1.1251409933775647, -2.5206094598561033,
1.973229098931474, -7.2395572335172815], [-3.7931084941699296, -2.0881158119306398, -5.1970193648750147,
12.085021719258394], [-20.386647342189029, 0.55505341697671495, 5.0631525238248756, 2.6387185632945438]]],
[[[-2.9651556833340331, -5.6961084589529127, -0.25156663108082233, -13.446887074653596], [3.2311512241062563,
4.8206080630450217, 0.66680058145640564, 0.0031583898860055686], [8.5356370279948504, 10.945422895721002,
1.1711503452182641, 5.9858188677683906]], [[1.3631754486940078, 1.9033728389870741, 0.50036682289677803,
0.68520586182792786], [-1.8036623846788673, -2.5539939372442984, -1.2119872323378713, 2.2195331697718155],
[-3.0590865962052933, -0.86918325925444495, 9.0145355372174212, 2.551616317889247]]], [[[-1.7195252023972392,
15.786080730887091, 2.9321482776529417, -13.619027905308091], [11.664131444095956, -8.6737507294445777,
1.9840257124437006, -16.15173312417452], [-0.8455630728529504, 1.6012601363636387, 13.37925438963315,
0.10729715731115268]], [[-4.9385841352003395, 11.572443754974021, 14.90024907420384, 7.966751495915819],
[-3.2961928569490411, 8.5315388172113202, -0.41478588130656219, 16.628053485294025], [4.1123637638924597,
-3.561857725793407, 1.7252083724272129, 7.0430162874514943]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[1.4615136516687401, 10.271719200896161, -4.9571383891516385,
-3.3783205220012094], [12.667830843763641, 13.694302756185596, 3.9138846300274661, 5.7095783143275201],
[3.703839279617827, 1.7325690390404491, -4.1429467550425727, -17.474685793305618]], [[0.23548499463939337,
-2.501160177692904, 3.3655626642213927, -10.15250872017001], [-5.1559869979808832, 2.6384974773925669,
2.6991882069804505, -8.7734808313096], [3.4302150860439653, 14.82860448070719, 2.9915758593751893,
-0.78947134857339996]]], [[[-5.3339093730155467, -0.62425770780385803, -1.9150804021305003, 12.874725321875548],
[-1.6191896987491827, 8.5410099019178034, -0.94984282265544051, -0.010296488044085244], [-1.9815345538712901,
2.0598274119188451, -0.78735553775100031, 8.7060090135304602]], [[-0.17540342364848829, -3.3966166692794464,
-0.63781701343747121, 3.1078703927149545], [1.4258394157605607, -5.5278749625207295, -1.7314984927415895,
12.442124498712067], [-1.6360894036981373, -0.63790958050220026, -6.2536427023547434, -9.9800049569406735]]],
[[[8.6489857619996346, 10.585380397622536, 3.296447679619837, -9.6589509024752935], [6.3904192640509674,
-6.2145403162961061, -11.528796655127632, -6.9457425694795969], [-3.6092655873482475, 3.9355523676576474,
1.3175839297390308, 0.32462356654173891]], [[6.6415060299238959, 1.7315291579087375, 3.4695610154653864,
-15.308438368849195], [-3.5506655737079806, 2.2037581206676693, 1.0568136270709447, 17.856729414678441],
[2.2314070162963722, 2.7928488073795359, 17.403297836956384, 11.770139783546139]]]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
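# The expandedData variants below build arg0 piecewise over the domain:
# whereNegative(self.functionspace.getX()[0]-0.5) equals 1 where the first
# coordinate is below 0.5 and 0 elsewhere, so msk_arg0*a+(1.-msk_arg0)*b
# takes the value a on that region and b on the rest. The reference is
# assembled the same way from the whereZero-based mask msk_ref before the
# Lsup comparison.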
def test_mult_overloaded_expandedData_rank0_Symbol_rank0(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*(-0.0430204652553)+(1.-msk_arg0)*(1.78425217281)
arg1=Symbol(shape=())
res=arg0*arg1
s1=numpy.array(2.5646949317)
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*(-0.1103343692)+(1.-msk_ref)*(4.57606250448)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_expandedData_rank0_Symbol_rank1(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*(2.57326660208)+(1.-msk_arg0)*(3.29535894632)
arg1=Symbol(shape=(2,))
res=arg0*arg1
s1=numpy.array([4.0270481495052639, 2.7564226252917825])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([10.362668508083143, 7.0930102828763504])+(1.-msk_ref)*numpy.array([13.270569146744505,
9.0834019581016197])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_expandedData_rank0_Symbol_rank2(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*(-3.02551699988)+(1.-msk_arg0)*(0.626618362726)
arg1=Symbol(shape=(4, 5))
res=arg0*arg1
s1=numpy.array([[-0.87644218701217369, -3.616282438617354, 0.79667729938717624, 3.9811287325297613,
0.065796525107652215], [-1.4217337078130887, 4.8515183472866479, -0.78214368357519071, 3.1809506063985502,
0.95738137909039533], [0.39346667195906182, 4.3285617066713939, -4.5910865330304649, -4.7275376371854012,
-0.90249772270981055], [1.0196798908676152, -4.5635511009909653, -4.4978908227402012, 1.164740898313557,
-1.7685582007909115]])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[2.6516907362186282, 10.941123994409631, -2.4103607127155628, -12.044972658985373,
-0.19906850524632991], [4.3014795022930183, -14.678351234952618, 2.3663890110066546, -9.6240201354420911,
-2.8965736378079594], [-1.1904401048989282, -13.096137028570261, 13.890410353610562, 14.30324548888392,
2.730522202412847], [-3.0850588442572544, 13.807101435875975, 13.608445147811342, -3.5239433883048839,
5.3508029017726937]])+(1.-msk_ref)*numpy.array([[-0.54919476824925706, -2.2660289808399305, 0.49921262496268565,
2.4946483681778449, 0.041229310835994216], [-0.89088444822170665, 3.0400504835102153, -0.49010559441809565,
1.9932420608926262, 0.59991275226964935], [0.24655344177009811, 2.7123562495913784, -2.8768591264593351,
-2.9623618939370084, -0.56552164536805005], [0.63895014371973546, -2.8596049191178006, -2.8184609830641962,
0.72984803470084503, -1.108211044164624]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_expandedData_rank0_Symbol_rank3(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*(-3.56361070359)+(1.-msk_arg0)*(4.70518403887)
arg1=Symbol(shape=(6, 2, 2))
res=arg0*arg1
s1=numpy.array([[[4.2317874979104992, -1.1989677806745727], [1.3134595205642725, 4.4478340118436144]],
[[-0.017162439223159964, -0.41900720330017371], [-4.4667032138911269, 2.0617117365888351]], [[2.9794518983997751,
0.52772381685170533], [-2.6894168529203224, 0.41500536076126604]], [[-4.6733566211583097, 2.4911601334345299],
[-4.7318467145182375, -0.81771569841489722]], [[1.3136083167944523, 0.82585873482530836], [0.296465998582784,
-1.7304343680929755]], [[-1.3066203047314175, 2.65896658854032], [3.9719908108129438, -2.8680613980844938]]])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[[-15.080443222871585, 4.2726544164712799], [-4.6806584062148398, -15.850348892396909]],
[[0.061160252115363196, 1.4931785545617493], [15.917591382781623, -7.3471380122247991]], [[-10.617606675968551,
-1.8806022422720292], [9.5840346834818018, -1.4789175456560166]], [[16.654023676852269, -8.8775249158636207],
[16.862459599603678, 2.9140204153647811]], [[-4.6811886580533626, -2.9430390270766442], [-1.0564894058000638,
6.166594435995874]], [[4.6562861034687169, -9.475521795410085], [-14.154628967973551,
10.220654296766785]]])+(1.-msk_ref)*numpy.array([[[19.91133899106331, -5.6413640647508636], [6.1800687718624765,
20.927877600075174]], [[-0.080752435100909861, -1.971506005140051], [-21.016660668375366, 9.7007331557512781]],
[[14.018869516935215, 2.4830376799828495], [-12.654201250232003, 1.9526765994999067]], [[-21.989002981827273,
11.72136689810848], [-22.264209635536492, -3.8475028525162167]], [[6.1807688855097629, 3.8858173374624307],
[1.3949270845997368, -8.1420121690652945]], [[-6.1478890026873314, 12.510927152291805], [18.68894776558027,
-13.494756712769872]]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_expandedData_rank0_Symbol_rank4(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*(2.51859660817)+(1.-msk_arg0)*(3.69262914568)
arg1=Symbol(shape=(3, 2, 3, 4))
res=arg0*arg1
s1=numpy.array([[[[0.67581666027729526, 2.8108253236352603, -3.2853142689252701, 4.9303752145005504],
[-1.3657702337917108, 4.3904435823590919, 3.3571499478699067, 0.015664424638796248], [-3.1193276566121941,
2.4614116251071474, 4.8836311348501411, 4.5339873643248207]], [[0.92904130460985179, -4.0289182883108268,
1.1622216990935978, -3.453322460119399], [-4.9809628080889743, -3.091966318347751, -0.82579662341565552,
0.19293706069789529], [-4.1200195632312617, -4.252279729557471, -4.2626018577200098, 2.8104051978786853]]],
[[[4.553470024096578, -4.1556230388171853, -4.8792826237543903, -0.084660297479757673], [-1.3940013078624869,
-4.391326436454821, 1.8581067287379591, -4.9713750706033011], [1.1291380851369173, 0.083960164404878412,
1.6804470605141759, -4.9497828740616505]], [[1.5580688980525261, -0.37196449698866818, 2.6938341003534481,
-2.2283064145681664], [0.4008724735809448, 2.2505971863148471, -1.6587271003862281, -2.7339584931448382],
[-4.8561801785490113, 2.7658580526343668, 2.6247408695536976, 1.2921000397940583]]], [[[-3.378532765195783,
-4.7136195887628478, -2.2928079312088725, -3.2689147861576906], [2.1255291713840734, -2.9248168929356853,
-4.2298230449258032, 0.73722628474508767], [-4.0600914026090829, 4.8210888962888614, -1.5990379796232492,
-1.3511964372595688]], [[-2.5350622751408225, 3.9568471756339907, -2.5691428388833124, 3.4094397075929948],
[0.80244721075126435, -1.8355785194219432, -4.4146407757484631, 4.8328210906719811], [-4.920522191727871,
-2.4717933373462273, -4.5300856839069414, -2.5250375128130722]]]])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[[[1.7021095483176161, 7.0793351262596333, -8.2743813744801198, 12.417626292235164],
[-3.4398242783642043, 11.057756314881239, 8.455306471815506, 0.039452366764170722], [-7.8563280557071602,
6.1993029702993985, 12.299896811775723, 11.419285197263687]], [[2.3398802786380641, -10.147219935524449,
2.9271676292760325, -8.6975262349660429], [-12.545036033862335, -7.7874158819594053, -2.0798485747710065,
0.48593062666356401], [-10.376667297538809, -10.709777303843696, -10.735774580822937, 7.0782769989541201]]],
[[[11.468354158082919, -10.466338090388497, -12.288944666479384, -0.21322513807898585], [-3.510926965763792,
-11.059979868212249, 4.6798213046129975, -12.520888390750915], [2.8438433513788075, 0.21146178529132892,
4.2323682668163789, -12.466506357778224]], [[3.9241470419266724, -0.93682852047446297, 6.7846814281166719,
-5.6122049776897045], [1.009636052268762, 5.6683464398043348, -4.1776644489085921, -6.8857385877058555],
[-12.230758926344738, 6.9660807100382254, 6.6106634513770723, 3.2542787776386608]]], [[[-8.5091611630055262,
-11.87170630845092, -5.7746582787226588, -8.2330776928059901], [5.353350561609421, -7.3664339060593971,
-10.653217974099684, 1.8567756202110501], [-10.22573243546206, 12.142378141868068, -4.0273316318104415,
-3.4031187638502254]], [[-6.3847992476635591, 9.9657018755896978, -6.470634439909837, 8.587003283295978],
[2.0210408232317629, -4.6230818330415877, -11.118699284078886, 12.171926806847758], [-12.392810502499694,
-6.2254503155317185, -11.409458438197062, -6.3595509152671994]]]])+(1.-msk_ref)*numpy.array([[[[2.495540296879188,
10.379335513483992, -12.131447222167042, 18.206047216225837], [-5.0432829716077814, 16.212279934703215,
12.39670974393805, 0.057842910971599444], [-11.518520219746321, 9.0890803063976193, 18.033438665320531,
16.742333887671666]], [[3.43060499894721, -14.877301096998385, 4.2916537198201299, -12.751839165684238],
[-18.392848438720499, -11.417484944606104, -3.0493606800326032, 0.7124450136157725], [-15.213704319978612,
-15.70209206496787, -15.740207856266352, 10.377784144870411]]], [[[16.814276124980314, -15.345174751614865,
-18.017381226508029, -0.31261908195608412], [-5.1475298585355107, -16.215539987468492, 6.8612990623305103,
-18.357444479839728], [4.1694882026791129, 0.31003375015792717, 6.2052677934347074, -18.27771250557068]],
[[5.753370623933491, -1.3735269427402785, 9.947330312604274, -8.2283092119504264], [1.4802733796476881,
8.3106207653820228, -6.1250640356231392, -10.095494814878659], [-17.932072464006065, 10.213288057984194,
9.6921946347836005, 4.7712462660838089]]], [[[-12.475668558412432, -17.405649075135688, -8.4664893922387598,
-12.070890014125325], [7.8487909682557282, -10.80026410464507, -15.619167856781525, 2.7223032660145061],
[-14.992411847417884, 17.802493372372791, -5.9046542486134737, -4.9894673457699161]], [[-9.3610448433105873,
14.611169205765981, -9.4868917262874675, 12.589796434712371], [2.9631399582934548, -6.7781107400101064,
-16.301631196256576, 17.845816015294734], [-18.169663657162346, -9.1274161195937573, -16.727926428843457,
-9.3240271137605752]]]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_expandedData_rank1_Symbol_rank0(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*numpy.array([1.8003466734301279,
3.110968541428603])+(1.-msk_arg0)*numpy.array([-0.057900815820612905, 0.54416620499792501])
arg1=Symbol(shape=())
res=arg0*arg1
s1=numpy.array(-1.23860498141)
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([-2.2299183579697544,
-3.8532611324133779])+(1.-msk_ref)*numpy.array([0.071716238902929969, -0.67400697222367489])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
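# Note on shapes in the mixed-rank cases: a rank-1 expanded field times a
# rank-0 Symbol keeps the field's shape (2,), which is exactly what the
# getShape() assertion above pins down; numpy applies the same broadcasting
# rule, e.g. numpy.array([1.8, 3.1])*numpy.array(-1.2) has shape (2,).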
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_expandedData_rank1_Symbol_rank1(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*numpy.array([-0.79032144715843966,
1.1989835373509852])+(1.-msk_arg0)*numpy.array([3.3888677818436879, 2.0461382220071824])
arg1=Symbol(shape=(2,))
res=arg0*arg1
s1=numpy.array([-0.92949574400567592, -1.2179599998896897])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([0.73460042153017635,
-1.4603139890197456])+(1.-msk_ref)*numpy.array([-3.1499381802216635, -2.4921145086501579])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_expandedData_rank2_Symbol_rank0(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*numpy.array([[2.9968239240536168, -0.22744917461103942, -3.368130646004337, 2.5739305354263671,
-3.5520990533562147], [4.2529038750041668, 2.5566407005371827, -0.59374756248383598, 0.27167900571967607,
3.5828984462512263], [-3.506627820649574, 2.8671976480545798, -4.502344160444026, -3.4451554059919767,
-0.53368053099846069], [3.3068381259500921, 0.9313565649084623, 2.680662417641706, 0.49678621396386813,
-4.6856891442661137]])+(1.-msk_arg0)*numpy.array([[-0.85596724799322921, -0.13058599556778994, -0.39878828275994316,
-4.0930080594310017, -4.4366277751460883], [2.6284949729177862, -0.28953336259360274, 4.6575690245651824,
-0.75846368797438046, 2.8728013263404817], [3.9714952839792605, 4.7929536309489222, 1.8212316872876864,
2.7021824250061401, 3.4917451398394661], [-3.9528673242547518, -0.39076547693401587, 4.8178679551326944,
-4.372708435816083, -4.6827874768603746]])
arg1=Symbol(shape=())
res=arg0*arg1
s1=numpy.array(2.10607695069)
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[6.3115417917256531, -0.4790254641017731, -7.0935423204623218, 5.4208957733386178,
-7.4809939428412591], [8.9569428246464202, 5.3844820505972715, -1.2504780558755719, 0.57217689193258392,
7.5458598343126875], [-7.385228027718342, 6.0385388796403019, -9.4822832603848397, -7.2557623921047183,
-1.123972265367853], [6.9644555567263726, 1.9615085942275186, 5.6456813303761022, 1.0462699946498486,
-9.8684219048371684]])+(1.-msk_ref)*numpy.array([[-1.8027328915440832, -0.27502415534822766, -0.83987881052595881,
-8.6201899329560003, -9.3438794960261902], [5.5358126774666605, -0.60977954141415425, 9.8091987688843929,
-1.5973828911781676, 6.0503406573173217], [8.3642746773627188, 10.094329167867425, 3.835654078462837,
5.6910041218650154, 7.3538839566996979], [-8.3250427607485502, -0.82298216409611202, 10.146800651772885,
-9.2092604487599345, -9.8623107699953731]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_expandedData_rank2_Symbol_rank2(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*numpy.array([[1.328567726345117, -2.6681448855288812, 3.2894830838559965, 2.8760842998618212,
2.8603604495641051], [4.0060721329760867, 2.9201617632829659, 2.1721464751857216, 3.3782152232970972,
-0.56533591383663317], [2.812933639561118, -0.55158819240545931, -4.3300172419246827, 4.4161226276280559,
1.5663796584927505], [2.1573608096632757, 0.40478964915762283, -2.783180507622979, -1.5186696606391514,
-3.5877444638062372]])+(1.-msk_arg0)*numpy.array([[1.9794332133759385, -4.6552748863866809, 3.6163220859074414,
4.9277135493827373, 4.7113876120438363], [2.5672843138256862, -3.4663513236737495, 0.70662767196765763,
0.51073573024771424, 0.77820357093096604], [-4.9550804650749072, -1.458958922255027, -0.59095436462229589,
1.6503086087766805, -0.60037112049808439], [2.0892995102876348, 4.3232658919201121, -0.56155298698416622,
2.2070902567073523, 1.8732215024796837]])
arg1=Symbol(shape=(4, 5))
res=arg0*arg1
s1=numpy.array([[2.6575119197184351, 0.798780755830788, -4.7246974545048115, -3.594253030805985,
1.5756436650435841], [0.65656829496808733, 0.22663298200663462, 4.5542984252490282, 2.3375554718292921,
2.7227466581468063], [-1.0177447572471587, 1.532657082535188, 2.6217986442668364, 4.1467735088972546,
1.5155361075503855], [-2.6181643161762471, 0.56415045659076313, 3.6099145181301697, -1.4000081350932101,
0.95267578177457679]])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[3.5306845689153685, -2.1312627883288111, -15.541812352931064, -10.33737471163186,
4.5069088220969], [2.6302599498672778, 0.66180496835457081, 9.892603271348559, 7.8967654802351435,
-1.5392664701290637], [-2.8628484641474969, -0.84539554973300923, -11.35243333453016, 18.312660324289759,
2.3739049305782052], [-5.6483250889774848, 0.22836226539548773, -10.047043721045087, 2.1261498794140565,
-3.417957261864017]])+(1.-msk_ref)*numpy.array([[5.260367358833121, -3.7185439923480388, -17.086027753956419,
-17.711449359812622, 7.4234680444816901], [1.6855974846268469, -0.785589537166827, 3.2181932936796902,
1.1938731008992738, 2.1188511721101988], [5.0430071650677997, -2.2360837253220716, -1.5493633519903052,
6.8434560203802217, -0.90988411104533029], [-5.4701294236395936, 2.4389724268900039, -2.027158280413504,
-3.0899443142752547, 1.7845727593117799]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
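# Equal-rank cases such as rank2*rank2 are elementwise (Hadamard) products:
# each masked branch of arg0 and the substituted s1 share the shape (4, 5),
# while the mask varies over the domain rather than over the tensor indices,
# so at every point the reference is just one branch of arg0 multiplied
# entrywise with s1.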
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_expandedData_rank3_Symbol_rank0(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*numpy.array([[[-0.26449277327340059, 2.709494171087842], [0.49156618056888668,
1.1557293908754884]], [[2.9426463555847633, -2.2855487493843087], [-0.049194778717681409, 0.65490813497401845]],
[[-2.5662834716418259, -0.6481573871774593], [-4.7039097421222928, -3.6194060480671544]], [[-1.2083023577871419,
3.399741798797292], [2.1299564643809319, -0.094074363510819659]], [[-4.6384172111976261, -4.5399938401719275],
[0.25600785550817573, -2.5059486809113416]], [[0.78159444871409178, -3.8859384118790743], [-2.9903682045869107,
0.31191296888410847]]])+(1.-msk_arg0)*numpy.array([[[1.4148748264368773, 2.6806498611711493], [-4.0435092298014874,
4.2267476915751718]], [[3.125690778072709, -4.9216068996123754], [-0.39858451763556069, -4.7718632732732615]],
[[-1.5015409161151947, -4.3392117282980625], [-4.3901880811233127, -2.8392130815499939]], [[1.5905393663877287,
4.6064612161951075], [-3.1305192294513895, 2.10516821140763]], [[-1.310377796147919, -2.266123007043912],
[-3.9633905374201128, 2.6610704495417856]], [[-3.3823523217509011, 1.9759866351748991], [3.6164091081703162,
4.7947187843172081]]])
arg1=Symbol(shape=())
res=arg0*arg1
s1=numpy.array(-0.422116691453)
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[[0.11164681436744017, -1.1437227150112377], [-0.2074982897720079,
-0.48785266669154376]], [[-1.2421401437361563, 0.96476827624502515], [0.020765937229078289, -0.27644765514099601]],
[[1.0832710883803918, 0.27359805181627767], [1.9855988172389765, 1.5278117060356851]], [[0.51004459354416765,
-1.4350877599033474], [-0.89909017568376171, 0.039710359075749616]], [[1.9579533267701619, 1.9164071790311268],
[-0.10806518895313126, 1.0578027661377205]], [[-0.32992406274935349, 1.6403194656131641], [1.2622843327469702,
-0.13166367044669411]]])+(1.-msk_ref)*numpy.array([[[-0.59724228055590678, -1.1315470503419538], [1.7068327379441424,
-1.7841807511750174]], [[-1.3194062497457233, 2.0774924210974488], [0.16824917784877888, 2.0142831369809828]],
[[0.63382548359210156, 1.8316536982639513], [1.8531716676608954, 1.1984792323144307]], [[-0.67139321496560267,
-1.9444641678876049], [1.3214444196665345, -0.88862664035176742]], [[0.55313233986365051, 0.95656834615927522],
[1.6730133005925627, -1.123282253884367]], [[1.4277473713864239, -0.83409694079570218], [-1.5265466476819478,
-2.0239308296843248]]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_expandedData_rank3_Symbol_rank3(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*numpy.array([[[3.8465426754336836, 0.6522330066175801], [1.1213564084832672, 2.2991484731703036]],
[[-3.146805277366008, -2.9092773950846063], [-0.7507570800585075, -4.3691574327961602]], [[2.6060037046129327,
-3.4798742840305676], [-2.2473704638275493, 0.50713868800830042]], [[-1.9023463196744839, -1.0711886388204297],
[4.0144718529996783, -1.1676181449431819]], [[2.4659181377476571, -3.3224615922640091], [0.19896825249062911,
-0.68785028874854248]], [[2.1633557767317377, 1.5521876431045971], [4.2492954867619375,
-4.7770400998496516]]])+(1.-msk_arg0)*numpy.array([[[3.5872129335758469, -1.4939601669977023], [2.6202996623479304,
-3.4258031677768743]], [[0.71987276551639834, 3.6960268147998683], [1.885662175199954, -0.80213652437160476]],
[[2.0323879500914588, -3.054629290525348], [0.64860207732057251, -4.2079981974529375]], [[1.2861885512240159,
0.023168662849575306], [-2.5160035334539876, 2.6814734060795251]], [[-1.257956004629972, -4.3314818687128174],
[-2.8595244901616734, -2.299231449232094]], [[-0.19237535961527463, -1.5019438253752684], [4.7223884225278852,
-0.33487628154856086]]])
arg1=Symbol(shape=(6, 2, 2))
res=arg0*arg1
s1=numpy.array([[[3.7703715218725122, 1.8825293253796147], [-3.5569112589449747, 3.7865091325165103]],
[[1.3234025154170119, -1.0706127854081848], [-0.91030140341539401, -4.0779906813336346]], [[2.4564691769773113,
-4.2202902733308969], [-1.8831049422439405, 0.41091633330696098]], [[2.5279194148611168, 0.406501938106687],
[-0.33008527781400687, -3.1142412235060757]], [[-2.6625713201810175, 2.3132871914260473], [-4.863897694906,
4.1604892545389323]], [[-0.92121754594451399, -3.0027046624253173], [2.5790377788725909, -2.626459339743711]]])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[[14.502894961122463, 1.2278477619381107], [-3.9885652346242333, 8.705746690670745]],
[[-4.1644900195937034, 3.1147095754765983], [0.68341522360130269, 17.817383296222328]], [[6.4015677754703555,
14.686079593308524], [4.232034427486715, 0.20839157015447368]], [[-4.8089781952947206, -0.43544025775836859],
[-1.3251180568739098, 3.6362445602957494]], [[-6.5656829114810957, -7.6858078453893226], [-0.96776122464864589,
-2.8617937350698126]], [[-1.9929212996456993, -4.6607610729091382], [10.959093593951833,
12.546701586580348]]])+(1.-msk_ref)*numpy.array([[[13.525125487647125, -2.8124238253222011], [-9.3201733708150698,
-12.971834980991126]], [[0.95268142866460237, -3.9570135631362282], [-1.7165209244518427, 3.2711052715447542]],
[[4.9924983550597704, 12.891422283435785], [-1.2213857773520564, -1.7291351898596623]], [[3.2513810098112819,
0.009418106351692759], [0.83049572532118243, -8.350755020948105]], [[3.3493975799772628, -10.019961526787521],
[13.908434576224618, -9.5659277382281029]], [[0.17721955668497666, 4.5098937271552346], [12.179218148209955,
0.87953893733186228]]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_expandedData_rank4_Symbol_rank0(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*numpy.array([[[[-0.71566615256646759, 3.7745591573508115, -1.8813261752186392,
-4.2234672474302322], [-4.5183089034463952, 0.90004730988224502, -4.3059651797874992, 2.1824530065115431],
[-3.2409114328950315, 1.425264269049185, -1.0363956947212296, -4.6538419691451374]], [[-0.021160056002790029,
-0.67358505413946013, -0.23398482533746634, -3.2333582167367658], [-4.3340836442891923, 1.8672903891253885,
3.3960333447618343, 1.2033623796151725], [-4.0570605641925273, -0.94056101372721734, -1.7965062790901385,
1.9686802501306877]]], [[[1.5047353385318765, 0.5056728068922558, 2.409658435923566, -2.1000430599662478],
[-4.064887735516467, -2.1275738111117271, -0.62603398290270729, 2.4810787925563362], [3.254883998023141,
3.9921703272954172, 1.7173288579737775, -2.2709735667109623]], [[-1.0333361509972105, 4.6923051335910078,
-4.2187417938772569, -0.26001229923624436], [4.0524307618320652, 3.1529491855521723, 3.8891703598304161,
4.9134254224440408], [-2.2621317317299292, -1.4221931139149491, 2.8628397330646003, 2.486038796267632]]],
[[[1.5100124197717211, -2.2988431057193734, -2.9336531752144488, -2.6131669437218363], [-2.8974794045596344,
-0.85417257745919439, 4.3573210605245887, 4.2307931931872957], [-0.12423354634863593, -4.6765195903139816,
-1.5778735854722301, 0.1126115177128213]], [[-4.1436398962257615, -0.76648956746585917, 2.1159176768980608,
-3.2536216330349363], [-2.2856244596599904, 3.705073565316475, 2.2220301188746312, 0.98343571363908122],
[2.4860129265223163, -3.1262241511848043, -3.8305865546189102,
3.1860325494154687]]]])+(1.-msk_arg0)*numpy.array([[[[2.4089227637145711, -2.6908075329445138, -0.26014840610314227,
-3.9215812800091445], [4.009158789021523, 2.7447580688873892, 4.7131775334041954, -4.4290013383016644],
[-1.7082394800006173, -0.49659550318463985, 3.9472706938122357, -1.6162682425555799]], [[-3.8246426595124583,
-2.6800405903617683, 4.0143006613192842, 2.2139414823010402], [-2.5044774188651475, -1.6949103444811264,
0.41642158825732167, 3.2060207528109235], [-3.5084114918483778, -2.1345709077268706, 1.4430246021304205,
1.2294065151503952]]], [[[3.5126626365828901, 3.8348878052528992, -1.5523954598502341, -1.2410335237730994],
[-2.1674583781546621, 3.3708587151697049, 0.56105884392031768, 2.2747443626383266], [-2.0719480048860914,
1.733990254553726, -0.050490856134826956, -3.7318225551204542]], [[1.4746408782374925, 4.0027176844782097,
2.8083560519562889, 2.9394382256580087], [-4.6069170484521784, -4.8378490055724885, 1.636720298987302,
-1.5376228376365288], [-2.6819306123720734, 1.1516574074480923, -3.6392748736610949, 3.6118499182970254]]],
[[[1.9375861833692003, 2.9438381921906851, 0.57660731138154819, 0.51102545141293199], [-3.0370737652416144,
4.5219314530432726, -2.0670940828628037, 2.8140667234989287], [-1.3644739955281162, -2.7108624230624456,
-2.1786469944211206, 2.8077953462368104]], [[4.022901264470141, 4.7164854778488916, -0.17421287538805963,
-3.832102145875953], [-4.8385573319832922, -4.2618026800252213, -4.5775319725945369, -2.3564870023253039],
[-4.2941599674913693, -3.8000625353557038, 1.1131824136314092, 1.8132425713432623]]]])
arg1=Symbol(shape=())
res=arg0*arg1
s1=numpy.array(0.717965207903)
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[[[-0.51382339801631816, 2.7100021501484619, -1.350726738523665, -3.0323025403715427],
[-3.2439885912315676, 0.64620265396188303, -3.0915331855279722, 1.5669253265579597], [-2.3268616507127589,
1.0232901572442055, -0.7440960504300036, -3.3412966169236569]], [[-0.015192184007276179, -0.48361063343539723,
-0.16799296376949385, -2.3214387043033553], [-3.1117212647398351, 1.3406495324431462, 2.438233786416474,
0.86397232106270994], [-2.9128283314443832, -0.67529008376584743, -1.2898290041654794, 1.4134439250790429]]],
[[[1.080347620167597, 0.36305548193114656, 1.7300509199223877, -1.5077578521533153], [-2.9184479681312658,
-1.5275239736231971, -0.44947061868890531, 1.7813282511207205], [2.3368934663398959, 2.8662393990196935,
1.2329823705524707, -1.6304800089652003]], [[-0.74189940448410086, 3.3689118307816321, -3.0289098291289465,
-0.18667978447841244], [2.9095042944301057, 2.2637078175116527, 2.7922890059647112, 3.5276685049395078],
[-1.6241318790748014, -1.0210851747097522, 2.0554193241418712, 1.7848893612164991]]], [[[1.0841363808970814,
-1.6504893683335269, -2.1062609118572939, -1.8761629480337436], [-2.0802894030884849, -0.61326619216028622,
3.1284049211184035, 3.037562314540097], [-0.089195363932689647, -3.3575783599208848, -1.1328583368377678,
0.08085115172692571]], [[-2.9749892795677022, -0.55031284166088568, 1.5191552747991404, -2.3359871321986905],
[-1.6409988403673095, 2.6601139126172866, 1.5953403162639133, 0.70607262660183523], [1.7848707876394252,
-2.2445201726558786, -2.7502278720763003, 2.2874605217258854]]]])+(1.-msk_ref)*numpy.array([[[[1.7295227328719085,
-1.9319061898166923, -0.18677750447340177, -2.8155589190091503], [2.8784365234748241, 1.9706407975713816,
3.383897487652936, -3.1798688666551458], [-1.2264565134062644, -0.356538293687512, 2.8340030243311856,
-1.1604243647929684]], [[-2.7459603621904449, -1.924175899646795, 2.8821282088880955, 1.5895329566247054],
[-1.7981276507231641, -1.2168866578518498, 0.29897621218834564, 2.3018113563323048], [-2.5189173861531855,
-1.532547645549204, 1.0360414584772961, 0.88267110424690254]]], [[[2.5219695601663243, 2.7533160203819742,
-1.1145659290786001, -0.89101889190998895], [-1.5561597050922877, 2.4201592782474872, 0.40282072952090631,
1.6331873092471489], [-1.4875865800916528, 1.2449446736119429, -0.036250678022026972, -2.6793187566430881]],
[[1.0587408447256197, 2.8738120345122611, 2.0163019367076376, 2.1104143768017321], [-3.3076061564825183,
-3.473407267087782, 1.1751082297410078, -1.1039597002996688], [-1.9255328696923644, 0.82684994997116834,
-2.6128727412832053, 2.5931825775035184]]], [[[1.3911194669720908, 2.1135733996881303, 0.41398398819427723,
0.36689849446726314], [-2.1805132972775692, 3.2465894558060273, -1.4841016329570593, 2.0204020001890131],
[-0.97964485587718853, -1.9463049031696793, -1.5641927422961792, 2.0158993695091798]], [[2.8883031427173891,
3.386272476673903, -0.12507878329731756, -2.7513160138682586], [-3.4739158208065777, -3.0598260472046448,
-3.2865086943851498, -1.6918756805445263], [-3.0830574538274456, -2.7283126882399662, 0.7992262430365179,
1.3018450797125141]]]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_expandedData_rank4_Symbol_rank4(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*numpy.array([[[[2.9251174606479946, -4.9654026519140189, 0.30085621199683832,
-0.30061228591591238], [-2.513978591770071, -3.8844686856929558, -2.1891972059679632, 4.5141042306447474],
[-2.236512896721278, 1.8541668723658136, 0.26092160236246542, -1.1184332157429031]], [[1.8254032188716751,
3.8918830207374366, -3.02810273755754, 3.0709862855640271], [-3.2428568515005187, 2.0131925931621293,
1.9979305988685745, 4.1731959115852639], [-0.67035244538272032, -2.00829150675079, -4.7874365846650164,
4.1003641988688031]]], [[[-1.175586228847092, -1.8341477845829668, 2.4832839618398461, 0.89234831426067096],
[-1.7369861501359631, 1.8284772242403307, -0.27353996503704359, 4.8231831031252348], [0.099524325829133176,
1.2133893916939442, -4.6692295366623551, -4.4818711372137141]], [[3.8966804335291769, -0.47007955430217407,
1.9640723963394606, 4.8351918103493343], [2.1596571322083662, 3.4394328531876912, 2.846261179352954,
-1.8012535813987718], [0.41896290835312833, -4.2874267756908147, -0.37745703724418522, -2.6740921738817813]]],
[[[0.48734325359850583, 4.9758075524770824, -2.053696707710202, -1.2492068717010851], [-0.81009221983657476,
-0.032340552500626174, -2.7423954288910823, -4.1769441535542455], [-4.1686249915574001, 3.0106427920402847,
-3.5225347400306015, 3.9203298909772801]], [[-3.7843028776929879, 4.7534900290748308, 4.7905989355194496,
4.9295960701557782], [0.2236860439332089, 1.1116309427796969, -4.6113096924535757, 4.4416902722827007],
[-0.78621657417830626, 1.5380907655682377, 4.5160456196047676,
-3.7405412441349561]]]])+(1.-msk_arg0)*numpy.array([[[[4.3843342392813938, -1.6479745021651206, 1.0928655318928948,
-0.27867216892680435], [-1.8534416490446235, -0.60606977099904125, 3.7896814633860565, 1.6774705245027342],
[-2.1712907977922269, -0.9731023323951602, -2.2363323585193129, 3.0179076547691572]], [[3.5949000961561293,
-4.4826056152295042, 4.4507316837878168, -0.14917077276962942], [2.865875878248751, 0.65613479495564864,
-3.5114334284145352, -2.2657040605205969], [-3.3885348683628269, -4.1955144800795416, 0.19874114120673436,
0.77237878029658802]]], [[[3.6482369572987956, 4.1192696915374132, 4.7177908974828782, 3.267869957103418],
[-3.9817588302026605, 3.9472020063214721, -2.2489247733823725, 0.35633644710087964], [0.30626607261116678,
1.1534627023800335, 2.6743275744867319, 2.9389573367973432]], [[1.9302124685612068, -3.5741196455396427,
0.86533807292409826, 3.713340528874534], [-0.75399962875646054, -2.6842573138405124, -3.0230491895158327,
1.9153830313206104], [1.1778385254967532, 0.11792305960293703, 4.5139117168026992, 2.8139119131046897]]],
[[[-4.417619341991335, -4.8560564315233137, 4.580720581716939, -0.80247960909749949], [-3.4934853556245651,
1.0770893472402108, 3.378803127896246, 2.2030520956731934], [3.9240010232090405, -0.580967720625301,
-1.2967054817990578, 1.780069501914376]], [[-0.19831352998526164, -3.5200058893139854, 0.76880268223223958,
-3.5461945196094549], [2.6005628531204348, 4.7941479926695827, 4.9792519595550839, -2.3841553080010258],
[0.78378064155146721, 0.72888520432430148, -0.39840096436977745, 2.3135172058068862]]]])
arg1=Symbol(shape=(3, 2, 3, 4))
res=arg0*arg1
s1=numpy.array([[[[0.6868381582863794, 0.54627711634776066, -3.2186962400562225, -1.0774604195563131],
[-1.8950462499252776, -3.9532997580259446, 0.94174368416758636, -1.45380459144517], [2.0866672501914945,
4.3633150084832497, -4.2883759710766967, 1.1324613195015356]], [[-1.6473475256931112, -2.4240842974365195,
3.0575003656751942, -0.88291292348152517], [-0.26167034327218541, -0.11708643329371515, 2.69348684719085,
-4.9379016274841749], [0.9686171163405044, 4.8295378077612252, 3.7003121978510229, 4.056897346655898]]],
[[[-1.6908031207422525, 4.8230189206260157, 0.82952788955472112, 3.4446585748455014], [3.3176619789094879,
3.0037317679930418, 1.4804656022305664, 1.0055708904902172], [0.88162105416707792, -0.50313800229601746,
3.9994903610131427, 4.5365056130602301]], [[-4.8084467422206547, -0.19193357971699321, -3.9848748508964316,
-3.0854097037447001], [-1.4115589742338477, 1.453415069972718, 3.991034445603626, -4.9809560423233554],
[0.17116217177604565, 3.3177075206574909, 1.7537041099136621, -1.9103533234598826]]], [[[2.5308867325681046,
-1.042247247233361, -1.1846149285407979, 3.7060742981010737], [-1.297359352173022, 4.2498337462445868,
1.493118867126439, 3.1157447558047586], [0.15917981571003992, -4.2811882943532051, -2.892893263308518,
-0.15525299846753171]], [[0.70528939883089059, -3.5415574610175469, 0.91408910363181572, -4.9096466754450141],
[3.8808985862028376, -2.4998339203436348, -0.7493422147604818, -2.7415281675633221], [-2.6740604084639994,
1.5889649415442406, -3.0729275401598812, -3.7916787182652412]]]])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[[[2.0090822894427993, -2.7124858421931139, -0.96836475835178126, 0.3238978397067413],
[4.7641057027263036, 15.356469115209322, -2.061662642117656, -6.562625456773401], [-4.6668582162192029,
8.0903141424262, -1.1189299299060254, -1.2665823552745537]], [[-3.0070734760004947, -9.4342525180294281,
-9.2584252273842349, -2.7114134793590052], [0.84855946551469918, -0.23571754026667907, 5.3813997896526438,
-20.606830883627179], [-0.64931485257841604, -9.6991197608586983, -17.715009990674201, 16.634756638713682]]],
[[[1.9876848644362788, -8.8461294682679377, 2.0599533040300941, 3.0738352724669484], [-5.7627329081984522,
5.4922551255024175, -0.40496650907269482, 4.8500525280070113], [0.087742741052748138, -0.61050231454407089,
-18.674538525238951, -20.332033570982652]], [[-18.736980336078339, 0.090224051608984959, -7.8265826975130048,
-14.918547731118739], [-3.0484834062368544, 4.9989235409822532, 11.35952640798204, 8.9719649100247967],
[0.071710601287329825, -14.224428057977713, -0.66194795753096181, 5.108460871613123]]], [[[1.2334105747390316,
-5.1860217243322069, 2.4328397786485927, -4.629653480222637], [1.050980717527584, -0.13744197138935588,
-4.0947223559985781, -13.014291841725987], [-0.66356095792037362, -12.889128679761718, 10.190317019204748,
-0.60864297055611438]], [[-2.6690287016020968, -16.834758078342482, 4.3790342868285039, -24.202574957127123],
[0.86810285165369627, -2.7788927376642603, 3.4554490178896384, -12.177018993055025], [2.1023906134884078,
2.4439723034008711, -13.877480957101886, 14.182930630179902]]]])+(1.-msk_ref)*numpy.array([[[[3.0113280542199465,
-0.90025075885739858, -3.5176021783907041, 0.30025823205054236], [3.5123576464773363, 2.3959754790373493,
3.5689085831507947, -2.4387143505360127], [-4.5307613983952022, -4.2459520117298588, 9.5902339496154969,
3.4176636848536646]], [[-5.922049778516727, 10.86621388347861, 13.608113750703422, 0.13170480308403179],
[-0.74991472483682653, -0.076824482901260024, -9.4579997542208236, 11.187823767842159], [-3.282192872812852,
-20.262395804553826, 0.73540426902211176, 3.1334614243985466]]], [[[-6.1684504326080232, 19.867315661446234,
3.9135391265494457, 11.25669626921629], [-13.210129880150486, 11.856336061073677, -3.3294557689967741,
0.35832155842535174], [0.27001061779106766, -0.58035091979845577, 10.695947356351342, 13.332596454925692]],
[[-9.2813238562468232, 0.6859935779052545, -3.4482639243184217, -11.457176901097965], [1.0643149425401714,
-3.9013400316202884, -12.065093446111812, -9.5404386832200192], [0.20160140002551957, 0.39123422170360572,
7.9160655295443281, -5.3755659751229006]]], [[[-11.180494182182111, 5.0612114481650323, -5.426389984575974,
-2.9740490540264393], [4.5323058977990254, 4.5774506556220018, 5.0449546985677118, 6.8641480138584354],
[0.62462175972042311, 2.4872322049381017, 3.7512305527917205, -0.27636112765281257]], [[-0.13986843034333699,
12.46630312012565, 0.70275415467140356, 17.410562133681889], [10.092520700006713, -11.98457377122277,
-3.7311636912234754, 6.5362289327304204], [-2.095876782493292, 1.1581730360816256, 1.2242572954381448,
-8.7721139535984367]]]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
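# All tests in this family accept a result when its error is small in the
# supremum norm relative to the reference: Lsup(sub-ref)<=self.RES_TOL*Lsup(ref).
# A minimal numpy-only sketch of that acceptance criterion (the helper name is
# an illustrative assumption, not part of the generated suite):
def _close_in_sup_norm(sub, ref, rtol):
    import numpy
    sub, ref = numpy.asarray(sub), numpy.asarray(ref)
    # Lsup is the supremum norm: the largest entry in absolute value
    return numpy.max(numpy.abs(sub - ref)) <= rtol*numpy.max(numpy.abs(ref))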
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_quotient_overloaded_constData_rank0_Symbol_rank0(self):
arg0=Data(3.43231999068,self.functionspace)
arg1=Symbol(shape=())
res=arg0/arg1
s1=numpy.array(-1.4540852929)
sub=res.substitute({arg1:s1})
ref=Data(-2.36046675352,self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
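# The quotient tests below mirror the multiplication tests but exercise the
# overloaded division operator: res=arg0/arg1 is again a Symbol, and only
# substitute() turns it into numbers. The generated divisors are nonzero but
# not bounded away from zero, so individual reference entries can become large
# (near-zero divisors later in this block produce entries of order 10**3).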
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_quotient_overloaded_constData_rank0_Symbol_rank1(self):
arg0=Data(-1.99888672106,self.functionspace)
arg1=Symbol(shape=(2,))
res=arg0/arg1
s1=numpy.array([-1.0166253896000677, 4.6638233509673803])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([1.966197914693572, -0.42859400338170606]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_quotient_overloaded_constData_rank0_Symbol_rank2(self):
arg0=Data(1.75603219348,self.functionspace)
arg1=Symbol(shape=(4, 5))
res=arg0/arg1
s1=numpy.array([[-1.9661246795683085, -1.3570192095877909, -1.8523576170966627, 0.73214584413366701,
4.0725024727547723], [1.8898911955656281, 3.704542947914863, -3.7748480598358505, -2.2414248653554889,
4.3999361819787648], [-3.981378759357713, 0.34597746243312777, 2.0496740598398322, -2.7919624238592222,
3.7661073743193256], [-2.3423350580789757, -0.53058380320099552, -0.56646103984835783, 1.7717941659157441,
-4.6013674001294715]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[-0.89314386403468715, -1.294036356358591, -0.94799847355391309, 2.3984732107050015,
0.43119241921436163], [0.92917105365848052, 0.47402128094425233, -0.46519281455793515, -0.78344459393917454,
0.39910401443456101], [-0.44106132564159456, 5.0755681631228065, 0.85673728710838137, -0.6289598235553272,
0.46627247153328438], [-0.74969299862837668, -3.3096226889880782, -3.1000052429972795, 0.99110394833928261,
-0.38163268454377053]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_quotient_overloaded_constData_rank0_Symbol_rank3(self):
arg0=Data(2.14054149252,self.functionspace)
arg1=Symbol(shape=(6, 2, 2))
res=arg0/arg1
s1=numpy.array([[[-1.3253901188041395, -2.78253495112182], [-4.300977060645458, 1.3835241184212173]],
[[-4.4915480214339922, 2.4570745420262323], [0.3669594834630967, 2.2625027935147521]], [[3.5861676332312769,
-0.23287586841902996], [-0.090748090278614413, 3.6431882097617247]], [[-2.6410132226145535, -3.1196251341148207],
[1.761434899422647, -2.1643804473513208]], [[1.5009114368679546, 3.1354061944070661], [4.4611923331014278,
-2.1935651672343202]], [[-0.031809360087940775, -2.6612631796034103], [-4.8635727802285782, -4.7669299996057299]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[-1.6150275018307814, -0.76927748622185721], [-0.49768726090397136, 1.5471660117975328]],
[[-0.47657099118358148, 0.8711748284031503], [5.8331821058896702, 0.94609451915777054]], [[0.59688829732556048,
-9.1917703068822263], [-23.587730451975027, 0.58754622854454441]], [[-0.81050010435175179, -0.68615343206318313],
[1.2152260030870663, -0.98898578350352317]], [[1.4261610911502236, 0.68269989908859252], [0.47981376562515166,
-0.97582762732427908]], [[-67.292818422177291, -0.80433288557445193], [-0.44011708866066374,
-0.44903984172213457]]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_quotient_overloaded_constData_rank0_Symbol_rank4(self):
arg0=Data(-3.54974920415,self.functionspace)
arg1=Symbol(shape=(3, 2, 3, 4))
res=arg0/arg1
s1=numpy.array([[[[-4.6125564005877369, -0.69438643429163349, -1.2362005816145647, -0.30244075141731752],
[-1.1937784959185995, -0.0033389287251761601, -1.6183053769253828, -3.6545432537969038], [-4.6417500711286301,
1.7794083989940725, -4.5916358946296647, -4.2537071645246352]], [[0.99190852437837673, 4.9497457601229851,
2.1159345761594448, 4.5666165220115857], [3.2506291129568652, 0.42673325762989656, 2.0122314690593255,
4.7533864244368562], [2.9256051481580858, -2.1428797577186054, 1.0170937184162421, -0.51756150989596517]]],
[[[2.7697508281062682, 3.2216955050655187, 4.1307453643983649, -0.6109132252804681], [1.1663708850914603,
-1.2817940773962224, -0.0804200958300747, -0.059735834319028847], [-1.7817187949510171, -0.93987586758695407,
4.4482533799407129, 2.9112844484768345]], [[-1.2926950097177148, 4.8418522299498115, 4.081508190552551,
4.5792771623348134], [3.4344906345489221, 3.5115707226638531, -1.2336261997761913, -4.9918490638148807],
[2.9678262816222762, -1.7062580143959103, 4.1797990849162208, 3.392276837834407]]], [[[-3.2392133388383293,
-4.3004082192842166, 4.5627762584330434, -0.079623098707676476], [-1.9104374497872612, 4.7603569268729746,
-1.5974727715246164, 2.6969700051872891], [0.14884820101978846, -1.787020268808436, -2.4205499696318755,
2.1558951788757899]], [[-2.1125147886516817, 1.5310662008239184, -1.5322830724155088, 2.2222574246117084],
[1.1894588535421828, 3.3001015528617934, 1.1758339668590718, -0.75094141518856361], [-1.3255264932208934,
-0.45235303559717899, -2.4004740260439119, 3.7531870834047236]]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[[0.76958391309789775, 5.1120658884563053, 2.8714993803941131, 11.737006959260613],
[2.9735409175867016, 1063.1401555185284, 2.1934977506490343, 0.97132499402245065], [0.76474371729496249,
-1.994904152501356, 0.77309030716063942, 0.83450718793092105]], [[-3.5787062182711575, -0.7171578857134806,
-1.6776271082029788, -0.77732587946434517], [-1.0920191386953129, -8.3184264190342141, -1.7640859208948241,
-0.74678321667685821], [-1.213338446025066, 1.6565321462218994, -3.4900905785521159, 6.858603540403533]]],
[[[-1.2816131935507342, -1.1018264136283962, -0.85934834781714486, 5.8105620524410613], [-3.043413762742126,
2.7693599672108506, 44.140076774461704, 59.4241169411154], [1.992317314162168, 3.776827692429984, -0.79800966827936781,
-1.2193068959669231]], [[2.7460067358999258, -0.73313869064233306, -0.8697150755119526, -0.77517675351608806],
[-1.0335591451147692, -1.0108721949521922, 2.8774917432793443, 0.71110908177906096], [-1.1960771511898256,
2.0804293220599646, -0.84926311816258404, -1.046420847661474]]], [[[1.0958676792255069, 0.82544470737243281,
-0.77798011629176977, 44.581902259059319], [1.8580818778151313, -0.74568971585096155, 2.2221031039929464,
-1.3161989926923772], [-23.848116267640666, 1.9864067946559978, 1.4665052358695663, -1.6465314450028652]],
[[1.6803428895353212, -2.3184818541737942, 2.3166406182071824, -1.597361837937973], [-2.9843396377924201,
-1.0756484754448787, -3.0189204464226163, 4.7270654306071433], [2.6779918940160439, 7.8472982931634876,
1.478770095254565, -0.94579596627218787]]]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_quotient_overloaded_constData_rank1_Symbol_rank0(self):
arg0=Data(numpy.array([3.9883962094707375, 1.3750251611114059]),self.functionspace)
arg1=Symbol(shape=())
res=arg0/arg1
s1=numpy.array(3.45871616654)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([1.1531435415409816, 0.39755362825459506]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_quotient_overloaded_constData_rank1_Symbol_rank1(self):
arg0=Data(numpy.array([-1.2334048137186624, 1.377686460692952]),self.functionspace)
arg1=Symbol(shape=(2,))
res=arg0/arg1
s1=numpy.array([1.4844200870995552, 1.9547802909252443])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([-0.83090011004138475, 0.70477816207204547]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_quotient_overloaded_constData_rank2_Symbol_rank0(self):
arg0=Data(numpy.array([[-2.2020384928131884, -4.3740172257969014, 3.3041701150346512, -2.2405366435075358,
4.0852398702255694], [0.83572753098331365, -1.3774478891852282, -3.7695866211336106, 3.7407792775265367,
-3.8975656101620961], [3.4429597608670139, -4.1953097446124099, -3.4659921023383311, -1.6700250836404607,
1.8624190364059556], [1.5519146057464788, -3.9581877480458152, 1.023175615935096, 0.66728877091972283,
-2.159854941490873]]),self.functionspace)
arg1=Symbol(shape=())
res=arg0/arg1
s1=numpy.array(0.937332179697)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[-2.3492615963795287, -4.6664537082363049, 3.5250791412093454, -2.3903336426916342,
4.3583693793011333], [0.8916023039487998, -1.4695408085000394, -4.0216122979493001, 3.9908789632457138,
-4.1581476605459811], [3.6731479356437879, -4.4757982660618616, -3.6977201651801295, -1.7816790245909124,
1.9869359835781315], [1.6556719585237416, -4.2228228516861241, 1.0915827260573738, 0.71190212538668518,
-2.3042577522397214]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_quotient_overloaded_constData_rank2_Symbol_rank2(self):
arg0=Data(numpy.array([[3.5835048539727268, 3.8571075798310233, 3.9190114681880495, 2.1261827038253269,
3.3617327213332029], [0.75329476297362508, 4.5554315239695242, -4.0352341285409521, -0.2623106802883246,
1.1321991848119719], [3.601642681322538, 0.35254643689157916, 4.411383875853911, -4.8746220027908125,
-2.380461998817911], [3.3624236591334427, 3.869796930431475, -3.0974921018602899, 3.9594635826095743,
-2.2054920280196821]]),self.functionspace)
arg1=Symbol(shape=(4, 5))
res=arg0/arg1
s1=numpy.array([[-0.59646319056279751, -3.6970890099277609, -1.8190897467441278, 1.7073115435483661,
-0.87921199151292839], [3.1618107337278349, -3.9520052671591479, -4.9928921272070461, -2.192093820543024,
4.5495573770272415], [1.8431133075959449, 3.1217428492223966, 4.9262597144433933, 3.4883151619796173,
-3.9635009262862084], [1.1267839216226863, -1.8588755230890319, -1.9192169107766244, -2.7631759225358712,
-2.8239106079831644]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[-6.0079228872304471, -1.0432823146734, -2.1543804945316398, 1.2453396170486881,
-3.8235746939124522], [0.23824789856585635, -1.1526886266637346, 0.80819573620514118, 0.11966215945234733,
0.24885919463923109], [1.9541081204716173, 0.11293256809394019, 0.89548341572818257, -1.3974144469287193,
0.60059579727369927], [2.9840891359997417, -2.0817945485670526, 1.613935394414, -1.4329393761421547,
0.78100631860823788]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_quotient_overloaded_constData_rank3_Symbol_rank0(self):
arg0=Data(numpy.array([[[4.0802610727283497, -2.8024539989346686], [1.8071267457266043, -0.13841814957606147]],
[[-1.7988661169618148, 0.027850264656436252], [-0.50642743316112071, 1.8452982570747869]], [[2.7405547448954763,
-3.706784288688544], [-3.7888483700042452, -0.58588961440206511]], [[0.7192047060638771, 3.626558913441384],
[-3.4305483052647547, 0.32455136413554975]], [[-4.74732548737156, -3.237460084387326], [3.9516124747522348,
2.0449010242900272]], [[2.5313399910511016, -4.0403085459172958], [0.34682792129693585,
4.8072404359213934]]]),self.functionspace)
arg1=Symbol(shape=())
res=arg0/arg1
s1=numpy.array(-3.09807610149)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[-1.3170306148280686, 0.9045788118592859], [-0.58330611854821657, 0.0446787441759516]],
[[0.58063974480716518, -0.0089895353581049327], [0.16346513661097703, -0.59562715589453463]], [[-0.8845989107815041,
1.1964794173089723], [1.222968140835246, 0.1891140163149812]], [[-0.2321455905225138, -1.1705841931056031],
[1.1073156994483895, -0.10475900316959676]], [[1.5323463116641085, 1.044990496789675], [-1.2755052959650881,
-0.66005513011999017]], [[-0.81706837021672774, 1.3041346995883329], [-0.11194945183245521,
-1.5516857166971336]]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_quotient_overloaded_constData_rank3_Symbol_rank3(self):
arg0=Data(numpy.array([[[1.1578386437899475, 4.996126396500328], [-0.91635300388975871, -1.4064554437689711]],
[[1.9840437948610754, -4.7599852474548303], [3.8573711058777711, 3.2212451543838636]], [[-0.7067030968790311,
-1.8450312080991926], [-3.4904314203564146, 4.1106376595074803]], [[-1.5537756306237585, -3.2824613533646483],
[4.7975800514366824, -2.7680700289656945]], [[1.2578485281438132, 3.6884342068649723], [0.30686143258336962,
-0.1053444702926134]], [[2.8155471322537089, 3.9876357168096845], [-1.6272658547215357,
-4.1924212908247949]]]),self.functionspace)
arg1=Symbol(shape=(6, 2, 2))
res=arg0/arg1
s1=numpy.array([[[-3.55725957171527, -1.1670775665522681], [2.7862539310451577, 1.0863835234493537]],
[[4.9340858930189082, -1.5697941944512426], [-2.287640013757879, -2.7787914055587528]], [[3.7139854530312846,
-0.35242947963774185], [-2.1390766886024091, 0.4158275961222877]], [[3.5822790102073565, -2.1203769144295173],
[-0.76525921225763494, 0.97800877770561545]], [[1.6695611656178597, 2.611002263099742], [0.74990774985468001,
-0.34139195816695178]], [[-2.9013746777617277, -2.2708713146155524], [-4.3105323025833497, -0.54392296428618003]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[-0.3254861278598376, -4.2808863263987469], [-0.32888352123240383, -1.2946214788893002]],
[[0.40210969932003821, 3.0322352218398869], [-1.6861792426603492, -1.1592252473287548]], [[-0.19028160067300032,
5.2351784249027027], [1.6317467433282764, 9.8854373731814871]], [[-0.43373942291943912, 1.5480555985244666],
[-6.2692222120176346, -2.8303120504291575]], [[0.75340068638834046, 1.4126507123307199], [0.40919890832283623,
0.30857337957883746]], [[-0.97041831716328664, -1.7559936977251271], [0.37750925883244102,
7.707748277050114]]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_quotient_overloaded_constData_rank4_Symbol_rank0(self):
arg0=Data(numpy.array([[[[0.572941583844365, -2.332247190999662, 1.4999835530459045, 1.5952831694610961],
[3.2104897575397366, -4.9371677957474267, 2.2024000914918265, 1.4710857974423694], [-0.87463651673330123,
0.53658487579484948, -1.7854982541271172, 3.5567809651315283]], [[1.0317521940803989, -0.36264412704591908,
-3.5507235577725971, 1.3909848991573099], [1.5627242518273841, -3.3326845915707382, -4.9229613174667284,
-4.9895568338934826], [4.1835909015322699, 1.6315284699855113, 0.71636527590831811, -4.6681291758637924]]],
[[[-0.51377242935229006, -1.8235520070656133, -2.0006476650985006, 0.44472827017699412], [3.9164591306406944,
-1.6610530974168092, 4.9476376200326353, -1.3836826771010848], [1.4986466482116088, -3.8631080673395788,
-1.8710856814872723, -1.4352468456143597]], [[1.9960625823999054, -4.210241298814994, 2.6483546110070852,
-0.86544321226839926], [-0.93317484961197739, -0.44349168612483147, -2.5628503186817273, -3.8695270931809622],
[-4.9604568128480784, 2.1396464175683629, 1.7039271741506079, -3.8283222528970318]]], [[[-1.441363355020906,
-3.3811365967582629, -3.351110695562276, 3.7482593597594853], [-2.3746596093071446, -2.8131334479206336,
-1.2166137380924189, 0.66777797821446239], [-1.3602489200986803, 1.2310997543301729, 4.3550327489506273,
2.8106898161846896]], [[0.0025102013600246664, -4.1702593487397666, -0.76776964329102171, 1.6998502005468374],
[-1.2721136787178864, 0.48196474656392496, 3.8101051719028369, 3.2695331075470051], [2.8814364694283192,
1.768178124165547, 2.7195217125632869, 1.3866661750201548]]]]),self.functionspace)
arg1=Symbol(shape=())
res=arg0/arg1
s1=numpy.array(-3.38430915471)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[[-0.16929351239873056, 0.68913538461830237, -0.44321705981189319, -0.47137631242701161],
[-0.94863962208351837, 1.4588406584763218, -0.65076799748788894, -0.43467831400514534], [0.25843871725357914,
-0.15855078577787907, 0.52758130906604084, -1.0509621912582798]], [[-0.30486345866006281, 0.1071545507422134,
1.0491723407800817, -0.4110099980732112], [-0.46175576177832739, 0.98474590801808515, 1.4546429100936369,
1.4743206385109813], [-1.2361727934067384, -0.48208612020977609, -0.21167252847185297, 1.3793447827791936]]],
[[[0.15181013490950382, 0.53882548068242941, 0.59115393235071689, -0.13140887839924492], [-1.1572403558900153,
0.49081009490641131, -1.4619342955545513, 0.40885232815524336], [-0.44282202946059263, 1.1414761154314166,
0.55287079163030139, 0.424088574655207]], [[-0.58979912624731723, 1.2440474869008653, -0.78253920961085932,
0.25572226788544083], [0.27573569876520421, 0.13104349096103143, 0.75727429189307016, 1.1433728174015798],
[1.4657221270526268, -0.6322254616098969, -0.5034785819664972, 1.1311975584638549]]], [[[0.4258958886820684,
0.99906256851586073, 0.99019047680574546, -1.1075404723415438], [0.70166746025578719, 0.83122827121273091,
0.35948658425561852, -0.19731589157117033], [0.40192809164754195, -0.36376693087158457, -1.286830649879354,
-0.83050622378023153]], [[-0.00074171751021319377, 1.2322335691269641, 0.22686155672925484, -0.50227391258867338],
[0.37588577773597637, -0.14241156009429243, -1.1258147520589403, -0.96608582670258014], [-0.85141053541651612,
-0.52246353490009168, -0.80356775585259854, -0.40973389593840998]]]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_quotient_overloaded_constData_rank4_Symbol_rank4(self):
arg0=Data(numpy.array([[[[-0.16841036162188416, -2.6003560333643625, 3.5545184910572907, 1.5139147914100795],
[-0.94587340061535929, 0.16930195740283693, 2.4177976471661742, 2.0737743000066882], [4.6768686667116981,
-3.6339789866072092, 2.5135388182835854, -3.8110207055172829]], [[-3.2003418240897084, 4.104104479479858,
3.4425399383215289, 0.8930435704987314], [-1.2104858360688286, -0.54602956579953776, 0.96613869262303176,
-1.1447665449499578], [-0.73618243678854167, -0.75634063494127179, -3.4905332722260507, -1.9294737289274213]]],
[[[-1.7695622917331444, -4.4618511183236436, 1.8912341467374274, -4.274667165997097], [1.0133441322095447,
2.2698437785116283, -1.3999716834553122, -0.82282963781866947], [-1.8934325312597222, -1.8347274000416256,
2.4812263519307525, -0.32095149255687971]], [[1.2294222774888874, -4.7301199904748072, -2.4933570004059069,
1.739157374421735], [-4.4527415960373729, -0.24978137738396544, -3.5733206736544734, -2.6055729246625714],
[4.2634437530410096, 0.63371633316309506, 0.35411662272234246, -2.3278320513076158]]], [[[0.57519577741460104,
-4.1358797158873672, -1.3160752022537814, 3.3667114590653], [4.242351859563227, -4.1169328622744894,
-1.1012574518657403, -0.017794823889186517], [0.56080815754529567, -0.32077975391617564, 0.1045421322530995,
-4.5192026724964922]], [[3.7259353163717144, -0.29314631925237755, 0.96174008183977833, 4.8927047445339618],
[0.19844208500697569, -3.4269276368232626, -1.2296494241640721, 0.74453382811172109], [0.95832215119780528,
-1.2319028512539818, 1.7600259249758201, 0.51820978084550617]]]]),self.functionspace)
arg1=Symbol(shape=(3, 2, 3, 4))
res=arg0/arg1
s1=numpy.array([[[[1.5896505942866401, 0.6891827619618871, 4.6694699071013428, -1.9615140167099101],
[2.3734917018897859, -3.8358457650850006, -2.4207628518454594, -3.6880303333340336], [-3.955550166922265,
-4.8219514951045745, 3.0332138878682766, -0.43173075751425927]], [[3.6491269792507204, 3.5362867736239174,
-3.727772234197988, 0.50203076783315659], [3.0857009585880579, -2.4481643817771017, 3.9495322686066388,
3.9076676035973037], [-2.5661687086956961, 0.51956619090885425, 0.12707753474037187, -3.8065370519275588]]],
[[[-2.7343322214319321, 2.1071856335550851, -2.4809811090224079, -3.7158191036172306], [4.1081487764099194,
-3.9067998805821045, 3.9727737524493261, 0.18115530713208194], [-1.8658727247580997, 1.4129364110321907,
-0.60367775527123779, -4.9370475380777705]], [[0.85946374601468811, 1.2407326725323173, -0.072139226735624007,
-2.4294954216721942], [4.3152150682384747, -1.395544165290584, 3.1948762392822694, 3.0548315771279171],
[1.108458866445222, 4.708696707365803, 3.5605248213884586, -4.016164807819588]]], [[[2.3052524453408676,
-1.4229694054870725, -0.33634510639310111, 4.763489013156013], [1.6475037257860015, 4.4709813704960855,
3.9425928470200127, 4.19691117466847], [1.4825715910251436, 2.8996239189360375, -4.7982559296213081,
-0.63001732867952054]], [[-2.2623123840312722, -3.1391124653197631, -1.6105305432525729, -3.7485403170057396],
[1.8614704354355895, -1.3878572051796354, -1.1974399594213905, -1.1968610566940283], [-3.0520711866611725,
0.39756329287125247, 4.6967348695454127, 1.9766807360255321]]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[[-0.10594174734194262, -3.7731008041494896, 0.76122527005722196, -0.77180931592291213],
[-0.39851557090435591, -0.044136800010019503, -0.9987750949346299, -0.56229860184798586], [-1.1823560489312861,
0.75363242253609575, 0.82867180199088575, 8.8273087779515276]], [[-0.87701574713271235, 1.160568908067954,
-0.92348451623203176, 1.7788622286105038], [-0.39228877078960939, 0.22303631645975486, 0.24462104039572191,
-0.29295392061907044], [0.28687998349209093, -1.4557156492770218, -27.467744628170902, 0.50688426320462898]]],
[[[0.64716433426163888, -2.1174456807566329, -0.7622928444959578, 1.1503970044816891], [0.24666685345682601,
-0.58099822051121464, -0.35239149538585995, -4.5421227279791312], [1.0147704643172784, -1.2985208574965552,
-4.1101835048003634, 0.06500879120192582]], [[1.4304527482278198, -3.8123603054803912, 34.563123466010623,
-0.71585126644309238], [-1.0318701445059233, 0.17898493189712508, -1.1184535506318147, -0.85293505022370875],
[3.8462805270471452, 0.13458423265439332, 0.099456299418312022, 0.57961566885284688]]], [[[0.24951530951724002,
2.9065134499302072, 3.9128715632794941, 0.70677426771993568], [2.5750180671301726, -0.92081190260421242,
-0.2793231496623142, -0.0042399810595448685], [0.37826716830417445, -0.11062805483887708, -0.021787527340449774,
7.1731402721389852]], [[-1.6469588119976495, 0.093385096103116669, -0.59715730686949942, -1.3052293241552109],
[0.10660501570659631, 2.4692220669630798, 1.0268986052196265, -0.62207206421125749], [-0.3139907599095571,
-3.0986332826580223, 0.37473393194667798, 0.26216159817870188]]]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
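# The tests below exercise division of tagged Data by a Symbol: a Data object
# carrying one tagged value is divided by a placeholder Symbol, the Symbol is
# then substituted with a concrete numpy array, and the substituted result is
# compared against precomputed quotients to within RES_TOL relative to
# Lsup(ref). Test names follow the pattern rank<m>_Symbol_rank<n>, where the
# Symbol is either a scalar or matches the shape of the Data operand.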
def test_quotient_overloaded_taggedData_rank0_Symbol_rank0(self):
arg0=Data(4.14743871401,self.functionspace)
arg0.setTaggedValue(1,-1.21319155942)
arg1=Symbol(shape=())
res=arg0/arg1
s1=numpy.array(1.68574433632)
sub=res.substitute({arg1:s1})
ref=Data(2.46030114096,self.functionspace)
ref.setTaggedValue(1,-0.719677078714)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_quotient_overloaded_taggedData_rank0_Symbol_rank1(self):
arg0=Data(4.5956316483,self.functionspace)
arg0.setTaggedValue(1,-2.78021827332)
arg1=Symbol(shape=(2,))
res=arg0/arg1
s1=numpy.array([-3.1585434961186776, -0.83641144105439835])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([-1.4549844426550103, -5.4944629194795436]),self.functionspace)
ref.setTaggedValue(1,numpy.array([0.88022162010414895, 3.323984030895371]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_quotient_overloaded_taggedData_rank0_Symbol_rank2(self):
arg0=Data(0.921054948699,self.functionspace)
arg0.setTaggedValue(1,0.0394823576247)
arg1=Symbol(shape=(4, 5))
res=arg0/arg1
s1=numpy.array([[-3.4132432835484785, 1.7587931443963667, -3.0756623852847209, -2.3252904186343857,
1.7356444015043548], [2.2713204269584573, -0.88162393927914628, 4.5884799751444127, -0.28536103244470201,
-0.96415679848996039], [-4.9028675399283088, -3.5992342473478436, 2.9789784497386043, -1.4541535741896947,
-1.7122334747232371], [-3.706048617922951, -1.0579893940366314, -1.4083276675366196, 2.9524896646883523,
-3.7595222430650335]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[-0.2698474360553203, 0.52368577375522674, -0.29946555678715542, -0.39610318836626168,
0.53067030775444735], [0.40551519625612281, -1.0447254295881458, 0.20073204060776384, -3.2276829839304844,
-0.95529580887790655], [-0.18786045945513499, -0.25590302975620283, 0.30918483105489369, -0.63339592533191935,
-0.53792602603303119], [-0.24852748672649719, -0.87057106043797383, -0.6540061449688519, 0.31195873764246379,
-0.24499255201844189]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[-0.011567402128943166, 0.022448551013809145, -0.012837025875668295,
-0.016979538258239214, 0.022747953204296133], [0.017382997641416348, -0.044783672341068809, 0.0086046703567616654,
-0.13835931727073011, -0.040950141809460834], [-0.0080529113428377385, -0.010969654907516195, 0.013253656678242865,
-0.027151435945629462, -0.023058980102641378], [-0.010653491547244584, -0.037318292458565619, -0.028034922933652444,
0.013372564211460894, -0.010501961438730782]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_quotient_overloaded_taggedData_rank0_Symbol_rank3(self):
arg0=Data(4.34795846919,self.functionspace)
arg0.setTaggedValue(1,-3.37152427446)
arg1=Symbol(shape=(6, 2, 2))
res=arg0/arg1
s1=numpy.array([[[-0.7964283399534855, -4.3563885775398905], [2.2670305253314744, 3.660342998126211]],
[[0.98970416783162918, -0.36362420293442987], [-1.5082103582776121, -1.0358411207289953]], [[0.15017273952708887,
1.5640231629694314], [-1.220547262103826, 1.2530487741570369]], [[-0.17882026193937506, -3.7724811453653215],
[-1.2436803428756193, -0.22774847919000152]], [[2.699497971812451, 1.4296834805633196], [-4.8957592859528791,
-2.146219757066874]], [[-0.72865666362676773, 4.5771813067946603], [2.6396990597187395, 4.5439155054068241]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[-5.459321637707057, -0.99806488604065802], [1.9179090976533884, 1.1878554745872083]],
[[4.393190016282583, -11.95728566498973], [-2.8828594402153405, -4.1975148332894978]], [[28.953047556323426,
2.7799834248849646], [-3.5623024230104048, 3.4699036133818302]], [[-24.314685718698094, -1.1525461100137917],
[-3.4960418037466843, -19.091053800468391]], [[1.6106544678275652, 3.0412035449119168], [-0.88810707700979552,
-2.0258682527148579]], [[-5.9670880487799733, 0.94992052482972167], [1.6471417274567848,
0.95687485033943109]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[4.2333052521197194, 0.77392643343262635], [-1.4871984460681718,
-0.92109517501174554]], [[-3.4065980361069386, 9.2720018284097723], [2.2354469692890762, 3.2548662212688542]],
[[-22.450974025508192, -2.1556741321279111], [2.7623053847588577, -2.6906568555004609]], [[18.854263146114384,
0.89371534132229213], [2.7109251133345817, 14.803718059734445]], [[-1.2489449185243982, -2.358231259085152],
[0.68866218241893562, 1.5709128868843167]], [[4.6270410232452646, -0.73659399715217178], [-1.2772381238114758,
-0.74198656873137026]]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_quotient_overloaded_taggedData_rank0_Symbol_rank4(self):
arg0=Data(3.7553962803,self.functionspace)
arg0.setTaggedValue(1,-2.45958468224)
arg1=Symbol(shape=(3, 2, 3, 4))
res=arg0/arg1
s1=numpy.array([[[[0.30209630840780743, 2.8852270658365136, -4.3903415927390474, 4.4845847234931409],
[4.0134940566526396, -2.8333839823358611, 4.6472411139448848, -2.1676319616841253], [1.9906320590044926,
-4.6737915213906014, -1.1957812424640659, 2.9170526831938375]], [[3.0964720689860776, -4.4876401431960087,
4.998637079152628, 2.8533787134807831], [3.7729255177559757, -0.73273448472817293, -0.90953567432588045,
-3.0134320952733864], [1.2174883207447538, -4.969203447194892, 2.3012148245460748, 0.41593532301908454]]],
[[[4.3069241488554848, 0.71234169023258431, 4.8556620140326565, 1.8082145124352156], [-3.5374500772725535,
-1.7901952523193723, 4.6526679694301283, -4.4376188651696005], [2.7452568592588422, 0.53836096412756973,
-2.8204068732647301, -4.8795271769078621]], [[-3.393174985031282, -0.64606900578155368, -4.4624221196219267,
-2.3987951994472487], [1.4375945710104423, 3.8286850742584093, 1.170192112555676, 4.7719612212908586],
[-2.396723494110776, -0.28740503184500277, -2.524991852119526, -3.3537880662496655]]], [[[0.024623766163323957,
4.327465668810266, 0.68752296336131913, -4.9843602135399125], [2.0216870258996078, 3.7344481342322595,
3.0194814105721566, -1.7317988533225126], [-1.6115633973347188, 4.7297708204656868, -2.137484390559826,
2.23042822074654]], [[-2.2883581231955787, 0.20442903998765516, -4.1920530024020906, 1.7748267430388616],
[3.2690041254837414, 1.4972269287755724, -0.53465270543508492, -1.4714871726382519], [-3.1922247493051148,
-2.1523014413616917, -3.9955444358316869, 3.5074148206456552]]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[[12.431122710808555, 1.3015947080098942, -0.85537678583155596, 0.8374011222547626],
[0.93569249817995759, -1.3254102880908314, 0.80809155114224152, -1.7324879622931901], [1.8865346126183558,
-0.80350102547628632, -3.1405378734334621, 1.28739405425753]], [[1.2127983707372514, -0.83683097585119681,
0.75128404419717099, 1.3161226242269759], [0.99535394023185464, -5.1251802099813597, -4.1289158702687541,
-1.2462189827307262], [3.0845439880708629, -0.75573405681742334, 1.6319190369549657, 9.0287986435992362]]],
[[[0.87194390950620149, 5.2719029811010483, 0.77340561790478524, 2.0768533016816324], [-1.0616111035537803,
-2.0977579263682209, 0.80714899601135559, -0.84626381724113975], [1.367958071986602, 6.9756102885086664,
-1.3315086968117844, -0.76962298684838992]], [[-1.1067499603958972, -5.8126860237735665, -0.84156007200365257,
-1.5655343487285083], [2.612277728386359, 0.98085797276682929, 3.2092134616238446, 0.78697124853915967],
[-1.5668875819540604, -13.066564131434559, -1.4872904548770602, -1.1197476424022073]]], [[[152.5110438180339,
0.86780498511318693, 5.462212144798019, -0.75343597160138298], [1.8575557107454101, 1.0056094355349154,
1.2437222720268766, -2.1684944952439889], [-2.3302814437896617, 0.7939911726906822, -1.7569233707087559,
1.6837108880566276]], [[-1.6410876611635838, 18.370170307146662, -0.89583702261107834, 2.1159227485324705],
[1.148789091767819, 2.5082345288638859, -7.0239919149828065, -2.552109423805994], [-1.1764197621477614,
-1.7448282141760787, -0.93989601182298488, 1.0707020618702912]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[-8.14172372777319, -0.85247525623201714, 0.56022626720078217,
-0.54845316431455637], [-0.61282878397679552, 0.86807319359829038, -0.52925695524084293, 1.1346874034490511],
[-1.2355797602629599, 0.52625040526085098, 2.0568851516421454, -0.84317458385549982]], [[-0.79431838151277434,
0.54807974876632271, -0.49205106177732016, -0.86199026810434565], [-0.65190385303429499, 3.3567202492850878,
2.7042201330470084, 0.8162071035529318], [-2.0202121370100472, 0.49496558319119838, -1.0688201101440833,
-5.9133825528054009]]], [[[-0.57107685142078468, -3.4528158550336459, -0.50653951513273332, -1.3602283718672756],
[0.69529876846570882, 1.373919788386156, -0.52863963179770801, 0.55425775781276576], [-0.89593972743977257,
-4.5686534613860141, 0.87206732672207277, 0.50406209312172789]], [[0.72486231717691918, 3.8069999647501405,
0.55117705503954395, 1.0253416726877276], [-1.710902873337762, -0.64240976589408783, -2.101864006641482,
-0.51542428116613925], [1.0262279684244842, 8.55790403684715, 0.97409608675446258, 0.73337510708806808]]],
[[[-99.88661628455435, -0.5683660762380438, -3.5774582280300198, 0.49346045969051788], [-1.2166001219412634,
-0.65862065660798974, -0.81457189092959847, 1.4202484760385075], [1.526210316208666, -0.52002195784918936,
1.1506912953840782, -1.1027410159889803]], [[1.0748250709997458, -12.031483796945189, 0.5867255687910351,
-1.3858167800791799], [-0.75239571068836386, -1.6427601153607181, 4.6003408516094462, 1.671495836301927],
[0.77049232914190546, 1.1427696116211459, 0.61558186168052509, -0.70125286229581629]]]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
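# From here on the Data operand itself has rank 1 through 4; each rank is
# paired once with a scalar Symbol (broadcast divisor) and once with a Symbol
# of identical shape (elementwise quotient), so both cases of the overloaded
# quotient are covered for tagged Data.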
def test_quotient_overloaded_taggedData_rank1_Symbol_rank0(self):
arg0=Data(numpy.array([-0.24327728711711405, -3.6056280310023405]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([1.2730035138874616, -4.9781403275646703]))
arg1=Symbol(shape=())
res=arg0/arg1
s1=numpy.array(3.2936567863)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([-0.073862367241458157, -1.0947188079822001]),self.functionspace)
ref.setTaggedValue(1,numpy.array([0.38650156846396272, -1.5114326265776077]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_quotient_overloaded_taggedData_rank1_Symbol_rank1(self):
arg0=Data(numpy.array([-0.60463512658968011, -0.5520360523301111]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([0.11065887825991361, 3.0681695057704825]))
arg1=Symbol(shape=(2,))
res=arg0/arg1
s1=numpy.array([2.8726661800393423, -0.60963867151620033])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([-0.21047872906047141, 0.90551350844783396]),self.functionspace)
ref.setTaggedValue(1,numpy.array([0.038521314808112543, -5.032767193294676]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_quotient_overloaded_taggedData_rank2_Symbol_rank0(self):
arg0=Data(numpy.array([[4.6512814709921759, 2.098614110043922, 1.895016528010288, 2.6977915277647391,
3.6655404112999896], [-0.48363850300250633, 2.3419850697805096, 2.1662412645385327, -1.9056051572421295,
2.497215246339004], [4.0815840480205914, -1.2641264348648407, 4.6318841341169641, -3.2591414662330562,
3.1161047733410481], [1.1685908100777258, -3.1575041951677232, -3.924192536790263, 3.2452072591922736,
-2.7913771333657267]]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([[1.6044809956172026, -0.70199805786625014, 2.4770184375160351,
0.12526292043311571, 2.8453902277172185], [-2.6270109040291056, -2.5010942322759599, 4.1068958534289663,
-2.6324008007168609, -0.32418555844239805], [3.1579467104423316, 1.086387193462234, 2.7518638232041726,
1.2174922150915348, -2.6071828689039336], [2.8178725134119729, 2.8056937296183913, 3.6699288938229007,
3.6037027182613244, 0.085370105743285407]]))
arg1=Symbol(shape=())
res=arg0/arg1
s1=numpy.array(-1.90169499965)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[-2.4458609145305559, -1.1035492602308241, -0.99648814786972451, -1.4186247154601728,
-1.9275122519554746], [0.25431970063157006, -1.2315250711689083, -1.1391107748308005, 1.0020561433862574,
-1.3131523439899424], [-2.1462874166364903, 0.66473668758670812, -2.4356608893536356, 1.7138087163503437,
-1.6385933464205495], [-0.61449959656795783, 1.6603630948998709, 2.0635236131561041, -1.7064814598537041,
1.4678363953659475]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[-0.84371100303462776, 0.36914334738060162, -1.302531919144355,
-0.065869090709308312, -1.4962390016522069], [1.3814049595325335, 1.3151920958627077, -2.1595975454495164,
1.3842392188058443, 0.17047189928077702], [-1.6605957900874224, -0.57127309777041291, -1.4470584524419186,
-0.64021423799201049, 1.3709784531112286], [-1.4817689029716252, -1.4753647299597337, -1.9298199209159168,
-1.8949951064352, -0.044891586589426921]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_quotient_overloaded_taggedData_rank2_Symbol_rank2(self):
arg0=Data(numpy.array([[-4.6443072321046044, 0.38417837617679673, 3.4279882680004921, 0.40710770023483178,
2.8688000401894911], [-0.63862918185682371, -1.0613480573447367, 2.5252641176738813, -2.3743507769422569,
2.2710813572310951], [2.0244163796382626, -0.45931604591878106, -4.2337337609916501, -3.5735365306689681,
1.5100285975528873], [1.9953319494326704, -1.458603065344283, -2.6914617917833263, 2.8267445415419745,
-2.1176995723917171]]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([[2.558345522140046, 3.8739768438957185, -2.5523095986149191,
-1.1599373064608165, -1.9295185061392859], [2.6353186995223199, -3.9407961905622004, 3.7986447608802525,
3.1282445710581239, 0.37606431107885818], [4.6848458369965034, -2.5423454969446944, -0.10041273851269938,
0.39611652325263069, 2.5670890012870329], [2.034125316253621, -4.3475018844119351, -0.69889862294772787,
-4.8039063806121574, 3.7327807333050149]]))
arg1=Symbol(shape=(4, 5))
res=arg0/arg1
s1=numpy.array([[-1.6000167654125255, -0.94021602909627866, 2.5871609076265356, -4.2349329811309033,
4.7526174859280115], [-4.0174826369961654, 1.9310149690886282, 1.3522987503107187, 4.9900274807319445,
-3.1685192513299363], [-2.6079018141064014, 1.653494600107277, 1.6560499898502972, 4.7083848545021212,
-0.40099662839500461], [-2.6239863890404425, -1.1141605513112127, -0.20010931379470431, 3.50058742814422,
-0.89214598784888999]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[2.9026616048658607, -0.40860649498399121, 1.325000025276871, -0.096130848362591351,
0.60362527568938573], [0.15896252443652648, -0.54963222674843171, 1.8673862688208871, -0.47581917857373879,
-0.71676426023854656], [-0.77626249910483291, -0.27778502928826082, -2.5565253385704674, -0.75897290495529057,
-3.7656890123909541], [-0.76042008364316904, 1.3091498021786081, 13.449957629381233, 0.80750576855054512,
2.3737141692446966]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[-1.5989491969357201, -4.1203050405546966, -0.98652912970782747,
0.27389744102894042, -0.40599070130352011], [-0.65596268550216397, -2.0407900786093434, 2.8090277832523585,
0.62689926721591294, -0.11868771538030298], [-1.7964042249043672, -1.5375589958260218, -0.060633881300754963,
0.084130022394806395, -6.4017720337496291], [-0.77520421780749937, 3.9020425550837601, 3.4925841765903027,
-1.3723143555819919, -4.1840469879883226]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_quotient_overloaded_taggedData_rank3_Symbol_rank0(self):
arg0=Data(numpy.array([[[2.6594060153757653, 3.0985169336341105], [3.3661217116389821, -0.59504905070450942]],
[[-3.9749579000522637, -4.7342067957921277], [1.1895841648424295, 0.56731685724203196]], [[0.20933515875849551,
-0.47657164361986748], [3.6005053961204521, -2.7248219369010487]], [[-4.5417601893012947, 4.6881992583085577],
[0.95219997618841479, 0.045100875229370452]], [[1.6875122232100468, 0.83285808001415162], [-0.98095523355696734,
1.3721074229557644]], [[1.7539561105027381, -0.13330491004776146], [-3.3240848531330958,
3.9526032719641933]]]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([[[4.6963315268174881, 1.5142976851258796], [4.5399300108016529,
4.2558918208130496]], [[-3.5130795594275552, -2.2965838545821149], [3.5486672192586823, -0.16210635982120003]],
[[1.2552491212091041, -0.47844908361541538], [4.446197400169801, 1.9447260435327358]], [[-4.5600746555661678,
-1.6955914419145826], [-2.0546718268066022, 4.4738577557187309]], [[1.0326084184938722, -1.7805867888057159],
[2.4947632536764397, 0.81887034400060177]], [[3.2736611846490149, 3.9295086162843287], [-0.83952910002973979,
-0.69339229981178008]]]))
arg1=Symbol(shape=())
res=arg0/arg1
s1=numpy.array(3.40074527236)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[0.78200682567782176, 0.9111287925088859], [0.98981883147769634, -0.17497607231601478]],
[[-1.1688490556353195, -1.3921086163884109], [0.34980101994451862, 0.16682133232772797]], [[0.061555671475896641,
-0.14013741267054175], [1.0587401018787967, -0.8012425861615583]], [[-1.3355190776025851, 1.3785799531693437],
[0.27999744171609176, 0.013262056289824579]], [[0.49621835452551266, 0.244904576295006], [-0.28845301691082309,
0.40347256647195434]], [[0.51575639162366171, -0.039198734210198434], [-0.97745775908348687,
1.1622756059065873]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[1.3809712726765795, 0.44528406682920152], [1.3349809077741897,
1.2514585715682693]], [[-1.0330322556006573, -0.67531781143652669], [1.0434969205434828, -0.047667892428998465]],
[[0.36911000991810128, -0.14068947989263214], [1.3074185344927436, 0.57185289922762761]], [[-1.3409045048539983,
-0.49859407456847055], [-0.6041828076649105, 1.3155521503132983]], [[0.30364180078027336, -0.52358722756407616],
[0.73359309618224211, 0.24079143788171639]], [[0.96263051845069647, 1.1554845487028675], [-0.24686621101955039,
-0.20389421855490472]]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_quotient_overloaded_taggedData_rank3_Symbol_rank3(self):
arg0=Data(numpy.array([[[0.37903407908351117, 4.7562512290710508], [-2.0567133710230481, -2.0597757209890579]],
[[3.1071466326239037, 3.7378759300098636], [-2.2870602196502565, -3.6754721739328113]], [[3.300935107148554,
2.1910214216355826], [-2.2941648800534375, -2.0181827356997148]], [[-3.5358995232538684, 0.077598647462692405],
[1.0254061925828246, 1.3424636637528886]], [[-2.5177374705422064, 3.3774425425435926], [3.7215367528674541,
-2.5394983441996635]], [[4.9947523199127613, 0.074465686539016751], [1.6903291082153435,
-1.548003996221603]]]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([[[-4.1180675764846963, -3.2588333429017147], [2.3910605684413362,
-2.5464559979058099]], [[-0.47366800112006935, -2.9943461140704954], [0.68865869503993338, 3.872833966837911]],
[[2.8476798086608648, -3.2531482133842138], [-2.5572122994418356, 2.4221777629945427]], [[4.3150298961939555,
3.6112269569743987], [0.73197953589044573, -3.7189842913799733]], [[-2.7850398483841654, -1.3377438559149857],
[-1.4556441129183781, 3.6956035182385243]], [[2.2019800882262039, 3.8700529933760066], [1.7561308799879001,
-3.6205253465229568]]]))
arg1=Symbol(shape=(6, 2, 2))
res=arg0/arg1
s1=numpy.array([[[-4.6296995406344124, -2.608733545501265], [-3.2421078749876298, -0.82737098161092959]],
[[-1.4709282084188935, -3.8180830267668897], [-3.8279718065351354, -2.1824375379963898]], [[3.32588617695232,
4.0812665637977101], [3.7989606569590251, 3.748273907432111]], [[-2.888051956795227, -2.7465995899159523],
[4.8196962245729438, 3.6666593093549196]], [[0.56914384990489708, -1.5780278215915988], [-0.33162965001135003,
3.2519356905168753]], [[1.400833424519293, 1.2066487950482028], [1.140741354057325, -0.80768829487616767]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[-0.08187012477953845, -1.8232031543708858], [0.63437536637515357, 2.4895431031175148]],
[[-2.1123713685277594, -0.97899283588263286], [0.59746004809799647, 1.6841133411346645]], [[0.99249791830620315,
0.5368483992373162], [-0.60389277152711029, -0.53842989747842174]], [[1.2243199139594205, -0.028252624717338932],
[0.21275328253155254, 0.366127188399478]], [[-4.42372779915467, -2.1402934069547039], [-11.221966289021758,
-0.78091899283408828]], [[3.5655576405358471, 0.061712808933805818], [1.4817812137723205,
1.9165858983494843]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[0.8894891645431473, 1.2492013024946684], [-0.73750185392904588,
3.0777680804658418]], [[0.32201979566984912, 0.78425379780336379], [-0.17990171554143927, -1.7745451585264647]],
[[0.85621685684695914, -0.79709280502302848], [-0.67313471508515732, 0.64621151570375557]], [[-1.4940970456023919,
-1.3147992048906205], [0.15187254585849005, -1.0142704782774759]], [[-4.8933847723199335, 0.84773147698101881],
[4.3893666108219174, 1.1364319193074606]], [[1.5719071587557463, 3.207273739681153], [1.5394645541180672,
4.482577461492177]]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_quotient_overloaded_taggedData_rank4_Symbol_rank0(self):
arg0=Data(numpy.array([[[[-0.18987391835747758, 4.5085269431466148, 2.3332490022782091, 3.5140443207840626],
[-3.8071577551437374, 4.4720793176524474, 1.5612407831467943, -3.8104362786352852], [-4.0532150845521837,
-0.57280649274337314, -0.56128092769382665, 1.5156021557578434]], [[1.7447218212497937, -2.3056218481816462,
-3.1637924745560939, -0.30131377200161236], [-2.7752874159121497, 2.6013067905592049, -3.746096460635143,
-2.9734953480155388], [-3.6161482942867931, 1.9377292214084427, -4.7468533933334172, 3.2118802123097385]]],
[[[-4.4843124086103083, 2.3827156971613297, 2.1743800991624589, 4.4296694534725685], [-3.4871548142175457,
2.955544818380722, 3.0587638749887915, -0.51089762690949225], [3.650413980256328, -3.6682136669452814,
-0.46817705349461569, -0.82910345294884724]], [[1.588186425815735, 1.5765955566881908, -4.3427217875858659,
-2.0163319014518422], [-1.0220958643238132, 1.9727101787163654, -0.065010798608873266, 1.73315365618957],
[2.738465567903118, 1.9645854229284678, -2.7935410540423575, 3.0569496206524445]]], [[[-0.21196663843321328,
-3.1770386067640386, 3.7992231608336073, -4.5172564500621428], [0.98964989530832703, -2.4738974761647881,
0.0114065763112281, -3.1306195317462948], [-1.7022084431469064, -4.8835685460571892, 3.3149881663472325,
-1.8527394999613223]], [[-4.688839386407393, -2.2094627892488683, -1.6723044389697153, 3.4908352481955269],
[-2.4719833755226062, -2.3617449879099341, -4.2339862392091119, 3.802095592491435], [-4.4231212186316329,
-1.5324562390597976, 3.2464993080986027, 4.0812450056830585]]]]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([[[[0.34365977550907534, 3.4152342295544518, -1.2774419454539219,
-0.072154050581789697], [-3.2136192243029962, -4.3100480205097416, 0.92063187364718946, 0.65983261161884776],
[1.836360802359561, 4.7090663804834136, 3.6687227420314681, 0.14984304098941781]], [[1.3240710521065289,
4.8518181175906356, 4.2513462649612741, 2.7178860490952372], [3.6396852376664448, 3.5569808409457586,
3.2627252762282897, 2.4127574016450257], [4.0085015954616114, 0.70935755386425026, -1.9722472564280968,
-4.9450465085805737]]], [[[4.6143116204743464, -1.5718311397775753, -2.8294250621955639, 0.95996522458350686],
[2.3480451792784409, 2.218190909075064, -4.7734664792996711, 0.0048653152667395005], [4.094925294988629,
0.098020816683218825, 4.9363224824892118, -2.7422959820113979]], [[1.9859137713910258, -2.1627818319757597,
-4.0065126947434919, 1.1169402196268106], [0.60687417854650949, 4.7896845328736148, -1.0372629333665175,
-3.4736146138727317], [-0.77834611882985527, 3.7473488660222962, 4.4949031076591055, 1.1504392368842806]]],
[[[3.9105694358071688, 0.85300656236961014, -4.4195308596302096, -1.6714210722352862], [-1.7040870582658831,
4.4180235769979337, 1.0471725443882738, 1.0499359823818599], [-1.2249935138047685, 1.3155129039735147,
2.6341447922360262, -4.9309444752348908]], [[-4.8684443672971778, -2.2231549054199862, 2.0972135466603978,
2.2253917338571325], [4.4575262452243205, -1.0296343337965821, 2.3085756197272289, 3.1003426493793196],
[2.1929744355036931, -1.4027552428126278, -0.58004112631573967, 2.9911401359981511]]]]))
arg1=Symbol(shape=())
res=arg0/arg1
s1=numpy.array(-3.1632147744)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[[0.060025616943284218, -1.4252990279490751, -0.73761953224333543, -1.1109091767094752],
[1.2035723233069942, -1.4137766913094203, -0.49356142231691552, 1.2046087763224773], [1.2813594313465078,
0.18108365495100007, 0.17744003102042699, -0.47913349672732097]], [[-0.55156603192733833, 0.72888564723490989,
1.0001826307086923, 0.095255552812962635], [0.87736294050361474, -0.82236173515993394, 1.1842687670000487,
0.94002322323480525], [1.1431877226776648, -0.61258224926441518, 1.500642141580049, -1.0153848035560178]]],
[[[1.4176439882937615, -0.75325764043739629, -0.6873956573419292, -1.4003694878140707], [1.1024084872260629,
-0.93434844902089775, -0.96697951076334454, 0.16151215246090378], [-1.1540202738679435, 1.1596473614855805,
0.14800672318666852, 0.26210785927619656]], [[-0.50207985833555446, -0.49841558956020954, 1.3728823672459796,
0.6374312353906596], [0.32311933814789817, -0.62364092210289068, 0.020552129161448576, -0.54790894068148144],
[-0.86572229937262646, -0.62107240988751689, 0.88313353764356695, -0.96640596313356297]]], [[[0.067009878731175476,
1.0043701845590174, -1.2010639276158939, 1.4280587226072361], [-0.31286206150703538, 0.78208330847024454,
-0.003606007534974082, 0.98969553287446743], [0.53812610415292728, 1.5438624609305427, -1.0479807419892684,
0.58571410166503546]], [[1.4823019367366943, 0.69848649137897711, 0.52867242923360025, -1.1035719978443639],
[0.78147819602019131, 0.74662808451185125, 1.3385073544404151, -1.2019720011621717], [1.3982993676015798,
0.48446164688593624, -1.0263290796352258, -1.2902206447417288]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[-0.10864256777323951, -1.0796719391910867, 0.40384293718917252,
0.022810354568944386], [1.0159345645167126, 1.3625530758742546, -0.29104311256316334, -0.20859557718270469],
[-0.58053623712851588, -1.4886963789477379, -1.1598082974708024, -0.047370492260626021]], [[-0.41858398703187571,
-1.5338250683628583, -1.3439954502512519, -0.85921641207896848], [-1.1506285526744569, -1.1244828741103539,
-1.0314586611802425, -0.76275484711676045], [-1.2672239735039237, -0.22425209935319021, 0.62349457659008678,
1.5632977401979911]]], [[[-1.4587411698434058, 0.49690939499221054, 0.89447769563233703, -0.30347772536740358],
[-0.74229710808168103, -0.70124574753092495, 1.5090554450906528, -0.001538091977223457], [-1.2945454504477103,
-0.030987720933937649, -1.5605397782151482, 0.86693322382179105]], [[-0.62781502775689713, 0.68372904978794213,
1.2665952142005037, -0.35310287137826013], [-0.19185361154036801, -1.5141825245748524, 0.32791416560174402,
1.0981279684151384], [0.24606173603165085, -1.1846646950278541, -1.4209920692190618, -0.36369305245877914]]],
[[[-1.2362642800784225, -0.26966444683838514, 1.3971643327533361, 0.52839316690162763], [0.53871999841966511,
-1.3966878293415304, -0.33104693138861208, -0.33192054832286222], [0.38726220037875464, -0.41587846472517831,
-0.83274294668644144, 1.5588396068268995]], [[1.5390811925568579, 0.70281503596024564, -0.66300068007811208,
-0.70352217366561076], [-1.4091759691118937, 0.32550250527704089, -0.72981943509196212, -0.98012397845078236],
[-0.6932739607981846, 0.44345874145667485, 0.18337076919658227, -0.94560134209205504]]]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_quotient_overloaded_taggedData_rank4_Symbol_rank4(self):
arg0=Data(numpy.array([[[[3.1087373682977262, 1.7288468484290664, -4.7350025546766474, -1.2462687606986202],
[1.1147661257221184, 1.4226862562785945, -4.8699002989410065, -0.88238981212800027], [-2.5253953718962063,
-2.7446236822447503, -0.3879205662380496, -3.5863352294770778]], [[-3.8709898597864569, -4.5958808941745852,
-0.69941717063653019, -4.1924685416353791], [-3.532941685164098, 3.0050044088519066, -0.43457496025027353,
3.7774083718666578], [-3.4471087299766379, 1.6436932375732649, 2.6583331263073102, 1.6377907641557758]]],
[[[4.7414386788893221, -3.2160736850371494, 0.96698243106325243, -0.82828848269705979], [3.5200044289955326,
4.9118207264463223, -4.3585332820909706, -0.77115446561512169], [0.16582160103948596, -0.65994126459526647,
2.6619397454888905, -2.3437138040847803]], [[1.2215261759413565, -1.7584557798190015, -1.1089650453761957,
-1.5641210633931846], [2.7598302729800022, 1.8875202967455316, -3.630936106223853, 1.2007447478800914],
[2.6512287695864964, 1.7707580266874441, 0.1953947241778895, 4.503435455349651]]], [[[4.2589981026451031,
-3.9277495689426001, -2.8473369657351677, 1.4040000652368345], [0.26972497738921852, 0.60147023681782397,
-4.4745745609656007, 1.9306805685522557], [3.6376896663673826, 2.8396270545178259, -2.6836138158505385,
2.9279825131423012]], [[-0.3090571730995233, -0.57453652295428181, -1.6271798513695179, -4.4696813085601139],
[-2.6653810514006215, -2.3768146409366411, -1.3128180331345818, 4.581635378865748], [0.50247688944640778,
4.9532451154747754, -2.7018392025036428, 2.1300845231681196]]]]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([[[[-3.4505138582863983, 1.4248257487998686, -3.9524119923215526,
0.81504856520297952], [-4.3718698276484877, -0.94288708981653535, 4.8282582970579551, -4.6722816105251033],
[3.6647072271001377, 4.7114077690304565, 0.092514361474126616, -3.8124864196234185]], [[4.165582354632777,
0.34102300070869251, 0.98343269313930293, -2.6743082425014606], [4.1326181041099233, -0.23439200672333627,
-1.5849546480857146, -2.0736002379586758], [1.6002261433791851, -1.9446791096418647, -3.7570143378185583,
1.076047727904327]]], [[[1.2380177953892098, -3.1484579868063811, -0.76442488916600748, -2.1818057802025415],
[-1.5383818327876098, -3.6057035623329137, 2.163451126002772, 2.188392374984657], [2.3709893728585962,
2.3471840573448759, -4.4074156468519838, 4.7188550456319831]], [[1.9267506253146127, -1.0414071109895739,
1.0685570204986226, 4.0226454234187585], [-3.2914506386974205, 0.15234686305474998, 4.7028886509160337,
-0.054270133945801824], [-2.3311208759539328, 3.6661719497716891, -3.8474697136079583, 1.8341420305410185]]],
[[[3.3674152480608264, 2.7014266389725128, -1.3030301583873838, -3.2387622104831584], [-4.0951687925040563,
4.7004938357073254, 4.1950637273295168, 2.0382289901206372], [1.7427744024426053, 1.1790502862054941,
-2.2495124667079791, -3.0538048367909409]], [[-4.4962463676429856, 1.5822806162106859, -1.8235905534694785,
-4.4062861037909364], [0.87041939490070597, -4.0113094886293554, -4.6982054748353583, 4.2373051220310582],
[-0.84614829051970197, 3.3176111131043982, -2.6687460434287589, 4.6770431583761969]]]]))
arg1=Symbol(shape=(3, 2, 3, 4))
res=arg0/arg1
s1=numpy.array([[[[-2.3182995299525277, -1.6619336920343253, -2.9660511871402528, 0.93254822596563614],
[-4.2541426675441816, -0.30333038370249721, 3.4789554983748676, 0.5140033510602251], [-3.2905733659402281,
3.7139824585796646, 0.78960211543734715, 2.971514572460773]], [[-0.70540352219700697, -1.3136347308463381,
0.73924055860583593, -4.9656858822166203], [2.1473770309669362, -2.1549075593317712, 1.8563623226892654,
-1.3846952158399581], [-2.4903294101424525, -0.88004658956592507, -0.26617618913608698, 3.100112111326629]]],
[[[-3.3182633250231328, -4.3300560926251794, -0.56051213669826971, -0.70765176831018195], [4.8954497928460068,
1.0868590622929659, 4.1265642365266171, -1.2873984102989136], [2.4610703924835473, 2.7606204885978327,
0.74181190652475859, -0.76191236317201927]], [[0.24802432047997591, 4.825490575588006, -1.2970889077321623,
-1.1984463553405069], [-0.17745571227021895, -1.9770464473025151, 0.90483935921876402, -0.059094745500082979],
[-0.07302552360214154, -1.2720098622673035, -0.68873120844511426, -2.3715151078623045]]], [[[0.24502664280811803,
1.2652825479404042, 1.2097368744797734, 3.0828413949333608], [-1.7791129684882536, -3.7837459912887872,
-0.93771394776597372, -2.4396818013745336], [3.7724216979832228, 1.3354698832167236, 4.4778423096546298,
3.3178355959010588]], [[4.1570198977003194, -1.4482811362101113, 4.7418722485913811, -1.7134079314039576],
[-3.9415515512997623, 2.479810994419001, 3.53014195265167, 4.0686466788459725], [-0.0011085503845311706,
-1.9052407432771523, 4.9068043014278278, -1.9049925369925189]]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[[-1.3409558722385559, -1.0402622299045123, 1.5963994738883607, -1.3364121296870544],
[-0.2620424872505856, -4.6902200792187969, -1.3998167844388623, -1.7167005045938151], [0.76746362747472596,
-0.73899748123591691, -0.49128612835996144, -1.2069048096597954]], [[5.4876247962727875, 3.4985988009114131,
-0.94612932487848034, 0.84428790726567526], [-1.6452358548201755, -1.3944934184479576, -0.23410029116552836,
-2.7279709849905682], [1.3841978960443853, -1.8677343416375281, -9.9871184381116578, 0.52830049538270318]]],
[[[-1.4288916262714828, 0.74273256887241457, -1.7251766157273958, 1.1704746879597985], [0.7190359574598254,
4.5192802791594398, -1.0562136034406204, 0.59900218879101441], [0.067377837523838524, -0.23905541066619487,
3.5884295224641907, 3.0760936787104383]], [[4.925025794153826, -0.36440974285909289, 0.85496455853216458,
1.3051239685641005], [-15.552219974623853, -0.95471722443388563, -4.0127963811817313, -20.318976547219506],
[-36.305508523717684, -1.3920945734895036, -0.28370243976458448, -1.8989697516239186]]], [[[17.381775523816618,
-3.1042470121287091, -2.3536828758399353, 0.4554240343159735], [-0.15160643655945524, -0.1589615788698745,
4.7717905568386882, -0.79136572952443918], [0.96428500247258431, 2.126313060447357, -0.59930958490084085,
0.88249776955784298]], [[-0.074345848878542772, 0.39670234500032192, -0.34315134741406994, 2.6086498297563501],
[0.67622635825267541, -0.95846604692286597, -0.37188817071462443, 1.1260833737878855], [-453.27383983445719,
-2.5998001212984949, -0.55063113108412265, -1.1181589858251946]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[1.4883813820024607, -0.85733008219827367, 1.3325501628083192,
0.8740015181080979], [1.0276735336128884, 3.1084492041565728, 1.3878470993128227, -9.0899827810220994],
[-1.1136986839535203, 1.2685595103301153, 0.11716579738756713, -1.283011180546296]], [[-5.905247455610807,
-0.25960260695070153, 1.330328377807082, 0.53855767479752115], [1.9244958125723541, 0.10877125828823039,
-0.8537959582101573, 1.4975138313746732], [-0.64257609329187038, 2.2097456347181117, 14.114764923235574,
0.34709961745346513]]], [[[-0.37309208888073375, 0.72711713646590848, 1.363797211723011, 3.0831630441799445],
[-0.31424729041971439, -3.317544737333189, 0.52427419082751925, -1.6998563595216394], [0.96339762572412757,
0.85023786030692383, -5.9414193922821363, -6.1934354575719004]], [[7.7683939284098056, -0.2158137280918167,
-0.82381170182612518, -3.3565502581680677], [18.54801176355144, -0.07705780674126915, 5.1974846175750971,
0.91835802805387523], [31.922001527224523, -2.8821883057077038, 5.5863153381622421, -0.77340516383820268]]],
[[[13.743057528229171, 2.1350382516299056, -1.0771186576814313, -1.0505769825869253], [2.3018036881512924,
-1.2422857788364075, -4.4737136920314677, -0.83544870030685348], [0.46197762126495856, 0.8828729880194196,
-0.50236527129546937, -0.92042078292356966]], [[-1.0816032827098874, -1.0925231135380433, -0.38457184375036541,
2.5716503484260449], [-0.22083166579761651, -1.6175867828867223, -1.3308828760572351, 1.0414532045906044],
[763.29258672131175, -1.7413080865559627, -0.54388679056390776, -2.4551503838225086]]]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
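# The expandedData variants build arg0 as a piecewise-constant field instead
# of a tagged value: whereNegative(x[0]-0.5) gives a {0,1} mask on the
# function space, so arg0 takes one value where x[0] < 0.5 and another
# elsewhere, and the reference is assembled analogously with
# msk_ref = 1 - whereZero(x[0], 1.e-8). A minimal sketch of the construction
# for the scalar case (plain Python, ignoring the function-space machinery):
#
#     msk  = 1.0 if x0 < 0.5 else 0.0        # whereNegative(x0 - 0.5)
#     arg0 = msk*a + (1.0 - msk)*b           # piecewise-constant operand
#     ref  = msk_ref*(a/s1) + (1.0 - msk_ref)*(b/s1)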
def test_quotient_overloaded_expandedData_rank0_Symbol_rank0(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*(-4.16921840294)+(1.-msk_arg0)*(4.13078960119)
arg1=Symbol(shape=())
res=arg0/arg1
s1=numpy.array(-3.74029681078)
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*(1.11467581688)+(1.-msk_ref)*(-1.10440155158)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_quotient_overloaded_expandedData_rank0_Symbol_rank1(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*(0.893893990136)+(1.-msk_arg0)*(0.567432073109)
arg1=Symbol(shape=(2,))
res=arg0/arg1
s1=numpy.array([-0.38687332266391294, 1.9399003858649468])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([-2.3105599113962496, 0.4607937586121677])+(1.-msk_ref)*numpy.array([-1.4667128485410923,
0.29250577877272954])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_quotient_overloaded_expandedData_rank0_Symbol_rank2(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*(4.73905345325)+(1.-msk_arg0)*(-4.15656795792)
arg1=Symbol(shape=(4, 5))
res=arg0/arg1
s1=numpy.array([[4.4998386312884939, 0.78446231481643913, -1.6813079285045305, -3.1798436625479907,
-2.0944868186665833], [4.4953342126454849, 1.6650442515799515, -0.91898434961724451, -1.1670746485185877,
4.5503544844109296], [1.9273082092830283, 1.6079265632411186, 2.6574428427115047, -3.0448375461618085,
3.5797303717654643], [-0.075958261118915793, -1.5548980284162739, 3.7360078828652803, -1.3318521748477563,
2.4342937381441239]])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[1.0531607556532867, 6.0411486488736692, -2.8186707341949422, -1.4903416507741973,
-2.2626322643857333], [1.0542160446968702, 2.8462027052727157, -5.15683804106071, -4.0606258213740682,
1.0414690700430431], [2.4588975600371681, 2.9473071479664283, 1.783313408317817, -1.5564224302276781,
1.3238576543708847], [-62.39023094310253, -3.0478226653052989, 1.2684805819015268, -3.5582428311065093,
1.9467878419876474]])+(1.-msk_ref)*numpy.array([[-0.9237148925791, -5.2986203153641043, 2.472222897099563,
1.3071611057108705, 1.9845281053476089], [-0.92464047416795137, -2.4963708646060252, 4.5230018984057123,
3.5615270738680049, -0.91346025285808652], [-2.1566700841637609, -2.5850483802851589, -1.5641231830532274,
1.3651197789397753, -1.1611399536423701], [54.721736605010705, 2.6732093564730848, -1.1125693757198694,
3.1208928711619754, -1.7075046831007554]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_quotient_overloaded_expandedData_rank0_Symbol_rank3(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*(4.78787757499)+(1.-msk_arg0)*(-3.28976848057)
arg1=Symbol(shape=(6, 2, 2))
res=arg0/arg1
s1=numpy.array([[[1.740883116607046, -2.2503321522792152], [-1.5061945225102802, 2.4982952052110088]],
[[-2.7983305802801484, 2.1687543854796365], [1.643320759290833, -1.4653409727441757]], [[3.2175020834207952,
3.7650334271118187], [1.8014810554950147, 0.12236435291525538]], [[-3.1233989028943911, 0.7696710585002986],
[-4.2753667310345724, 0.88632267436679513]], [[-4.5134534247108533, 2.7836276968220837], [-1.1959116036265316,
-0.39370816860750679]], [[1.5027698132905005, -2.504876271197424], [-4.7995705146222427, 0.81056999509517258]]])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[[2.7502579175572066, -2.1276315010390197], [-3.1787909884378931, 1.9164578969705082]],
[[-1.7109763974029355, 2.2076624292018749], [2.9135380587879744, -3.2674153415800777]], [[1.4880728748121901,
1.2716693404400943], [2.6577451705005974, 39.128042284553942]], [[-1.532906210139654, 6.2206802790783859],
[-1.1198752940269061, 5.4019576768818869]], [[-1.0608013696955167, 1.7200136284231737], [-4.003538021097242,
-12.160981043203339]], [[3.1860352348351633, -1.9114227836496733], [-0.9975637529240533,
5.9068033654860717]]])+(1.-msk_ref)*numpy.array([[[-1.889712439158538, 1.4619035137730338], [2.1841591052200662,
-1.3168053453857391]], [[1.1756182431612747, -1.5168930620264134], [-2.0019028311874436, 2.2450532277225537]],
[[-1.0224604041517171, -0.87376873121071352], [-1.8261465867424562, -26.885023311092727]], [[1.0532655555218566,
-4.2742525449554005], [0.76947047763019516, -3.7117052014083791]], [[0.72888056461647699, -1.1818277581909278],
[2.7508458573337604, 8.3558552828834873]], [[-2.1891366538499089, 1.3133457003051041], [0.68542976304877656,
-4.0585865507976919]]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_quotient_overloaded_expandedData_rank0_Symbol_rank4(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*(1.75692828977)+(1.-msk_arg0)*(-2.14899285708)
arg1=Symbol(shape=(3, 2, 3, 4))
res=arg0/arg1
s1=numpy.array([[[[0.53229011632207346, -3.0692416214850793, 4.1551847515535112, 1.9956228948074202],
[2.6857183881485671, 2.8513187715775192, 2.8150572483190999, 1.8695234954963702], [-0.89878809174293561,
2.0113600086339014, 1.3054601278816813, 1.3445035108068506]], [[2.3491180092357453, -0.60141647759364947,
-2.7471836719998368, 2.7627957847036289], [4.5424699426220343, -3.5606662061522512, 4.2638313458612025,
-2.7733238430403837], [0.74945623900841163, -1.8138683468323045, -4.0112916371182106, -1.0801639118432984]]],
[[[-3.2970678697848332, -1.0955509383278814, -4.1774865059518689, -0.0073839501556669518], [3.4766781803964601,
2.9172484094685416, 1.016230261992658, 3.9024189052964413], [-4.0414297442027198, 2.9788951120154019,
3.2762553730841475, 2.115591154300632]], [[2.9124374952334513, -1.4781274260861421, 4.8479887494343945,
-4.3582251786045347], [2.0756121461581953, -2.2517472797592286, 2.3516485621871688, 1.7775284781206064],
[3.4727084749786208, 3.8831969404469557, -4.1994122244787384, -4.1535329385031163]]], [[[3.8977522324001956,
-4.4938918457123735, 0.012474134731923492, -0.027833866034599808], [0.81441408850560748, 0.26774810790526793,
-1.1937582797303259, -2.6618906938268347], [4.1680717142998951, -2.6383819679851594, 2.73950628932384,
1.8380370529340837]], [[0.47796984802075215, -1.5175649986704984, -4.2513663435369313, 3.4215268334350561],
[-1.6384162411134087, -3.5293107080529076, -4.9362183918195077, 2.0866175314723741], [0.68432046227346,
2.8697679941819176, 1.3674204155349967, -2.6241647447917482]]]])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[[[3.300696811560822, -0.57243075210204586, 0.42282795948188318, 0.88039092673325559],
[0.65417442778904566, 0.61618094310709759, 0.62411813856314535, 0.93977331336139813], [-1.9547747749556408,
0.87350264608417738, 1.3458306785826581, 1.3067487556914268]], [[0.74790976139219179, -2.9213171823946693,
-0.63953797763024534, 0.63592405182349598], [0.38677818718938206, -0.49342684431748218, 0.41205388939105542,
-0.63350996465075404], [2.3442706836281171, -0.96860849511920954, -0.43799565045632483, -1.6265385933616416]]],
[[[-0.53287598531730074, -1.603693838691949, -0.42057066785647906, -237.93880683514894], [0.50534682780712736,
0.60225529100200292, 1.7288683042409734, 0.45021519534590931], [-0.43472939058011706, 0.58979192744417874,
0.53626109374853115, 0.83046683485988038]], [[0.60325012730574579, -1.188617610879251, 0.36240354105082945,
-0.40312930557024584], [0.84646271367295245, -0.78025109902984713, 0.74710495352909456, 0.9884107688824314],
[0.50592449738540834, 0.45244377679371173, -0.41837480958128359, -0.42299611337686482]]], [[[0.45075422577276247,
-0.39095918417482473, 140.84570413315484, -63.121963998303322], [2.1572911306009535, 6.5618700483636143,
-1.4717621813408515, -0.66003021605811329], [0.42152064796339894, -0.66591127103226522, 0.64133026327282083,
0.95587207394159879]], [[3.6758140645167345, -1.1577285264939614, -0.41326203102693038, 0.51349247727679848],
[-1.0723332970476791, -0.49781060243882297, -0.35592596402972293, 0.84199824034348569], [2.5674057501257708,
0.61221962658011064, 1.2848486608869423, -0.66951905106439324]]]])+(1.-msk_ref)*numpy.array([[[[-4.037258613650617,
0.70017063565121707, -0.51718346730044429, -1.076853178360514], [-0.80015569263157726, -0.75368383167253838,
-0.76339223948840329, -1.1494869480160286], [2.3909894632838751, -1.068427754284504, -1.6461574054884893,
-1.5983542176046435]], [[-0.91480838707695511, 3.5732191204351396, 0.78225306847357656, -0.77783268274130046],
[-0.47308906480995322, 0.6035367351675005, -0.50400512655556962, 0.77487988374500394], [-2.8674027184371207,
1.1847567993759462, 0.53573588048219245, 1.9895062531899683]]], [[[0.65178908713899886, 1.9615636132468171,
0.51442245331507475, 291.03566678779583], [-0.61811670381217776, -0.73665062258920866, -2.1146711896460979,
-0.55068225867953946], [0.53174074352400413, -0.72140601675245186, -0.65592959411445284, -1.0157883543395652]],
[[-0.7378674600225622, 1.4538617030959151, -0.44327513287507087, 0.49308899127843331], [-1.0353537683136067,
0.95436680501382209, -0.91382398358183792, -1.2089780183744372], [-0.61882328233601869, -0.55340815571281099,
0.51173658174249104, 0.5173891453132069]]], [[[-0.55134157559292896, 0.47820306559730319, -172.27590556503543,
77.207846528074953], [-2.6386980375371829, -8.0261738314233622, 1.800190954543867, 0.80731822011553245],
[-0.51558442473760147, 0.81451165265637826, -0.78444530879788221, -1.1691782021760189]], [[-4.4960845668011187,
1.4160796137002765, 0.50548286913673068, -0.62808008286899497], [1.311628146228581, 0.60889874393290866,
0.43535206234873525, -1.029893032464839], [-3.1403311395116882, -0.74883853379060061, -1.5715670416121414,
0.81892452116337022]]]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_quotient_overloaded_expandedData_rank1_Symbol_rank0(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*numpy.array([2.013685465408555,
-0.23558936453305712])+(1.-msk_arg0)*numpy.array([-3.5034214026844568, 1.5861853637787142])
arg1=Symbol(shape=())
res=arg0/arg1
s1=numpy.array(2.67653295039)
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([0.75234846823535373,
-0.088020349048417421])+(1.-msk_ref)*numpy.array([-1.3089401354733579, 0.592626877074932])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_quotient_overloaded_expandedData_rank1_Symbol_rank1(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*numpy.array([-0.240783536656898,
2.4522207164515661])+(1.-msk_arg0)*numpy.array([-2.6143091000360554, -1.7209346138742179])
arg1=Symbol(shape=(2,))
res=arg0/arg1
s1=numpy.array([-0.86125236712465636, -3.638203712883775])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([0.27957372989379237,
-0.67401962890853218])+(1.-msk_ref)*numpy.array([3.0354739212666386, 0.47301766192474731])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_quotient_overloaded_expandedData_rank2_Symbol_rank0(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*numpy.array([[-3.5524341460375641, 4.7094831286696373, -0.18763500262553379, 0.92078587043782267,
1.5633164400186814], [-3.2160743627609065, -1.475450853387378, 0.4531551970811698, 1.5869072036359357,
2.2501989264199054], [-3.0047858395011962, 3.8668179864794041, 4.7640266575428676, -4.5168020830691402,
-3.9906727379142404], [-4.1304645971682694, -0.88518480435952807, 1.012127262108268, -3.7146974028356574,
-4.6313643928370212]])+(1.-msk_arg0)*numpy.array([[1.7591260079289945, 1.3879718313395273, 1.5326835858993135,
4.2409259796446257, 2.655307107561482], [-2.3814537086615495, 3.7333616202275053, 3.1621968222159325,
4.2220383652526721, 1.5144471593836695], [1.2444878229358283, -2.6197010314766853, -4.3618395583941041,
-1.238746645541231, -1.9420904898415925], [2.9535211925786955, 0.085799908201006581, -3.3450696662897927,
-4.0496886845617643, 0.2423218533090532]])
arg1=Symbol(shape=())
res=arg0/arg1
s1=numpy.array(-4.12759091887)
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[0.86065557751820976, -1.1409762307454672, 0.045458720671131048, -0.2230806997439248,
-0.378747911492956], [0.77916499623529623, 0.35746053385346038, -0.10978684806424793, -0.3844632946500936,
-0.54516035398121099], [0.72797568813457048, -0.93682200161917983, -1.1541906044431842, 1.0942949947926262,
0.96682854874783597], [1.0006962119929996, 0.21445555573666569, -0.24521016786851502, 0.89996743278379421,
1.1220502428343315]])+(1.-msk_ref)*numpy.array([[-0.42618710102484819, -0.3362668100161586, -0.37132642648596642,
-1.0274579198869207, -0.64330675199020826], [0.57695972189861822, -0.90448925138421032, -0.7661119729091026,
-1.0228819784326422, -0.36690824966706698], [-0.30150464215020217, 0.63468039419829836, 1.056751903018037,
0.30011371521305036, 0.47051428497038333], [-0.71555569595749791, -0.020786921448245156, 0.81041695556566817,
0.98112646436194051, -0.058707817240641058]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_quotient_overloaded_expandedData_rank2_Symbol_rank2(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*numpy.array([[4.32105107438462, -2.7938865728204942, -2.6480612891877806, 4.0962844399213747,
-1.6936450259444391], [-4.5540373065810638, -3.4735704316216474, 4.0973336660426511, 3.5796783703777653,
-1.2789555045275067], [1.590790512666854, -3.6030796222704065, -1.5358180525686507, 4.2468820159522522,
-3.549813920750482], [-0.65580849674863462, -3.0765715955273709, -2.9627491908542303, 0.028934297810717879,
-3.4006116351470137]])+(1.-msk_arg0)*numpy.array([[0.44461502386601914, 0.042731475461358137, 2.311229454922783,
4.1960944541714742, -3.5527610506036744], [-0.10889510534494828, 4.1596007072981003, -4.0240613332963262,
3.5788133245810574, -0.84296801766120399], [-3.6483106190723138, 3.5039136834827218, -1.4706155051518079,
-2.1562609648378861, -4.3073932462182585], [0.23063830136591879, 4.1274722231354168, 0.13645071183945756,
-4.5418134155752554, -1.7380106134837989]])
arg1=Symbol(shape=(4, 5))
res=arg0/arg1
s1=numpy.array([[-4.2460288300055975, 1.9723502468412493, 4.5012610797315986, 4.2549986105536082,
-3.0291017439146586], [-3.3815482186462242, 0.49185174223600381, 3.1342744464723928, 2.5140825751839522,
4.9827618110943668], [-2.6160153055213375, 0.22203465130806865, -0.21905170509580429, 0.15252026869692958,
-3.4865345481390886], [-3.7892575047962449, 4.3978652002093614, 1.623750827816151, -1.9970794554891591,
-0.020308141898515153]])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[-1.0176688023992817, -1.4165265917120697, -0.58829320101238813, 0.9626993601740369,
0.55912450922023427], [1.3467314413763516, -7.0622306141083753, 1.3072670361251153, 1.4238507540333454,
-0.25667602687325908], [-0.60809679106591863, -16.227555478587014, 7.0112124984233581, 27.844705836384009,
1.0181496473755489], [0.17307044874056365, -0.69956023103684717, -1.8246329055542057, -0.014488305776312136,
167.45065364131872]])+(1.-msk_ref)*numpy.array([[-0.10471314295466831, 0.021665257238055607, 0.5134626527952022,
0.98615648046604887, 1.1728761035316908], [0.032202736233210821, 8.4570213950817141, -1.2838892707131575,
1.423506673928242, -0.16917686408053739], [1.3946059915521991, 15.780931772766907, 6.713554247425833,
-14.137537150046303, 1.2354368461707332], [-0.060866357346785971, 0.93851722034111629, 0.084034267759527922,
2.2742276993995696, 85.581961272925469]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_quotient_overloaded_expandedData_rank3_Symbol_rank0(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*numpy.array([[[3.4818338012749823, 3.4971998906886821], [-2.5239057976796686, 2.8945562475557409]],
[[-0.41913812937298012, -4.1817511712950708], [-0.9293236620515497, 3.2965731483205314]], [[-3.6083495228355655,
-2.1861278112872515], [3.5523423560955436, 4.571917845676408]], [[2.5235010801011963, -4.4530426867639079],
[0.03134464425793837, -3.7225099091273051]], [[3.3642508251751551, -2.7962331811582333], [2.5944511937764982,
-2.2330126770386505]], [[-1.3859482579701279, -1.4570666635369189], [1.8261086891132727,
-1.5056570354979151]]])+(1.-msk_arg0)*numpy.array([[[-1.0522384302138366, 4.1034138371598559], [0.079522524263561323,
-0.023108968432953958]], [[-0.026505249441055589, 1.932139350294455], [4.0042157822936666, 2.2762304926106802]],
[[1.264288211638612, -1.9946271929128692], [4.2703916328307017, -1.1300899977876777]], [[4.2801783215727394,
-0.096454065145835877], [4.4865658066576728, 3.9314784386307959]], [[3.1200236527004428, -2.7447976911193561],
[-0.85723836157011135, -2.8199900333320906]], [[4.5366088574407808, -3.5698521752315759], [1.5597344890251819,
-1.342225295850489]]])
arg1=Symbol(shape=())
res=arg0/arg1
s1=numpy.array(-0.28615667046)
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[[-12.16757867527485, -12.221276844844011], [8.8200138533333607, -10.115284899367113]],
[[1.4647155654258333, 14.613502332743044], [3.2476043999188389, -11.520168804812155]], [[12.609699145006596,
7.6396185620070858], [-12.413977106970515, -15.976974565469046]], [[-8.8185995316688608, 15.56155472317962],
[-0.10953665419564539, 13.008642793981416]], [[-11.756674481041422, 9.7716861768928052], [-9.0665410301538856,
7.8034619058510533]], [[4.8433197651583795, 5.0918493746639122], [-6.3814996385676048,
5.2616527620282927]]])+(1.-msk_ref)*numpy.array([[[3.6771410169222527, -14.339745533681443], [-0.27789855164224664,
0.080756350693547557]], [[0.092624957504787692, -6.7520332382589583], [-13.993089085985231, -7.9544904158717298]],
[[-4.4181678854695097, 6.9704025759996995], [-14.923264329180817, 3.9492002614218951]], [[-14.957464785617184,
0.33706733095137653], [-15.678704254727821, -13.738901952950792]], [[-10.903200850382252, 9.5919402707211265],
[2.9956958899211616, 9.8547066150873093]], [[-15.853584157763764, 12.475166731199417], [-5.4506312451800714,
4.6905259754858344]]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_quotient_overloaded_expandedData_rank3_Symbol_rank3(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*numpy.array([[[4.3746388890042116, 0.93842526855136477], [1.8706383189312428,
-3.9786565611021829]], [[-4.8975447110451586, -3.7180783682549334], [2.5316157131902353, -0.25911969310157357]],
[[4.2345079212590129, 2.6909111977680222], [-4.6351551680329486, -1.6723804666364006]], [[-2.3891891985415548,
3.1344558904005719], [-4.5767190111429077, 2.4120357644515966]], [[-4.2232569151431667, -3.8926752844093224],
[2.4150103819552253, -4.5535897724203158]], [[1.3100697627188289, -4.6842849101527513], [-1.1835626225415821,
-1.6287129504016793]]])+(1.-msk_arg0)*numpy.array([[[-0.53174998140680607, -2.3053360747766796], [-4.7243487743326726,
4.6049518621447536]], [[-4.0675549321859457, 3.0884984319359248], [-2.3740277209818728, -4.6692510557804932]],
[[3.1231828125619838, -3.946142358002116], [-3.2294647129609846, 0.76749287499934926]], [[-4.2013021907005701,
4.7778175384969011], [1.0778106376839505, -3.0244592978197171]], [[0.014195128518603717, -1.7862148338991855],
[1.5856103610038801, 2.294853232961561]], [[0.84538744044394143, -0.1665108768840593], [4.9401260246627476,
2.6878593818409549]]])
arg1=Symbol(shape=(6, 2, 2))
res=arg0/arg1
s1=numpy.array([[[2.0746966987933471, -0.038598186515555355], [4.5881839069107979, -4.5221184818631528]],
[[-2.8952171009517813, 3.2089615497034014], [-2.3810029676646538, -0.20007357454609931]], [[1.5952957217622385,
3.5074629033411888], [-4.8216610018108206, 1.1790345104067725]], [[1.5767066224494872, -1.604585837573552],
[3.2217724192087989, 4.7028880210110895]], [[4.9340137982802084, -0.75080095286040205], [-2.5470169322631153,
-2.2621993393540785]], [[1.0027941556835875, -2.9581897902533969], [-2.2860741942260967, 1.3808737645507074]]])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[[2.1085679133477782, -24.312677699849253], [0.40770778959266579, 0.87982138837347346]],
[[1.6915984329586637, -1.1586546958154076], [-1.0632560091570595, 1.2951220254320459]], [[2.654371765368603,
0.7671959110970713], [0.96131917326667549, -1.4184321594279894]], [[-1.5153035856663291, -1.9534360936031212],
[-1.420559374043824, 0.51288394571066676]], [[-0.85594752828117715, 5.1846967822550107], [-0.94817209550680237,
2.0129038556436383]], [[1.3064194234616144, 1.5834970851385091], [0.51772712606217608,
-1.1794799729080312]]])+(1.-msk_ref)*numpy.array([[[-0.25630251482834782, 59.726538547286722], [-1.0296772906632581,
-1.0183173839017754]], [[1.4049222529283787, 0.96246040474414196], [0.9970704586354957, 23.337669986521096]],
[[1.9577453696873013, -1.1250703048756534], [0.66978261469400868, 0.65095030571629375]], [[-2.6646061676164279,
-2.9776017129266807], [0.33453965626431137, -0.64310680677646215]], [[0.0028769940861437288, 2.3790790716155366],
[-0.62253624658671147, -1.0144345783501139]], [[0.8430318781302184, 0.056288098022877722], [-2.1609648703178355,
1.9464917437369802]]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_quotient_overloaded_expandedData_rank4_Symbol_rank0(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*numpy.array([[[[-0.44452618685267975, 3.0663649813827689, -2.1402320946883169,
0.26287923382873224], [-0.22292293247447503, -1.779872403236904, -0.83539558284346249, -4.8386253696649675],
[-2.6909471943775531, 1.1035502397379968, 0.93405709802047099, -4.5133518823731986]], [[3.5925478528654295,
0.52351538108496953, -0.17314676004431107, 1.4384665038816946], [2.2650627980321367, -4.4386411147000606,
0.38967773187251886, 0.15837556447672352], [2.8076217348733392, 1.2019742108297082, 3.7488885553516198,
2.8100664184927595]]], [[[4.1664177934939932, -4.7550890791444989, -2.0711507140466989, -2.9208312198338993],
[4.1895048918264681, -4.592776924342691, -0.74263699511176196, 0.12222597803184154], [-3.4946750822854025,
-1.571892398029048, 2.6881024097725241, -0.45229879881073565]], [[0.60058700109640029, -4.2888213982767818,
-2.2458451822005356, 0.91258303405033203], [3.3245528372575315, 2.6854365632371522, 1.3044064696261142,
3.5732020880561883], [2.7937275003598705, 0.85713829166626887, 0.71698412194859618, -0.043226853713563607]]],
[[[4.2797940742559852, -0.57449215469944281, 3.5372782003910892, -4.8902142136425928], [3.4633055922696609,
2.2397500952615088, 0.92632497108839473, 1.611561401548693], [-3.7446826480686668, 0.059061052999116015,
-1.973290483226485, 1.3807066282723524]], [[-2.2506401919494836, 2.807017817591114, -3.6668928425508698,
3.3357720442069549], [-2.317215173553099, -1.8521171123736782, 0.9170155613529376, -0.56660297630200507],
[-2.599117417540401, -0.71112092316441, 3.5146676236929366,
2.9740235371521369]]]])+(1.-msk_arg0)*numpy.array([[[[1.1035419360879652, -4.9240459106615404, 0.57110407594141677,
2.5493425213144949], [1.2839073993293217, 2.064480694195943, 2.4864284511610704, -2.2797414911063751],
[3.4328347507735355, -3.742525310292609, -4.4574196999473639, -2.784337348306626]], [[0.23082397702841462,
-4.2835955276491635, -4.20264969709529, -4.7450278917265187], [4.3504991743194115, -1.9305238512009049,
3.4289768271375802, 1.6515283567503367], [0.65954658541804267, 2.0071119325399334, 3.8939804193697718,
1.7025613989899737]]], [[[-4.4747539140927985, 0.69576514550398461, -0.28643903065104048, -2.5385265745964647],
[-0.043853132795085514, 2.9368925771712338, 1.7512112730662803, -2.5278476664471796], [-2.9694783445707151,
1.5583351617541137, -1.5856519335480899, -1.5853506441940759]], [[-2.8774784925906327, 0.18293932340222163,
-1.4449105065783749, -2.1949936564433181], [1.3493715781295066, -4.2954572018618942, 0.98668403490899248,
-4.4055318216701655], [-4.6821350103268777, 3.8610980860163053, -3.523887848809939, -3.546193394267144]]],
[[[2.4380429784019544, 3.4521340144554973, -3.7692521052369967, 4.9812805548611685], [0.84523245498652244,
3.7857178447844362, -4.170487549201054, -0.54939171272889276], [-3.5433162653492811, -1.4054231603316514,
0.69989467178326947, 4.6415130978277084]], [[-0.28700657500147031, -0.19881123713218596, -1.6586432383015293,
-3.4424186977029114], [-4.5813813287646896, 1.7385541775874067, -3.8853770583055702, 0.97367116411611665],
[0.5470898075374242, -2.534993203695346, -4.9939695365786676, 4.0817080848471043]]]])
arg1=Symbol(shape=())
res=arg0/arg1
s1=numpy.array(2.66573806719)
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[[[-0.16675538843211471, 1.1502874266315799, -0.80286661357682243, 0.09861405254483871],
[-0.083625220053744204, -0.66768465557178724, -0.31338247111570511, -1.8151165822394388], [-1.0094567157580112,
0.41397549643762593, 0.35039342744007457, -1.6930965340977453]], [[1.3476747385954884, 0.1963866546110643,
-0.06495265314148993, 0.53961284553270628], [0.84969443393929622, -1.6650702367684398, 0.14618005297240813,
0.059411525245472156], [1.0532249096146338, 0.45089734269967507, 1.4063229247809164, 1.0541419853204237]]],
[[[1.5629509308409302, -1.7837795609661897, -0.77695207174998171, -1.0956932550060703], [1.5716116085784111,
-1.7228913001128441, -0.27858588368149767, 0.045850708115761904], [-1.3109596645285277, -0.5896649852344733,
1.0083895499175104, -0.16967113325105213]], [[0.22529857996487432, -1.6088682721916423, -0.84248531760932877,
0.34233784829880937], [1.2471415996099637, 1.0073895092277732, 0.48932282045276504, 1.3404175496588318],
[1.0480127566711488, 0.32153882716974125, 0.26896270521588078, -0.016215716857401703]]], [[[1.6054818464464042,
-0.21550960380171494, 1.3269413990545242, -1.8344691377708944], [1.299192007982737, 0.84019886380726794,
0.34749286979442995, 0.60454604350793251], [-1.4047451601330245, 0.022155610007608559, -0.74024170173101733,
0.5179453470187918]], [[-0.84428407263630001, 1.0529983617442118, -1.3755638213979053, 1.2513502677786434],
[-0.86925838741351014, -0.69478585880924792, 0.34400062505739643, -0.21255013134095985], [-0.97500855373963136,
-0.26676323976338612, 1.3184594791778352, 1.115647322501641]]]])+(1.-msk_ref)*numpy.array([[[[0.41397238148416005,
-1.8471604435804707, 0.2142386316834391, 0.95633646557068408], [0.48163299130222342, 0.77444994300265702,
0.93273547081138874, -0.85520086131672679], [1.2877614620220696, -1.4039358766544225, -1.6721146592788225,
-1.044490223017593]], [[0.086589143873261026, -1.6069078880519039, -1.5765426276573689, -1.7800053013936494],
[1.6320054951625498, -0.7241986281252718, 1.2863142366988327, 0.61953887258350682], [0.24741612596359824,
0.7529291633123758, 1.4607513271074251, 0.63868292985895248]]], [[[-1.6786172539481112, 0.26100281721871821,
-0.10745205396451149, -0.95227907266657619], [-0.016450653323677209, 1.1017183620993041, 0.65693298776039311,
-0.94827308712706726], [-1.1139422815460562, 0.58457925065244776, -0.59482660845934698, -0.59471358559422671]],
[[-1.0794303191321972, 0.068626143601199674, -0.54203018832267935, -0.82340935272625038], [0.50619060992421039,
-1.6113575653696872, 0.37013540341897494, -1.6526499268222394], [-1.7564122551849053, 1.4484161566875804,
-1.3219182680327803, -1.3302857613479471]]], [[[0.91458459794258318, 1.2950012069615637, -1.4139619160741743,
1.8686309117048736], [0.31707258315789316, 1.4201387193205603, -1.5644776208624107, -0.20609365919729955],
[-1.3292064621650026, -0.52721727525641704, 0.26255192901259999, 1.7411737315658249]], [[-0.10766495723412189,
-0.074580184594690943, -0.62220788258068571, -1.2913566940695587], [-1.7186164631668384, 0.65218492356209323,
-1.4575239428531019, 0.36525387700306983], [0.20523014405310749, -0.95095359701515858, -1.8733909374079507,
1.5311737244869104]]]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_quotient_overloaded_expandedData_rank4_Symbol_rank4(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*numpy.array([[[[0.13107029610522503, -0.47078445251855072, -4.2897888183577919,
-1.9334992582613539], [0.63937604658959835, -3.33703556551216, 0.72789522858485256, -0.26278387324467101],
[-3.9668440246732195, -3.671802989288131, 2.0999037661084587, -2.4149526975860147]], [[-4.7632015166069488,
-0.32819100922220645, 3.9967626932273532, 1.2406587303878966], [1.749127295060128, -0.45247786055816697,
4.6204416104916426, -2.081666165987246], [-0.68418527302623922, -0.086844242543568484, 4.5968719860538609,
-0.91155775353879243]]], [[[1.6437881369181344, -2.3806862133298736, -4.4788477060454488, -4.9712582040083095],
[4.5521169671152215, 3.7737242105731958, 1.1068782587395374, -2.211728710301637], [0.082833519252822008,
3.0206978008416172, 4.357477209475304, 0.49732738761943907]], [[0.79602598196542029, -1.265855956484395,
4.8250166190599018, -4.2788131362835058], [-4.383003984209294, 2.5455905334955311, 4.8365355193769055,
-1.9625703825210605], [-2.6627467883711997, 4.437603110973388, -3.5973501175563993, 0.65182523283167892]]],
[[[3.0885957484528426, 3.1277669503591561, 1.6769138721837127, 0.98171782970485566], [4.192401912714864,
-0.47522991390312086, 2.1118225327681861, 2.1921637678324597], [0.3811847761647158, -0.10326953777431402,
-0.65807832498088992, 2.6865347217403768]], [[-3.1528015361925754, -3.2404126527988009, -1.0365916695998001,
3.8469436365391338], [-1.8197555129691056, -1.4900674698016427, 0.18151570218425128, -2.8062991438823612],
[-4.8043393089791238, 3.6114859202585219, 4.5737474506226459,
-4.6680289702448849]]]])+(1.-msk_arg0)*numpy.array([[[[-4.4610050641633041, -4.2116496086744872, -4.194177401285657,
4.6667981922187636], [-0.69217835042977516, -0.50405115463934003, 4.9237488959861206, 2.8925023711926752],
[1.8515933082064082, 3.7230125484719974, -3.019629418821407, 3.8938117509466394]], [[1.4837641305474625,
-4.8550584858736592, -1.7472809251385635, 3.2307178554322675], [-1.8988735029264605, 0.86622970071825556,
-4.9469479387882034, 1.4686808631017545], [-4.359409091955369, -2.4483640826280038, -4.9126815838484248,
3.1077856574793419]]], [[[1.1758972510173491, 0.55388575745847213, 0.89146624927904305, 0.63400471395171998],
[0.55786297685443831, -3.8299740887244482, -0.11367732885478077, 3.7677830690318999], [-3.5745169669757679,
-2.4896945451989994, 2.6912852814721315, -3.0192095141877564]], [[-2.6866046448316161, 4.6338119217697056,
4.5795707054588188, 1.6240797284646193], [3.3889322076803658, 0.29273714098976189, -1.0820300702831531,
3.1517798425152321], [0.0057643443868427013, 1.3712665201039158, 3.3350408828391025, 4.3948199682885214]]],
[[[-1.8319354596571156, -0.24146471255168933, 1.2551580386328345, -4.4733596377004439], [1.7721669868368117,
-1.9145129599168875, -1.1223547031438805, 2.067367827980739], [1.8871564184207337, 1.2753354170198996,
4.2895243683672284, 3.3249111447243411]], [[1.1893790492598919, -3.0721981828360221, -4.0386799091272056,
2.7354976215990421], [3.5398142903243972, 3.4570995606739494, -1.4962799143431105, 1.3335418401314989],
[1.2267226207454929, 3.271100819046973, -4.8430217828867974, 1.5873946155417196]]]])
arg1=Symbol(shape=(3, 2, 3, 4))
res=arg0/arg1
s1=numpy.array([[[[-0.49104290278642271, -2.3674315644822963, 4.0989949383597608, -1.7292635092608224],
[-4.622988996878866, -4.1541086550089714, -0.037463128987194061, -2.6489483804003844], [-1.3280913255545124,
3.198305008176586, 4.0433681250873974, -4.8641097044412218]], [[-3.8791417988042811, -1.1645861545593315,
-2.3888987565565021, -3.5719587761449887], [-2.4965270383303331, 3.9852223086984733, 0.5007169845482391,
-3.108649685462217], [0.20848826126615183, 4.6794023694682423, 4.6869612020471063, 2.7794546763479264]]],
[[[3.9549024552919079, -0.48631450907908125, 4.4520002177282301, 4.2795685433565982], [-0.044877637378450963,
2.5321944239340848, -1.7730261622972501, 1.866409581291264], [-1.5466816416052809, 2.912554476943007,
-3.3751196293325059, -2.725818247229256]], [[-4.851701238543999, -4.998185592299647, -0.44584459532102105,
-1.2909219819916928], [0.84509859941086773, -1.8873930915550119, 3.6306196902313079, -0.58336909128228509],
[-2.247976724721672, -2.3230490248981575, 4.8350209920697722, -0.32527190181639298]]], [[[2.5335174897949022,
-0.20854461379276934, 3.2780648503024352, -1.6620646994478872], [-2.1379815500652954, -3.2990143123595739,
2.9742698157590199, -3.0724429738487213], [-1.941430518641484, -3.8959116863238554, -3.0686598489329029,
4.0511881765933175]], [[0.02000325485725174, 3.2082680619577548, -4.598681116192207, 4.4506904679653321],
[-1.3308670983956148, -1.4563013194069239, 1.9676557830927131, -1.443387189237999], [3.9560651184617885,
3.6345685490807202, -3.1473146440504864, 2.5626571964669047]]]])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[[[-0.26692229001064205, 0.19885873770611004, -1.0465465029518624, 1.1181056258382696],
[-0.13830360552907706, 0.80330964898773283, -19.429643178861738, 0.099203093268639547], [2.986875938683629,
-1.1480465371192021, 0.51934518479270775, 0.49648401132503633]], [[1.2279008511818705, 0.28180912845077649,
-1.6730565421652779, -0.34733288039983173], [-0.70062421444068845, -0.11353892593910048, 9.2276510545379935,
0.66963678015000538], [-3.2816488989412327, -0.018558832023123775, 0.9807787578967162, -0.32796280554448065]]],
[[[0.41563304164901532, 4.8953633274033006, -1.0060304328401219, -1.1616260269333594], [-101.43397097149828,
1.490297970370789, -0.62428760628404523, -1.1850178719997067], [-0.053555636159778923, 1.0371300604863249,
-1.291058595851039, -0.18245067811287974]], [[-0.16407151694367492, 0.25326309579912565, -10.822193808552843,
3.3145404571095454], [-5.1863817870065798, -1.348733628879734, 1.3321515146271816, 3.3642001467839115],
[1.1845081664272488, -1.9102494451953862, -0.74401954478721888, -2.0039395631523571]]], [[[1.2190939122756466,
-14.998071124806016, 0.51155603954235385, -0.5906616210734561], [-1.9609158519571066, 0.14405209220302512,
0.71003058349945258, -0.71349209293425142], [-0.19634221905167631, 0.026507155728614104, 0.21445137531607825,
0.66314735446318107]], [[-157.61442618672615, -1.0100192970849919, 0.22541064348860901, 0.86434760274348943],
[1.3673457816808718, 1.0231862389635615, 0.09224972362744735, 1.9442455668211094], [-1.2144237177893482,
0.99364914197916654, -1.4532221807783379, -1.8215580986331779]]]])+(1.-msk_ref)*numpy.array([[[[9.0847562175307548,
1.778995292561067, -1.0232209271680595, -2.6987201009137105], [0.14972528615081884, 0.12133798041887074,
-131.42919529410358, -1.0919436530339175], [-1.3941761929913368, 1.164058005397852, -0.74681041285503469,
-0.80051890017845562]], [[-0.38249803887159334, 4.1689131086319398, 0.73141690092266454, -0.90446672481449997],
[0.76060602339657379, 0.2173604465747247, -9.879728652007838, -0.47244978099981061], [-20.909614121584703,
-0.52322153328871157, -1.0481592170429599, 1.1181278413803195]]], [[[0.29732648638247061, -1.1389455735288434,
0.20023948914673259, 0.14814687684718106], [-12.430756373157672, -1.5125118563266158, 0.064114862641108966,
2.0187332441924042], [2.3110877318397747, -0.85481475622463277, -0.79738959712203872, 1.107634200210057]],
[[0.5537448644793036, -0.92709881139841899, -10.271674824635689, -1.2580773672773902], [4.0101027383583956,
-0.15510131000245292, -0.29802903157125132, -5.402719975423115], [-0.0025642366860165779, -0.59028737896051597,
0.68976761182818369, -13.511219209980442]]], [[[-0.72307985519587537, 1.1578563845894034, 0.38289603651893378,
2.6914473541170971], [-0.8288972310274092, 0.5803287826743494, -0.37735470305926527, -0.67287427157387836],
[-0.97204427369426094, -0.32735223991262846, -1.3978494129477628, 0.82072493298010418]], [[59.459275890228867,
-0.95758774625624643, 0.87822569277673845, 0.61462320089170264], [-2.65978045034828, -2.3738902894641662,
-0.76043784039873807, -0.92389751694797129], [0.31008655924816314, 0.89999700786337411, 1.5387790324814787,
0.61943307038110118]]]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_pow_overloaded_constData_rank0_Symbol_rank0(self):
arg0=Data(3.78107997963,self.functionspace)
arg1=Symbol(shape=())
res=arg0**arg1
s1=numpy.array(0.0800878500517)
sub=res.substitute({arg1:s1})
ref=Data(1.11239752186,self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_pow_overloaded_constData_rank0_Symbol_rank1(self):
arg0=Data(4.90258552017,self.functionspace)
arg1=Symbol(shape=(2,))
res=arg0**arg1
s1=numpy.array([1.2995894043345173, 0.78253444504138814])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([7.8934933252568413, 3.4696165734693301]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_pow_overloaded_constData_rank0_Symbol_rank2(self):
arg0=Data(3.92827709573,self.functionspace)
arg1=Symbol(shape=(4, 5))
res=arg0**arg1
s1=numpy.array([[4.1349425587832229, 0.91098413715014492, 2.7789836134951633, 0.12883166700263957,
3.6410211713020102], [4.0888877726009252, 4.0211549295632993, 4.6892515161560091, 3.4849861795634953,
4.0032210210582928], [4.299348637922626, 1.7371642461914261, 2.185930497819307, 1.7970091028967969,
4.5443361828998592], [3.1255490514768072, 2.9012687039592109, 0.33081920581130919, 1.5123642954465053,
4.3636469435748326]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[286.41226576143146, 3.477832814129735, 44.799992375326617, 1.1927572059187601,
145.71440254454342], [268.92164649005002, 245.12000850057427, 611.45312320484868, 117.70262752348444,
239.17864250542837], [358.65926391850468, 10.770271935452501, 19.901441052324959, 11.689248981538858,
501.48063851421955], [71.979339350865629, 52.959027255929435, 1.5724377868886246, 7.9186302143497267,
391.64108226882905]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_pow_overloaded_constData_rank0_Symbol_rank3(self):
arg0=Data(3.16126608038,self.functionspace)
arg1=Symbol(shape=(6, 2, 2))
res=arg0**arg1
s1=numpy.array([[[2.7962312793949295, 2.0684099882736957], [0.38062016949789762, 3.4476824424756334]],
[[2.041547872112385, 0.47599085293280569], [4.1441460833378203, 0.81184164478938559]], [[4.3084580838333535,
4.0366716734419619], [0.46138540150130164, 3.7240656768046549]], [[0.67126378380454188, 2.6436153849274531],
[3.6341579561428712, 0.79424640609140063]], [[2.8792963151587307, 0.15059024316863731], [1.4454736834885067,
0.20711228881254098]], [[2.5282275840574844, 1.1701013108471818], [4.7723602197014845, 4.287233089759706]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[24.98774742669173, 10.812287661988025], [1.5497341316804971, 52.888612151720686]],
[[10.48311297133106, 1.7295347337178362], [117.89549729799067, 2.5457046637850267]], [[142.43948278798089,
104.17774369773555], [1.7007034469541953, 72.69681517764694]], [[2.1654024349191139, 20.962369213688653],
[65.550199410739893, 2.4946685023586084]], [[27.494642910796053, 1.1892528402215572], [5.2788095694213668,
1.2691921182937207]], [[18.355342751374547, 3.8449267600833394], [242.95005261713331,
139.00193314904641]]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_pow_overloaded_constData_rank0_Symbol_rank4(self):
arg0=Data(0.993829389255,self.functionspace)
arg1=Symbol(shape=(3, 2, 3, 4))
res=arg0**arg1
s1=numpy.array([[[[0.70582890780189012, 4.459679941127856, 1.7698082654335738, 0.7425015240513263],
[0.17863451536630379, 0.23071258083531751, 1.1519692954618987, 1.546358793346283], [0.43213129061136418,
4.7802564121893596, 3.8019765535927128, 1.7733817493669464]], [[4.4818458367006624, 2.8755675026947229,
1.8170070828708857, 3.689568151497173], [3.2656926466727252, 0.84976244428097047, 0.086645785873186429,
4.3622867616943184], [2.510711276781572, 1.1909328518939111, 2.7614763003730736, 2.859226851647112]]],
[[[2.2999398221257867, 3.7396236508286136, 3.6474360642296735, 0.22271721684765292], [2.5248956632409825,
4.8262190185960856, 0.37713575312925274, 0.15680972420135947], [2.5933818932928228, 4.6152058845108996,
0.37698282218649021, 0.48371699902902621]], [[1.5772879034133864, 0.010480084060075445, 2.7768860672153597,
4.4433640193360082], [3.4675949148609839, 1.0817479487868906, 3.3899814682499194, 0.87646369237945143],
[0.49739388005553137, 1.3463814284307101, 1.0826007333159808, 0.79496282667929863]]], [[[2.4363105443735353,
3.4203323611137431, 1.8278252124219663, 3.6768660714690418], [3.8007621375663323, 3.2788349807887727,
4.723911572993079, 4.5936800906463686], [1.8071402427683898, 3.3902207884220079, 0.37997536953424454,
4.7038449248986476]], [[3.2488654149278418, 1.6048006810659028, 4.9317774433591417, 2.1007443740528178],
[3.054516223687195, 0.55422473313263965, 3.5194589681498765, 3.326550366831976], [4.3669194535152762,
0.047451200822730306, 0.70452802796481673, 0.1404011071625294]]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[[0.99564064100720095, 0.97277331018429902, 0.98910515232215201, 0.99541466269551271],
[0.99889491206162018, 0.99857297113553989, 0.99289498462376558, 0.99047412161372594], [0.99732879902719118,
0.97084496836204925, 0.97674154691389037, 0.98908327465377543]], [[0.97263985400820108, 0.98235848653943914,
0.98881622960918547, 0.9774213780802502], [0.97998918013383751, 0.99475401042354128, 0.99946382997432526,
0.97335991097124475], [0.98457951330340654, 0.99265555323835852, 0.98305246670030244, 0.98245785140758057]]],
[[[0.98586485161567172, 0.97711859061328932, 0.97767630927362992, 0.99862239086150251], [0.98449307348906423,
0.97056880612221985, 0.99766835491976258, 0.99902986140398409], [0.98407582438051522, 0.97183730768595933,
0.99766929931395654, 0.99701040129524177]], [[0.99028452065604722, 0.99993513323790106, 0.98295870540613939,
0.97287155660372138], [0.97876523296044249, 0.99332664123569459, 0.97923555070780133, 0.99458961757808051],
[0.99692600178713375, 0.99170089495067859, 0.99332139797090802, 0.99509148295090599]]], [[[0.98503303654727714,
0.97905160511359157, 0.98875001942994056, 0.97749822833139455], [0.97674888901436785, 0.97990946373697985,
0.9711836185354098, 0.9719668027506676], [0.98887662147928368, 0.97923410014112311, 0.99765081960374968,
0.97130425391482611]], [[0.98009125718466428, 0.99011589292747115, 0.96993486503795368, 0.98708113870220937],
[0.98127098560076487, 0.99657537729014167, 0.97845107590229674, 0.97962009487115176], [0.9733320001780138,
0.99970633311899726, 0.99564865802941016, 0.99913133289538214]]]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_pow_overloaded_constData_rank1_Symbol_rank0(self):
arg0=Data(numpy.array([2.866799909147558, 4.876125586700824]),self.functionspace)
arg1=Symbol(shape=())
res=arg0**arg1
s1=numpy.array(2.71414665156)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([17.435868015676721, 73.711650960189885]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_pow_overloaded_constData_rank1_Symbol_rank1(self):
arg0=Data(numpy.array([2.2084898163228521, 1.4985706335851434]),self.functionspace)
arg1=Symbol(shape=(2,))
res=arg0**arg1
s1=numpy.array([2.9685871945135585, 4.2199180780027188])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([10.506962956516261, 5.5124348187786136]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_pow_overloaded_constData_rank2_Symbol_rank0(self):
arg0=Data(numpy.array([[0.77922995940155448, 4.9091672288365764, 4.6023034116074584, 4.0585585948105418,
3.8131836319105985], [0.63921561638913638, 1.9638083205927952, 0.87156249802113606, 1.8813609152354294,
4.652603835129498], [4.4185672711880182, 4.342637431217093, 2.1485008678971482, 3.5999975099694348,
3.0608084623767837], [1.5079573236002384, 0.75251090029482415, 1.7564448317741483, 3.6923994643349598,
1.3401399799253775]]),self.functionspace)
arg1=Symbol(shape=())
res=arg0**arg1
s1=numpy.array(1.88416909919)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[0.62499961485879429, 20.043593846903835, 17.748329103451528, 14.004750781266964,
12.452150471142952], [0.43033512176215821, 3.5665495273262708, 0.7718134533153036, 3.2896665757733499,
18.115582822820418], [16.436880911894658, 15.90873223635964, 4.2247353523330435, 11.17293869306093,
8.2299551314515949], [2.1682788679762832, 0.5852335625438071, 2.8902325719037805, 11.71940284996929,
1.7360906938610048]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_pow_overloaded_constData_rank2_Symbol_rank2(self):
arg0=Data(numpy.array([[1.3915653950314695, 0.72721703282696448, 3.1310378168895978, 2.6348429358346048,
2.0782491529918197], [1.6459194387429872, 2.562889776578305, 2.0761088251705457, 0.22461806677113369,
4.206926695670866], [3.2315466420058132, 0.54551438767992344, 2.3962603907765661, 3.5705685721719806,
1.576440920087347], [4.5957055774395901, 2.6996348087339399, 1.4523409725849665, 2.7420672940204591,
1.9685543649271546]]),self.functionspace)
arg1=Symbol(shape=(4, 5))
res=arg0**arg1
s1=numpy.array([[2.5855195439682124, 1.0827101090847635, 4.9957557839533946, 0.96832195337783145,
3.2516725650946836], [2.885435871411993, 3.9749315017293902, 4.653244839630724, 2.3530666812704477,
2.8630405667066632], [2.0653715617592563, 1.7593252053700164, 0.52661102213923405, 0.82097677205000363,
2.4546391327367552], [4.7379088202900395, 3.5407454959024718, 1.2463674187947462, 3.0822199358702487,
1.0765535893703142]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[2.349802789241195, 0.70830818466492951, 299.45926695476669, 2.5552067368444966,
10.790674928106091], [4.2114637167679394, 42.137956376461631, 29.939356280651772, 0.029778724884887482,
61.155701008278086], [11.275136610799086, 0.34431546555745196, 1.5844071459062889, 2.8430503327720711,
3.0565212302888405], [1374.5619596132401, 33.662107628675898, 1.5921977639828369, 22.400261085750593,
2.0733157810519325]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_pow_overloaded_constData_rank3_Symbol_rank0(self):
arg0=Data(numpy.array([[[3.0660567741514542, 4.3056273316538842], [3.0504750623805625, 3.7871768115665883]],
[[0.69274312119694637, 4.6538571932210537], [4.8355982528719377, 2.9815267638605385]], [[1.1230458665786869,
2.1830008754633661], [0.91820982431621256, 0.077119031117770812]], [[2.9614071331788696, 3.5610220293470181],
[2.6599569889924082, 3.6221415743920904]], [[1.1098062342497441, 2.676585421551215], [2.0821210491789137,
0.94398503882355267]], [[1.1824417768595235, 1.7231439179679477], [2.2607396485998188,
3.3720654338288014]]]),self.functionspace)
arg1=Symbol(shape=())
res=arg0**arg1
s1=numpy.array(0.560759226052)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[1.8743657165500538, 2.2674712116369911], [1.8690182094624257, 2.1100648252921164]],
[[0.81395313946463899, 2.3685487640913623], [2.4199799687700909, 1.8452103631477881]], [[1.0672369661252965,
1.5492717722289933], [0.95327751240899017, 0.23766519192617833]], [[1.8382175994812311, 2.0384524232966164],
[1.7308218130238717, 2.0579983247753746]], [[1.0601632912957684, 1.7368809476716751], [1.5087078979960178,
0.96819192271565813]], [[1.0985300082097622, 1.356811420052328], [1.5799715425927656,
1.9770722875528537]]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_pow_overloaded_constData_rank3_Symbol_rank3(self):
arg0=Data(numpy.array([[[3.3957720535524389, 2.4552077579471865], [4.7087905663535405, 1.2879146157483581]],
[[0.040773334485048905, 1.338962649449466], [0.15911448294230743, 0.69782664300100028]], [[1.1490297281690691,
3.6797714112654387], [1.6806152634816163, 2.8654713323448382]], [[3.7833659416351537, 4.1661154468039028],
[3.469956553077123, 3.9754846411912803]], [[1.8241168951232549, 4.4870775362426878], [1.3424534585235119,
1.3802296431342149]], [[3.3210730308450875, 2.5390685855193476], [1.1031930645119352,
2.4082267572037321]]]),self.functionspace)
arg1=Symbol(shape=(6, 2, 2))
res=arg0**arg1
s1=numpy.array([[[2.0527495636735842, 1.3882529561124286], [1.3707482038402852, 0.39288536207269026]],
[[2.3705399572424319, 4.3811434485491185], [3.6465442725413943, 0.81187293080800826]], [[1.5760458701127449,
2.4821521454352227], [2.2611045908095928, 1.222781185593224]], [[4.0828137815142842, 2.0166310563465872],
[3.0037225489944039, 3.8035215103515534]], [[1.8270206735321162, 2.0909380886426585], [1.700233947322449,
0.94341612832048682]], [[3.4512483540336021, 1.9153164559186842], [3.4796412330592381, 4.631860024508482]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[12.299397442261979, 3.4796948710857625], [8.3635241772012918, 1.1045185700581053]],
[[0.0005079738169757814, 3.592450715967165], [0.0012274367495971861, 0.74669434602170293]], [[1.2447577564334256,
25.37776750720391], [3.2345019416156076, 3.6228533120009838]], [[228.75452373327897, 17.773353840406003],
[41.974303066305048, 190.45529976112371]], [[2.9988077003642086, 23.078919356327741], [1.6499038138569069,
1.3552903973767674]], [[62.959505654838587, 5.9577132902226237], [1.4073820176182781,
58.609608978624692]]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_pow_overloaded_constData_rank4_Symbol_rank0(self):
arg0=Data(numpy.array([[[[3.6942258331411808, 1.2537649796273209, 0.42770554254169135, 4.700673631145162],
[4.4515625216392314, 4.7809283564358287, 3.701290949078563, 2.0902986899469349], [0.95965882113153478,
2.5044950882367898, 1.5843001497355667, 1.9737450842678197]], [[3.4205167033036328, 1.4681649100887066,
1.1447801230251771, 3.5317125976738892], [2.925503328877014, 3.6994047548692786, 3.3897537710459931,
3.2244141062166558], [2.6498609343187218, 1.949698950181703, 2.3949386461700057, 1.6425500663306651]]],
[[[4.2780973886416191, 4.0618406835817691, 0.76281370649400426, 2.2204933921619303], [0.35215441411200815,
1.3818360860420937, 2.5239753242436236, 4.5811725609009564], [4.1716817147405427, 0.44398290268201318,
1.3895634892776816, 0.88264326772164403]], [[0.91724435354033873, 2.1538723799206236, 3.4971391199135713,
3.3554017029685319], [2.1064185289212674, 2.2856357366084303, 0.51918179267857123, 4.6850674663899552],
[4.7379107746617768, 1.3230622992714034, 4.6315184230420652, 1.8428337487384241]]], [[[2.4876080017868238,
2.6810602120023037, 4.5081405094604081, 2.4120441389715914], [4.8306123550480162, 4.2210604925186157,
4.4800528271783264, 2.9281844533079764], [0.63995448593153881, 1.0111923343576328, 3.3513915338885765,
3.1411718524458285]], [[1.4289737147392298, 0.45352921851220701, 2.0118969615009972, 3.5980186028034558],
[3.616620626102065, 2.7726087163304962, 2.3432614104421239, 2.8491353233149006], [0.5320613661271224,
4.4831794866209096, 1.2664788279071584, 0.28049422891374309]]]]),self.functionspace)
arg1=Symbol(shape=())
res=arg0**arg1
s1=numpy.array(1.06920335544)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[[4.0438772782241736, 1.2735412636471706, 0.40329131191155071, 5.232097558570211],
[4.9361882834372706, 5.32766320430086, 4.0521468468151465, 2.1997222137283998], [0.95692805287595428,
2.6687811343412777, 1.6355615272939708, 2.0688366138549434]], [[3.72436858199117, 1.5077044564832467,
1.1555422923156822, 3.8539651197510056], [3.1511076657220811, 4.0499389894595748, 3.6885660756269054,
3.4965304503075489], [2.8347289565408293, 2.0418991380778091, 2.5441507235354024, 1.6999385690588711]]],
[[[4.7308080931849492, 4.4755720332454247, 0.74865452317740788, 2.3465237379092794], [0.32761633574092269,
1.4131120282318979, 2.6909816922836431, 5.0900078595749889], [4.6050969731100313, 0.41972303754885415,
1.4215628284095434, 0.87505100482757892]], [[0.91177753244543847, 2.2713282731526636, 3.8136397625647156,
3.6486130365880771], [2.2178646706058562, 2.4202014010190394, 0.49615640785649234, 5.2135271144615709],
[5.2764248443312534, 1.3489444284266585, 5.1498394369137142, 1.9224660983894153]]], [[[2.6495455109781756,
2.8704290671798018, 5.0032967403347266, 2.5635842415978054], [5.3868816518123941, 4.6634019262660349,
4.9699739610882974, 3.1541954979564295], [0.62048881052075555, 1.011971499899829, 3.6439508643357619,
3.4001032908251019]], [[1.4647126684483562, 0.42937939938465319, 2.1116224485244599, 3.9313784965136467],
[3.9531144908142579, 2.9753493135007223, 2.4854989078477878, 3.063238048859795], [0.50932777150901942,
4.9736826666605927, 1.2873542029993255, 0.25687297390081976]]]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_pow_overloaded_constData_rank4_Symbol_rank4(self):
arg0=Data(numpy.array([[[[0.99270068488685292, 2.3472808551478828, 3.1312848033300495, 0.28480210095870712],
[4.1760257284106208, 1.6103974533434464, 4.5276876502334824, 0.85611009354413747], [3.2374935042795578,
4.9456771875167229, 4.9802453823156245, 1.2149127690087014]], [[1.8848010664604495, 4.1824957168331602,
2.4934752197401808, 0.28374659653088119], [4.6634122856431866, 4.948465351830853, 4.5396716570475313,
3.7492655830226167], [4.2660162764714533, 0.061690267720591635, 4.6305624131357668, 4.183404037793947]]],
[[[2.7660211396774219, 0.29841308804884997, 2.0624440665165937, 3.4380484119931474], [4.1607961864955918,
3.0023473648209906, 2.7294567940865302, 4.1979140776413049], [1.1126946263221209, 0.12554655331410369,
2.1673178358996532, 1.5768419503465658]], [[2.1884187117240974, 2.9636992993533875, 4.0944289364844799,
0.20431967084707772], [4.9645724723200422, 4.3471545530472664, 3.8082987236588131, 4.9248087663466533],
[0.94748379921640247, 4.1019196615549953, 2.7446431670111022, 3.2260449250615477]]], [[[4.815971417067975,
0.55080927709037153, 4.2444312341013362, 2.4110979642875288], [3.8869586759056411, 3.1718207539088712,
2.2815482728827998, 4.5248091593844997], [1.7555444263914652, 4.8358722724194543, 0.40104556755678317,
0.72504095284272219]], [[1.6815499163639582, 4.7423275338548976, 4.654236755712521, 4.137431652375871],
[2.4100531826283573, 2.054034553663679, 4.6472521248761733, 0.98452264700986813], [4.8377266538556594,
3.082865747001903, 2.5083476832499958, 3.2444587922972503]]]]),self.functionspace)
arg1=Symbol(shape=(3, 2, 3, 4))
res=arg0**arg1
s1=numpy.array([[[[0.21753326670751072, 4.1682471517901032, 4.1014560095520087, 4.6700944718391098],
[3.4121094051365737, 1.1029489552818827, 2.0092241837599003, 0.33422517551710779], [1.1473193657482628,
2.4557624148843549, 3.4564859349197894, 1.8642853910874539]], [[2.7889107332297205, 0.14971366335904424,
3.0278221311653697, 3.2041631900025194], [2.9835640858496824, 3.958665469023726, 0.91439051931800353,
1.7058037073741574], [2.3938649784363508, 3.0154719728696082, 2.6948872188976947, 3.1007570939621973]]],
[[[2.6265063521385672, 4.5831638786700442, 3.219142414188263, 2.7281296715792824], [3.5791848830985131,
2.9499608289992416, 0.6944852503432879, 3.4643337801246901], [3.8686503402845167, 2.0190151391117173,
4.4489402687543533, 0.060998130418435118]], [[2.7466045097612528, 1.6726966829566545, 3.0165307005368542,
4.277931081219279], [4.5278288671598306, 1.9114000108412157, 2.2131466233201289, 1.7114320346159109],
[4.0455182963260539, 4.4214997321041718, 1.833260490213896, 1.6277957064140334]]], [[[3.1805485901595181,
4.6387720949831293, 4.6764390004305776, 0.92658048030955986], [3.6108344682454585, 2.5431872524441639,
0.36463412919482818, 1.0071871900401932], [2.8537457951315179, 3.6983860534861037, 3.23721845983441,
0.55328298453643388]], [[1.2875876430466009, 4.3485253099923922, 1.8266284021761314, 0.0290062123030045],
[3.4490819270809823, 0.22338889791366173, 3.0381024201828888, 1.9556163422370025], [0.44312399416135478,
0.1273534956738884, 1.2491109040783503, 4.9052871585448905]]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[[0.99840760191037914, 35.043440814220617, 107.94047254802666, 0.00283573184979817],
[131.25378495858195, 1.69136219457744, 20.78752777038369, 0.94940103216918048], [3.8492184440675246,
50.681976498619285, 257.06170337461504, 1.4375275893388262]], [[5.8572122227013041, 1.2389032321759992,
15.902123865064281, 0.01766446796707654], [98.882782586287178, 561.27606329168657, 3.9881999273730413,
9.5288296866662598], [32.224695168018663, 0.00022487036760118656, 62.202503506565009, 84.569345856197444]]],
[[[14.472256933722013, 0.0039174568156997774, 10.281133718280593, 29.048830143480959], [164.49282671202778,
25.614805564866785, 2.0083921055841238, 144.01116277295671], [1.5115151066807018, 0.01515211425419326,
31.224795639114653, 1.0281694802522898]], [[8.5941661198499677, 6.1551040178885987, 70.258681959622137,
0.0011208680343288049], [1415.2525647254004, 16.590718644897333, 19.286031315896192, 15.310076132459979],
[0.80393467182931233, 513.24239208001916, 6.3658859963948018, 6.7299449373066729]]], [[[148.35743563372318,
0.06288742447799564, 862.89828006183689, 2.2602309897996125], [134.58102586913591, 18.833096590582731,
1.3508986287641715, 4.5741688350871659], [4.9829808253152352, 339.97903268955292, 0.051933953881959871,
0.83702989264405847]], [[1.9526351491620346, 870.09434438616336, 16.592467915656908, 1.0420511218052597],
[20.779788947462336, 1.174446125966941, 106.41687348793162, 0.96995612336544568], [2.0108561274370049,
1.1541707905965375, 3.1541298870902299, 321.58780899817299]]]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
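#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# The taggedData variants that follow differ from the constData ones only
# in how arg0 and ref are built: a tagged Data object carries a default
# value plus per-tag values set with setTaggedValue, and the reference is
# tagged the same way. A minimal hedged sketch with made-up numbers (not a
# generated test):
def _tagged_pow_sketch(self):
    arg0=Data(1.5,self.functionspace)      # default value
    arg0.setTaggedValue(1,2.5)             # value used where tag 1 applies
    arg1=Symbol(shape=())
    res=arg0**arg1
    sub=res.substitute({arg1:numpy.array(2.0)})
    ref=Data(1.5**2.0,self.functionspace)  # 2.25 on the default branch
    ref.setTaggedValue(1,2.5**2.0)         # 6.25 on the tagged branch
    self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")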
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_pow_overloaded_taggedData_rank0_Symbol_rank0(self):
arg0=Data(1.32457126708,self.functionspace)
arg0.setTaggedValue(1,2.5302736409)
arg1=Symbol(shape=())
res=arg0**arg1
s1=numpy.array(0.0639287997913)
sub=res.substitute({arg1:s1})
ref=Data(1.01813209787,self.functionspace)
ref.setTaggedValue(1,1.0611432451)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_pow_overloaded_taggedData_rank0_Symbol_rank1(self):
arg0=Data(2.2846884006,self.functionspace)
arg0.setTaggedValue(1,4.92570746786)
arg1=Symbol(shape=(2,))
res=arg0**arg1
s1=numpy.array([2.6855694798894114, 0.16312791921292588])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([9.1971819304828841, 1.1442862992395297]),self.functionspace)
ref.setTaggedValue(1,numpy.array([72.389000622754097, 1.2970626822826592]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_pow_overloaded_taggedData_rank0_Symbol_rank2(self):
arg0=Data(2.21652639426,self.functionspace)
arg0.setTaggedValue(1,4.064025676)
arg1=Symbol(shape=(4, 5))
res=arg0**arg1
s1=numpy.array([[0.2933486938821081, 4.9606241062735927, 2.0417721913214839, 1.2853164260627612,
1.0499456227730608], [3.7028655976728868, 1.001170934572813, 2.6365100719695787, 0.82410423612955774,
3.49523244526962], [0.89663456265730868, 2.6778638320352517, 2.7192454387062397, 4.1586374080130568,
2.9049962967419707], [1.1217652316876965, 2.8499266444045688, 3.9941585553088488, 3.2961533103938754,
4.8280322509905496]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[1.2629980959890224, 51.850548679528011, 5.0790832260166887, 2.781628104818838,
2.3064166037170106], [19.053743574603313, 2.2185931490955131, 8.1539886291305006, 1.926950272040735,
16.151287980578164], [2.0414665862164805, 8.4268444216889868, 8.7090237768086354, 27.385979762644116,
10.096678215044131], [2.4421022786752205, 9.6636775100828736, 24.025498067528311, 13.784496182053152,
46.657336659920524]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[1.5088170297129386, 1049.0688973598117, 17.512588025836909,
6.0631975570649415, 4.3588422505812687], [179.83899364529634, 4.0707036936631003, 40.319932430095271,
3.1757300928201744, 134.41394191148893], [3.5156966409819765, 42.727003304736336, 45.27954251470117,
340.74464873701299, 58.751128374701025], [4.8206562660434331, 54.38525791734746, 270.56311294367163,
101.67490980446708, 871.0866275632236]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_pow_overloaded_taggedData_rank0_Symbol_rank3(self):
arg0=Data(1.91007445723,self.functionspace)
arg0.setTaggedValue(1,2.03887271777)
arg1=Symbol(shape=(6, 2, 2))
res=arg0**arg1
s1=numpy.array([[[0.80913868611567985, 4.9530927395577313], [2.3778199357445504, 3.0438567271974866]],
[[4.5320412624387512, 2.9619886850375607], [1.1655661823312315, 2.0467038300183558]], [[4.0312321266790025,
4.8579928594157327], [2.7986174468536209, 1.9092888523673033]], [[1.8551792128066371, 0.25103319953744346],
[0.46046274064934245, 0.65517785698260234]], [[4.8533885944970478, 3.8765848236891873], [0.21870605031792009,
2.7299809081727413]], [[0.1462666195660515, 2.0992108859870315], [4.2223686511204708, 2.6853651998709744]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[1.6881408066763193, 24.664265417521737], [4.6589387298581437, 7.1693013734409963]],
[[18.781554836108096, 6.7993562971584689], [2.1260952718063053, 3.7603365776127715]], [[13.582477249987816,
23.192113926087377], [6.1171941494375517, 3.4403782937432652]], [[3.3219926609349035, 1.1763944184475064],
[1.3471414218010285, 1.5280512879240291]], [[23.123113221966992, 12.288965241534358], [1.1520395805917791,
5.8514290694100302]], [[1.0992798740910361, 3.8903067155252629], [15.370852789423738,
5.6848981306905602]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[1.7796701280506677, 34.075162084556929], [5.4409405721417663,
8.7445845584010726]], [[25.244615802104185, 8.2491655364897767], [2.2941122013099484, 4.2976390039468724]],
[[17.669463763447823, 31.843072635800556], [7.3428485812037074, 3.8968626324890954]], [[3.7495064865206524,
1.1958237934315636], [1.3882338189395766, 1.5947975222619479]], [[31.738796403435416, 15.826213597281106],
[1.1685989451986258, 6.9924457563436029]], [[1.1098222979848305, 4.4614404094987661], [20.246927178426528,
6.7736920242494039]]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_pow_overloaded_taggedData_rank0_Symbol_rank4(self):
arg0=Data(3.79610015957,self.functionspace)
arg0.setTaggedValue(1,3.00563767393)
arg1=Symbol(shape=(3, 2, 3, 4))
res=arg0**arg1
s1=numpy.array([[[[4.4049017177959024, 1.6295279606965902, 0.34996170852682068, 3.8249897524769225],
[3.166351042889235, 0.31704330393840041, 0.56944970265842709, 4.0674127332271564], [4.7531815485196223,
3.9378749832455275, 4.1190804892406074, 1.6490622316576049]], [[0.087540448115054945, 0.47733545759568397,
3.1046110122624913, 0.55496651223126869], [4.9994026589339793, 1.1539583254393559, 4.3285446977517674,
3.9074072454108273], [0.39422465478757818, 0.12749582235212634, 3.3198050981771576, 2.7830434367406554]]],
[[[4.8016423551007534, 1.3182627183354212, 2.5712661793707006, 0.69043427054151507], [4.8923630236769098,
3.9592101125379666, 3.7599262920168344, 0.91223394177619577], [2.0812255919772435, 3.3045177969311674,
1.996477332198537, 0.063412148396049153]], [[0.79247002703848735, 4.3525101922793947, 3.1167979206292715,
2.121356402558547], [1.5149621718504176, 3.5217533956131613, 2.7025468362194096, 1.0269356863150814],
[0.68822789663005524, 2.7317675329700815, 4.4823426664868622, 3.9053596848348828]]], [[[1.6704709258607828,
0.38358040317411868, 3.3180843291402025, 0.38642831404405864], [2.1331043853922558, 4.9167900790585284,
2.2261101298062109, 3.5510258967271158], [0.085366611692996669, 0.85934993459018305, 1.6392862656041329,
3.3139321252721592]], [[4.2399400668492548, 0.4796559233029315, 4.4601344090922499, 0.59344213261796475],
[3.3778564736320909, 1.2667412365915995, 0.33178212910170213, 4.7975639267592287], [1.107681780093535,
4.0673158562596425, 2.0536296572182002, 1.6538443023426441]]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[[356.38996392138307, 8.7911748930326912, 1.5949460532404072, 164.42226368901473],
[68.294607355721951, 1.5264237920842607, 2.1374878745830888, 227.19846147554483], [567.14889300380855,
191.14339207050969, 243.41002621705448, 9.0232680500616969]], [[1.1238684474828207, 1.8903334019477369,
62.895284718323751, 2.0965875478010414], [787.66627571196375, 4.6615625827727856, 321.87627605217125,
183.53047169478882], [1.6919564879170577, 1.185395111021144, 83.808511283560932, 40.956409805291528]]],
[[[605.02352164794058, 5.8038909264970417, 30.876732730682711, 2.5118550633664829], [682.85761872187595,
196.66158396286718, 150.75332857218464, 3.3766922391691074], [16.059512953026779, 82.116724327381135,
14.342818783171788, 1.0882709736733585]], [[2.8781122849029743, 332.3327044001154, 63.926131299011949,
16.94266251167052], [7.5452675423436384, 109.71964173451236, 36.786389714813964, 3.9349799176508915],
[2.5044729250047846, 38.248624144915659, 395.17542028054856, 183.02986182018722]]], [[[9.2846753307903178,
1.6681018892605832, 83.616352693779916, 1.6744511276170131], [17.210270999211929, 705.47507179707702,
19.483610526578207, 114.08880815037078], [1.1206141275171515, 3.1466887145107263, 8.9063605751931156,
83.154487610286751]], [[285.99403054322283, 1.8961938841923576, 383.63996607747578, 2.2070053099870641],
[90.556467818241586, 5.4183980619512235, 1.5567321089285786, 601.74081791606875], [4.3824984735781634,
227.16910220157538, 15.479075041487985, 9.0810128997191981]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[127.42770564150206, 6.009123019727256, 1.4698043152443288,
67.313581093569127], [32.607322081975042, 1.417511552104789, 1.8713751983679385, 87.895272543141388],
[186.94745217878133, 76.217496939589864, 93.037799509475548, 6.1397012380871123]], [[1.101130485150756,
1.6909707601108872, 30.465429935425806, 1.841784536081424], [245.13066163099134, 3.5605652445368658,
117.15748687077632, 73.704335351859541], [1.543172236683038, 1.150627960139273, 38.606145814941883,
21.385449809073382]]], [[[197.188119395863, 4.2662473882119789, 16.939566089510787, 2.1378824667292222],
[217.89109194913004, 78.028188003064429, 62.662322144719482, 2.7289150218921923], [9.8785667055148352,
37.962087467770061, 8.9989044283769228, 1.0722769941330905]], [[2.3919409318986364, 120.28847935111511,
30.876771313432304, 10.324616307330889], [5.2973163402393624, 48.214196607451264, 19.572491618101424,
3.0960657620363805], [2.1326977886693315, 20.212114832315748, 138.7636546194104, 73.538442915757031]]],
[[[6.2860703585515694, 1.52520114833432, 38.533106971609669, 1.529988774774909], [10.458965062353716,
223.82781736742263, 11.586161128219478, 49.792660843018716], [1.0984994159263746, 2.5746298970581099,
6.0740022207983229, 38.357433238090152]], [[106.27292861316032, 1.6952944236467047, 135.41338350999845,
1.9214439948729383], [41.152983414265528, 4.0310857094584485, 1.4406909605726668, 196.3050693195793],
[3.3837766744129514, 87.885902342196843, 9.5830737150633496, 6.1720973205785938]]]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_pow_overloaded_taggedData_rank1_Symbol_rank0(self):
arg0=Data(numpy.array([4.1741184553454405, 0.519795084994504]),self.functionspace)
"""
Interval unit commitment
@author:<NAME>
@e-mail:<EMAIL>
"""
from pypower import loadcase, ext2int, makeBdc
from scipy.sparse import csr_matrix as sparse
from numpy import zeros, c_, shape, ix_, ones, r_, arange, sum, concatenate, array, diag, eye
from solvers.mixed_integer_solvers_cplex import mixed_integer_linear_programming as lp
import pandas as pd
def problem_formulation(case, BETA=0.15, BETA_HYDRO=0.05, BETA_LOAD=0.03):
"""
:param case: The test case for unit commitment problem
:return:
"""
CAP_WIND = 1 # The capacity of wind farm
# The disturbance (interval) range of the wind farm output is set by BETA
CAPVALUE = 10 # The capacity value
Price_energy = r_[ones(8), 3 * ones(8), ones(8)]  # 24 hourly prices: 1 for hours 0-7, 3 for 8-15, 1 for 16-23
from pypower.idx_brch import F_BUS, T_BUS, BR_X, TAP, SHIFT, BR_STATUS, RATE_A
from pypower.idx_cost import MODEL, NCOST, PW_LINEAR, COST, POLYNOMIAL
from pypower.idx_bus import BUS_TYPE, REF, VA, VM, PD, GS, VMAX, VMIN, BUS_I
from pypower.idx_gen import GEN_BUS, VG, PG, QG, PMAX, PMIN, QMAX, QMIN
mpc = ext2int.ext2int(case)
baseMVA, bus, gen, branch, gencost = mpc["baseMVA"], mpc["bus"], mpc["gen"], mpc["branch"], mpc["gencost"]
nb = shape(mpc['bus'])[0] ## number of buses
nl = shape(mpc['branch'])[0] ## number of branches
ng = shape(mpc['gen'])[0] ## number of dispatchable injections
# Bbus = makeBdc.makeBdc(baseMVA, bus, branch)
# Distribution_factor = Bbus[1] * inv(Bbus[0])
Distribution_factor = array([
[0, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[-0.005, -0.005, -0.005, -1.005, -0.005, -0.005, -0.005, -0.005, -0.005, -0.005, -0.005, -0.005, -0.005,
-0.005, ],
[0.47, 0.47, 0.47, 0.47, -0.03, -0.03, -0.03, -0.03, -0.03, -0.03, -0.03, -0.03, -0.03, -0.03],
[0.47, 0.47, 0.47, 0.47, -0.03, - 0.03, -0.03, -0.03, -0.03, -0.03, -0.03, -0.03, -0.03, -0.03],
[0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
[0.32, 0.32, 0.32, 0.32, 0.32, 0.32, -0.68, -0.68, 0.32, 0.32, 0.32, 0.32, 0.32, 0.32],
[0.32, 0.32, 0.32, 0.32, 0.32, 0.32, 0.32, 0.32, -0.68, -0.68, 0.32, 0.32, 0.32, 0.32],
[0.16, 0.16, 0.16, 0.16, 0.16, 0.16, 0.16, 0.16, 0.16, -0.84, 0.16, 0.16, 0.16, 0.16],
[-0.16, -0.16, -0.16, -0.16, -0.16, -0.16, -0.16, -0.16, -0.16, -0.16, -1.16, -0.16, -1.16, -0.16],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0],
[-0.16, -0.16, -0.16, -0.16, -0.16, -0.16, -0.16, -0.16, -0.16, -0.16, -0.16, -1.16, -0.16, -0.16],
[-0.08, -0.08, -0.08, -0.08, -0.08, -0.08, -0.08, -0.08, -0.08, -0.08, -0.08, -0.08, -0.08, -1.08],
])
Distribution_factor = sparse(Distribution_factor)
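# Illustrative sketch (commented out, not used below): rather than hard-coding
# the distribution (PTDF) matrix, it could be derived from the DC power-flow
# matrices returned by makeBdc, assuming bus 0 is the slack bus:
#
# from numpy.linalg import inv
# Bbus, Bf, _, _ = makeBdc.makeBdc(baseMVA, bus, branch)
# ptdf = zeros((nl, nb))
# ptdf[:, 1:] = Bf[:, 1:].todense() * inv(Bbus[1:, 1:].todense())
#
# The hard-coded matrix above is kept as the authoritative data for this case.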
# Formulate connection matrix for wind farms
i = []
PWMAX = []
PWMIN = []
for index in range(ng):
if gen[index, PMIN] == 0:
i.append(index)
PWMAX.append(gen[index, PMAX])
PWMIN.append(gen[index, PMIN])
i = array(i)
nw = i.shape[0]
Cw = sparse((ones(nw), (gen[i, GEN_BUS], arange(nw))), shape=(nb, nw))
PWMAX = array(PWMAX).reshape((len(PWMAX), 1))
PWMIN = array(PWMIN).reshape((len(PWMIN), 1))
# Formulate the connection matrix for hydro power plants
i = []
PHMAX = []
PHMIN = []
for index in range(ng):
if gen[index, PMIN] > 0:
i.append(index)
PHMAX.append(gen[index, PMAX])
PHMIN.append(gen[index, PMIN])
i = array(i)
nh = i.shape[0]
Ch = sparse((ones(nh), (gen[i, GEN_BUS], arange(nh))), shape=(nb, nh))
PHMAX = array(PHMAX).reshape((len(PHMAX), 1))
PHMIN = array(PHMIN).reshape((len(PHMIN), 1))
# Formulate the external power systems
i = []
PEXMAX = []
PEXMIN = []
for index in range(ng):
if gen[index, PMIN] < 0:
i.append(index)
PEXMAX.append(gen[index, PMAX])
PEXMIN.append(gen[index, PMIN])
i = array(i)
nex = i.shape[0]
Cex = sparse((ones(nex), (gen[i, GEN_BUS], arange(nex))), shape=(nb, nex))
PEXMAX = array(PEXMAX).reshape((len(PEXMAX), 1))
PEXMIN = array(PEXMIN).reshape((len(PEXMIN), 1))
PLMAX = branch[:, RATE_A].reshape((nl, 1)) # The power flow limitation
T = 24
## Profiles
# Wind profile
WIND_PROFILE = array(
[591.35, 714.50, 1074.49, 505.06, 692.78, 881.88, 858.48, 609.11, 559.95, 426.86, 394.54, 164.47, 27.15, 4.47,
54.08, 109.90, 111.50, 130.44, 111.59, 162.38, 188.16, 216.98, 102.94, 229.53]).reshape((T, 1))
WIND_PROFILE = WIND_PROFILE / WIND_PROFILE.max()
WIND_PROFILE_FORECAST = zeros((T * nw, 1))
Delta_wind = zeros((T * nw, 1))
for i in range(T):
WIND_PROFILE_FORECAST[i * nw:(i + 1) * nw, :] = WIND_PROFILE[i] * PWMAX
Delta_wind[i * nw:(i + 1) * nw, :] = WIND_PROFILE[i] * PWMAX * BETA
# Load profile
LOAD_PROFILE = array([0.632596195634005, 0.598783973523217, 0.580981513054525, 0.574328051348912, 0.584214221241601,
0.631074282084712, 0.708620833751212, 0.797665730618795, 0.877125330124026, 0.926981579915087,
0.947428654208872, 0.921588439808779, 0.884707317888543, 0.877717046100358, 0.880387289807107,
0.892056129442049, 0.909233443653261, 0.926748403704075, 0.968646575067696, 0.999358974358974,
0.979169591816267, 0.913517534182463, 0.806453715775750, 0.699930632166617]).reshape((T, 1))
LOAD_FORECAST = zeros((T * nb, 1))
Delta_load = zeros((T * nb, 1))
load_base = bus[:, PD].reshape(nb, 1)
for i in range(T):
LOAD_FORECAST[i * nb:(i + 1) * nb, :] = load_base * LOAD_PROFILE[i]
Delta_load[i * nb:(i + 1) * nb, :] = load_base * BETA_LOAD
# Hydro information
HYDRO_INJECT = array([6, 2, 4, 3]).reshape((nh, 1))
HYDRO_INJECT_FORECAST = zeros((T * nh, 1))
Delta_hydro = zeros((T * nh, 1))
for i in range(T):
HYDRO_INJECT_FORECAST[i * nh:(i + 1) * nh, :] = HYDRO_INJECT
Delta_hydro[i * nh:(i + 1) * nh, :] = HYDRO_INJECT * BETA_HYDRO
MIN_DOWN = ones((nh, 1))
MIN_UP = ones((nh, 1))
QMIN = array([1.5, 1, 1, 1]).reshape((nh, 1))
QMAX = array([20, 10, 10, 10]).reshape((nh, 1))
VMIN = array([70, 50, 70, 40]).reshape((nh, 1))
VMAX = array([160, 140, 150, 130]).reshape((nh, 1))
V0 = array([110, 90, 100, 80]).reshape((nh, 1))
M_transfer = diag(array([8.8649, 6.4444, 6.778, 7.3333]))
C_TEMP = array([30, 2, 9, 4]).reshape((4, 1))
Q_TEMP = array([1.5, 1, 1, 1]).reshape((4, 1))
# Define the first stage decision variables
ON = 0
OFF = 1
IHG = 2
PHG = 3
RUHG = 4
RDHG = 5
QHG = 6
QUHG = 7
QDHG = 8
V = 9
S = 10
PWC = 11
PLC = 12
PEX = 13
CEX = 14
NX = PWC * nh * T + nw * T + nb * T + nex * T + 1
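# Layout of the flattened first-stage decision vector (documentation only):
# eleven hydro blocks of size nh * T each, ordered as
# [ON|OFF|IHG|PHG|RUHG|RDHG|QHG|QUHG|QDHG|V|S], followed by wind curtailment
# PWC (nw * T), load curtailment PLC (nb * T), power exchange PEX (nex * T),
# and a single capacity variable, so that e.g. the generation of plant j at
# period i sits at index PHG * nh * T + i * nh + j.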
lb = zeros((NX, 1))
ub = zeros((NX, 1))
c = zeros((NX, 1))
vtypes = ["c"] * NX
for i in range(T):
for j in range(nh):
# lower boundary information
lb[ON * nh * T + i * nh + j] = 0
lb[OFF * nh * T + i * nh + j] = 0
lb[IHG * nh * T + i * nh + j] = 0
lb[PHG * nh * T + i * nh + j] = 0
lb[RUHG * nh * T + i * nh + j] = 0
lb[RDHG * nh * T + i * nh + j] = 0
lb[QHG * nh * T + i * nh + j] = 0
lb[QUHG * nh * T + i * nh + j] = 0
lb[QDHG * nh * T + i * nh + j] = 0
lb[V * nh * T + i * nh + j] = VMIN[j]
lb[S * nh * T + i * nh + j] = 0
# upper boundary information
ub[ON * nh * T + i * nh + j] = 1
ub[OFF * nh * T + i * nh + j] = 1
ub[IHG * nh * T + i * nh + j] = 1
ub[PHG * nh * T + i * nh + j] = PHMAX[j]
ub[RUHG * nh * T + i * nh + j] = PHMAX[j]
ub[RDHG * nh * T + i * nh + j] = PHMAX[j]
ub[QHG * nh * T + i * nh + j] = QMAX[j]
ub[QUHG * nh * T + i * nh + j] = QMAX[j]
ub[QDHG * nh * T + i * nh + j] = QMAX[j]
ub[V * nh * T + i * nh + j] = VMAX[j]
ub[S * nh * T + i * nh + j] = 10 ** 8
# objective value
c[S * nh * T + i * nh + j] = 1
c[RUHG * nh * T + i * nh + j] = -Price_energy[i]  # reserves priced at the hourly energy price
c[RDHG * nh * T + i * nh + j] = Price_energy[i]
# variables types
vtypes[ON * nh * T + i * nh + j] = "D"
vtypes[OFF * nh * T + i * nh + j] = "D"
vtypes[IHG * nh * T + i * nh + j] = "D"
if i == T - 1:
lb[V * nh * T + i * nh + j] = V0[j]
ub[V * nh * T + i * nh + j] = V0[j]
for j in range(nw):
# lower boundary information
lb[PWC * nh * T + i * nw + j] = 0
# upper boundary information
ub[PWC * nh * T + i * nw + j] = WIND_PROFILE_FORECAST[i * nw + j]
# objective value
c[PWC * nh * T + i * nw + j] = 1
for j in range(nb):
# lower boundary information
lb[PWC * nh * T + nw * T + i * nb + j] = 0
# upper boundary information
ub[PWC * nh * T + nw * T + i * nb + j] = bus[j, PD] * LOAD_PROFILE[i]
# objective value
c[PWC * nh * T + nw * T + i * nb + j] = 10 ** 8
for j in range(nex):
# lower boundary information
lb[PWC * nh * T + nw * T + nb * T + i * nex + j] = PEXMIN[j]
# upper boundary information
ub[PWC * nh * T + nw * T + nb * T + i * nex + j] = PEXMAX[j]
# objective value
c[PWC * nh * T + nw * T + nb * T + i * nex + j] = -Price_energy[i]
# lower boundary information
lb[PWC * nh * T + nw * T + nb * T + nex * T] = PEXMIN[0]
# upper boundary information
ub[PWC * nh * T + nw * T + nb * T + nex * T] = PEXMAX[0]
# objective value
# c[PWC * nh * T + nw * T + nb * T + nex * T] = -CAPVALUE
# 2) Constraint set
# 2.1) Power balance equation
Aeq = zeros((T, NX))
beq = zeros((T, 1))
for i in range(T):
# For the hydro units
for j in range(nh):
Aeq[i, PHG * nh * T + i * nh + j] = 1
# For the wind farms
for j in range(nw):
Aeq[i, PWC * nh * T + i * nw + j] = -1
# For the loads
for j in range(nb):
Aeq[i, PWC * nh * T + nw * T + i * nb + j] = 1
# For the power exchange
for j in range(nex):
Aeq[i, PWC * nh * T + nw * T + nb * T + i * nex + j] = -1
beq[i] = sum(load_base) * LOAD_PROFILE[i] - sum(WIND_PROFILE_FORECAST[i * nw:(i + 1) * nw])
# 2.2) Status transformation of each unit
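# Each row encodes IHG[t] - IHG[t-1] = ON[t] - OFF[t]: the commitment status
# can only change through an explicit start-up (ON) or shut-down (OFF).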
Aeq_temp = zeros((T * nh, NX))
beq_temp = zeros((T * nh, 1))
for i in range(T):
for j in range(nh):
Aeq_temp[i * nh + j, ON * nh * T + i * nh + j] = -1
Aeq_temp[i * nh + j, OFF * nh * T + i * nh + j] = 1
Aeq_temp[i * nh + j, IHG * nh * T + i * nh + j] = 1
if i != 0:
Aeq_temp[i * nh + j, IHG * nh * T + (i - 1) * nh + j] = -1
else:
beq_temp[i * nh + j] = 0  # first period: previous status assumed zero (off)
Aeq = concatenate((Aeq, Aeq_temp), axis=0)
beq = concatenate((beq, beq_temp), axis=0)
# 2.3) Water volume balance
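# Reservoir balance: V[t] - V[t-1] + S[t] + QHG[t] = inflow[t], where the
# V[t-1] term is replaced by the initial volume V0 in the first period.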
Aeq_temp = zeros((T * nh, NX))
beq_temp = HYDRO_INJECT_FORECAST
for i in range(T):
for j in range(nh):
Aeq_temp[i * nh + j, V * nh * T + i * nh + j] = 1
Aeq_temp[i * nh + j, S * nh * T + i * nh + j] = 1
Aeq_temp[i * nh + j, QHG * nh * T + i * nh + j] = 1
if i != 0:
Aeq_temp[i * nh + j, V * nh * T + (i - 1) * nh + j] = -1
else:
beq_temp[i * nh + j] += V0[j]
Aeq = concatenate((Aeq, Aeq_temp), axis=0)
beq = concatenate((beq, beq_temp), axis=0)
# 2.4) Power-water conversion
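# Linearized conversion: PHG = M_transfer * (QHG - Q_TEMP) + C_TEMP whenever
# the unit is on (IHG = 1); every term vanishes when IHG = 0.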
Aeq_temp = zeros((T * nh, NX))
beq_temp = zeros((T * nh, 1))
for i in range(T):
for j in range(nh):
Aeq_temp[i * nh + j, PHG * nh * T + i * nh + j] = 1
Aeq_temp[i * nh + j, QHG * nh * T + i * nh + j] = -M_transfer[j, j]
Aeq_temp[i * nh + j, IHG * nh * T + i * nh + j] = -C_TEMP[j] + M_transfer[j, j] * Q_TEMP[j]
Aeq = concatenate((Aeq, Aeq_temp), axis=0)
beq = concatenate((beq, beq_temp), axis=0)
# 2.5) Power range limitation
Aineq = zeros((T * nh, NX))
bineq = zeros((T * nh, 1))
for i in range(T):
for j in range(nh):
Aineq[i * nh + j, ON * nh * T + i * nh + j] = 1
Aineq[i * nh + j, OFF * nh * T + i * nh + j] = 1
bineq[i * nh + j] = 1
Aineq_temp = zeros((T * nh, NX))
bineq_temp = zeros((T * nh, 1))
for i in range(T):
for j in range(nh):
Aineq_temp[i * nh + j, IHG * nh * T + i * nh + j] = PHMIN[j]
Aineq_temp[i * nh + j, PHG * nh * T + i * nh + j] = -1
Aineq_temp[i * nh + j, RDHG * nh * T + i * nh + j] = 1
Aineq = concatenate((Aineq, Aineq_temp), axis=0)
bineq = concatenate((bineq, bineq_temp), axis=0)
Aineq_temp = zeros((T * nh, NX))
bineq_temp = zeros((T * nh, 1))
for i in range(T):
for j in range(nh):
Aineq_temp[i * nh + j, IHG * nh * T + i * nh + j] = -PHMAX[j]
Aineq_temp[i * nh + j, PHG * nh * T + i * nh + j] = 1
Aineq_temp[i * nh + j, RUHG * nh * T + i * nh + j] = 1
Aineq = concatenate((Aineq, Aineq_temp), axis=0)
bineq = concatenate((bineq, bineq_temp), axis=0)
# 2.6) Water reserve constraints
Aineq_temp = zeros((T * nh, NX))
bineq_temp = zeros((T * nh, 1))
for i in range(T):
for j in range(nh):
Aineq_temp[i * nh + j, PHG * nh * T + i * nh + j] = 1
Aineq_temp[i * nh + j, RUHG * nh * T + i * nh + j] = 1
Aineq_temp[i * nh + j, IHG * nh * T + i * nh + j] = -C_TEMP[j] + M_transfer[j, j] * Q_TEMP[j]
Aineq_temp[i * nh + j, QHG * nh * T + i * nh + j] = -M_transfer[j, j]
Aineq_temp[i * nh + j, QUHG * nh * T + i * nh + j] = -M_transfer[j, j]
Aineq = concatenate((Aineq, Aineq_temp), axis=0)
bineq = concatenate((bineq, bineq_temp), axis=0)
Aineq_temp = zeros((T * nh, NX))
bineq_temp = zeros((T * nh, 1))
for i in range(T):
for j in range(nh):
Aineq_temp[i * nh + j, PHG * nh * T + i * nh + j] = -1
Aineq_temp[i * nh + j, RDHG * nh * T + i * nh + j] = 1
Aineq_temp[i * nh + j, IHG * nh * T + i * nh + j] = C_TEMP[j] - M_transfer[j, j] * Q_TEMP[j]
Aineq_temp[i * nh + j, QHG * nh * T + i * nh + j] = M_transfer[j, j]
Aineq_temp[i * nh + j, QDHG * nh * T + i * nh + j] = -M_transfer[j, j]
Aineq = concatenate((Aineq, Aineq_temp), axis=0)
bineq = concatenate((bineq, bineq_temp), axis=0)
# 2.7) water flow constraints
Aineq_temp = zeros((T * nh, NX))
bineq_temp = zeros((T * nh, 1))
for i in range(T):
for j in range(nh):
Aineq_temp[i * nh + j, IHG * nh * T + i * nh + j] = QMIN[j]
Aineq_temp[i * nh + j, QHG * nh * T + i * nh + j] = -1
Aineq_temp[i * nh + j, QDHG * nh * T + i * nh + j] = 1
Aineq = concatenate((Aineq, Aineq_temp), axis=0)
bineq = concatenate((bineq, bineq_temp), axis=0)
Aineq_temp = zeros((T * nh, NX))
bineq_temp = zeros((T * nh, 1))
for i in range(T):
for j in range(nh):
Aineq_temp[i * nh + j, IHG * nh * T + i * nh + j] = -QMAX[j]
Aineq_temp[i * nh + j, QHG * nh * T + i * nh + j] = 1
Aineq_temp[i * nh + j, QUHG * nh * T + i * nh + j] = 1
Aineq = concatenate((Aineq, Aineq_temp), axis=0)
bineq = concatenate((bineq, bineq_temp), axis=0)
# 2.8) Water reserve limitation
Aineq_temp = zeros((T * nh, NX))
bineq_temp = zeros((T * nh, 1))
"""
Two Stage Robust Optimization for Unit Commitment Problem
The test case is the IEEE-6ww system.
@date: 13 June 2018
@author: <NAME>
@e-mail: <EMAIL>
"""
from numpy import zeros, shape, ones, diag, concatenate, r_, arange, array
import matplotlib.pyplot as plt
import scipy.linalg as linalg
from scipy.sparse import csr_matrix as sparse
from pypower.idx_brch import F_BUS, T_BUS, BR_R, BR_X, TAP, SHIFT, BR_STATUS, RATE_A
from pypower.idx_cost import MODEL, NCOST, PW_LINEAR, COST, POLYNOMIAL
from pypower.idx_bus import BUS_TYPE, REF, VA, VM, PD, GS, VMAX, VMIN, BUS_I, QD
from pypower.idx_gen import GEN_BUS, VG, PG, QG, PMAX, PMIN, QMAX, QMIN, RAMP_AGC
from pypower.idx_cost import STARTUP
from solvers.mixed_integer_quadratic_solver_cplex import mixed_integer_quadratic_programming as miqp
from unit_commitment.data_format.data_format_jointed_energy_reserve import ALPHA, BETA, IG, PG, RDG, RUG
class TwoStageUnitCommitmentRobust():
""""""
def __init__(self):
self.name = "Two stage robust optimization"
def problem_formulation(self, case, delta=0.03):
"""
Problem formulation for the two-stage robust unit commitment problem.
:param case: The test case for the unit commitment problem
:param delta: Relative disturbance range of the loads
:return:
"""
baseMVA, bus, gen, branch, gencost, profile = case["baseMVA"], case["bus"], case["gen"], case["branch"], case[
"gencost"], case["Load_profile"]
MIN_UP = -2
MIN_DOWN = -3
# Modify the bus, gen and branch matrix
bus[:, BUS_I] = bus[:, BUS_I] - 1
gen[:, GEN_BUS] = gen[:, GEN_BUS] - 1
branch[:, F_BUS] = branch[:, F_BUS] - 1
branch[:, T_BUS] = branch[:, T_BUS] - 1
ng = shape(case['gen'])[0]  # number of dispatchable injections
nl = shape(case['branch'])[0]  ## number of branches
nb = shape(case['bus'])[0]  ## number of buses
self.ng = ng
self.nb = nb
self.nl = nl
u0 = [0] * ng # The initial generation status
for i in range(ng):
u0[i] = int(gencost[i, -1] > 0)
# Formulate a mixed integer quadratic programming problem
# 1) Announce the variables
# [alpha,beta,ig,pg,ru,rd]: start-up, shut-down, status, generation level, up-reserve, down-reserve
# 1.1) boundary information
T = case["Load_profile"].shape[0]
nx = (RDG + 1) * T * ng
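# The decision vector stacks (RDG + 1) blocks of size T * ng, ordered as
# [ALPHA|BETA|IG|PG|RUG|RDG]; e.g. the status of unit j at period i sits at
# index IG * ng * T + i * ng + j.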
lb = zeros((nx, 1))
ub = zeros((nx, 1))
vtypes = ["c"] * nx
self.T = T
for i in range(T):
for j in range(ng):
# lower boundary
lb[ALPHA * ng * T + i * ng + j] = 0
lb[BETA * ng * T + i * ng + j] = 0
lb[IG * ng * T + i * ng + j] = 0
lb[PG * ng * T + i * ng + j] = 0
lb[RUG * ng * T + i * ng + j] = 0
lb[RDG * ng * T + i * ng + j] = 0
# upper boundary
ub[ALPHA * ng * T + i * ng + j] = 1
ub[BETA * ng * T + i * ng + j] = 1
ub[IG * ng * T + i * ng + j] = 1
ub[PG * ng * T + i * ng + j] = gen[j, PMAX]
ub[RUG * ng * T + i * ng + j] = gen[j, PMAX]
ub[RDG * ng * T + i * ng + j] = gen[j, PMAX]
# variable types
vtypes[IG * ng * T + i * ng + j] = "D"
c = zeros((nx, 1))
q = zeros((nx, 1))
for i in range(T):
for j in range(ng):
# cost
c[ALPHA * ng * T + i * ng + j] = gencost[j, STARTUP]
c[BETA * ng * T + i * ng + j] = 0
c[IG * ng * T + i * ng + j] = gencost[j, 6]
c[PG * ng * T + i * ng + j] = gencost[j, 5]
c[RUG * ng * T + i * ng + j] = 0
c[RDG * ng * T + i * ng + j] = 0
q[PG * ng * T + i * ng + j] = gencost[j, 4]
# 2) Constraint set
# 2.1) Power balance equation
Aeq = zeros((T, nx))
#!/usr/bin/env python3
"""A python script to perform watermark embedding/detection
on the basis of singular value decomposition (SVD) and cepstrum method."""
# Copyright (C) 2020 by <NAME>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from acoustics.cepstrum import inverse_complex_cepstrum
from scipy.io import wavfile
import numpy as np
import pickle
HOST_SIGNAL_FILE = "host.wav"  # file into which the watermark is embedded
WATERMARK_SIGNAL_FILE = "wmed_signal.wav"  # file holding the watermarked signal
WATERMARK_U_FILE = 'svd_left.dat'  # left matrix of the SVD
WATERMARK_D_FILE = 'svd_center.dat'  # middle (singular-value) matrix of the SVD
WATERMARK_V_FILE = 'svd_right.dat'  # right matrix of the SVD
WATERMARK_ORIGINAL_FILE = 'watermark_ori.dat'  # original watermark signal
REP_CODE = True  # use repetition embedding
FRAME_LENGTH = 2048  # frame length
FFT_LENGTH = FRAME_LENGTH
HOP_LENGTH = 80
CONTROL_STRENGTH = 0.001  # embedding strength
NUM_REPS = 3  # number of embedding repetitions
THRESHOLD = 0.0
LOG_FLOOR = 0.00001  # floor value for the logarithm
def complex_cepstrum(x, n=None):
"""
Compute the complex cepstrum of a real sequence.
borrowed from http://python-acoustics.github.io/python-acoustics
"""
def _unwrap(phase):
samples = phase.shape[-1]
unwrapped = np.unwrap(phase)
center = (samples + 1) // 2
if samples == 1:
center = 0
ndelay = np.array(np.round(unwrapped[..., center] / np.pi))
unwrapped -= np.pi * ndelay[..., None] * np.arange(samples) / center
return unwrapped, ndelay
spectrum = np.fft.fft(x, n=n)
unwrapped_phase, ndelay = _unwrap(np.angle(spectrum))
log_spectrum = np.log(np.abs(spectrum) + LOG_FLOOR) + 1j * unwrapped_phase
ceps = np.fft.ifft(log_spectrum).real
return ceps, ndelay
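# Minimal usage sketch (illustrative): the complex cepstrum is invertible, so
# a frame can be recovered from its cepstrum and phase delay, e.g.
# ceps, ndelay = complex_cepstrum(frame)
# frame_rec = inverse_complex_cepstrum(ceps, ndelay)  # ~ frame, up to LOG_FLOOR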
def fix(xs):
"""
An emulation of the MATLAB 'fix' function (round toward zero).
borrowed from https://ideone.com/YjJwOh
"""
# res = [np.floor(e) if e >= 0 else np.ceil(e) for e in xs]
if xs >= 0:
res = np.floor(xs)
else:
res = np.ceil(xs)
return res
def embed():
"""
Perform watermark embedding.
"""
sr, host_signal = wavfile.read(HOST_SIGNAL_FILE)
host_signal = host_signal.astype(np.float64)
signal_len = len(host_signal)
# frame shift (hop size)
frame_shift = HOP_LENGTH
# overlap length between adjacent frames
overlap_length = FRAME_LENGTH - HOP_LENGTH
# number of analysis frames
n_frames = int(fix((signal_len - overlap_length) / frame_shift))
# extract the complex cepstrum of every frame
pointer = 0
ceps_mat = np.zeros((n_frames, FRAME_LENGTH))
ndelay_vec = np.zeros(n_frames)
for i in range(n_frames):
frame = host_signal[pointer: (pointer + FRAME_LENGTH)]
# complex cepstrum
real_ceps, ndelay = complex_cepstrum(frame)
ceps_mat[i, :] = real_ceps
ndelay_vec[i] = ndelay
pointer = pointer + frame_shift
# singular value decomposition of the cepstrum matrix
U, D, V = np.linalg.svd(ceps_mat, full_matrices=False)
off_diag_index = np.where(~np.eye(np.diag(D).shape[0], dtype=bool))
embed_nbit = len(off_diag_index[0])
if REP_CODE:
# effective number of embeddable bits
effective_nbit = int(np.floor(embed_nbit / NUM_REPS))
embed_nbit = int(effective_nbit * NUM_REPS)
else:
effective_nbit = embed_nbit
# create the original watermark signal (a bit string of 0s and 1s)
wmark_original = np.random.randint(2, size=effective_nbit)
# save the original watermark signal
with open(WATERMARK_ORIGINAL_FILE, 'wb') as f:
pickle.dump(wmark_original, f)
wmark_original = 2 * wmark_original - 1  # map bits {0,1} to {-1,+1}
# extend the watermark signal by repetition
if REP_CODE:
wmark_extended = np.repeat(wmark_original, NUM_REPS)
else:
wmark_extended = wmark_original
# embedding strength
alpha = CONTROL_STRENGTH
# embed the watermark into the off-diagonal entries
wmed_D = np.diag(D)
row_index = off_diag_index[0]
col_index = off_diag_index[1]
for i in range(embed_nbit):
wmed_D[row_index[i], col_index[i]] = alpha * wmark_extended[i]
# singular value decomposition of the watermarked matrix
Uw, Dw, Vw = np.linalg.svd(wmed_D, full_matrices=False)
# save the matrices
with open(WATERMARK_U_FILE, 'wb') as f:
pickle.dump(Uw, f)
with open(WATERMARK_D_FILE, 'wb') as f:
pickle.dump(D, f)
with open(WATERMARK_V_FILE, 'wb') as f:
pickle.dump(Vw, f)
# reconstruct the cepstrum matrix
wmed_ceps = U @ np.diag(Dw) @ V
# convert the watermarked cepstra back into an audio signal
wmed_signal = np.zeros((frame_shift * n_frames)) # watermarked signal
for i in range(n_frames):
# inverse transform
wmarked_frame = inverse_complex_cepstrum(wmed_ceps[i, :], ndelay_vec[i])
wmed_signal[frame_shift * i:
frame_shift * (i + 1)] = wmarked_frame[0:frame_shift]
# concatenate with the remainder of the host signal
wmed_signal = np.concatenate(
(wmed_signal, host_signal[len(wmed_signal): signal_len]))
# save the watermarked signal as a wav file
wmed_signal = wmed_signal.astype(np.int16) # convert float into integer
wavfile.write(WATERMARK_SIGNAL_FILE, sr, wmed_signal)
def detect():
"""
Perform watermark detection.
"""
# load the host signal
sr, host_signal = wavfile.read(HOST_SIGNAL_FILE)
# open the watermarked audio file
_, eval_signal = wavfile.read(WATERMARK_SIGNAL_FILE)
eval_signal = eval_signal.astype(np.float64)
signal_len = len(eval_signal)
# frame shift (hop size)
frame_shift = HOP_LENGTH
# overlap length between adjacent frames
overlap_length = FRAME_LENGTH - HOP_LENGTH
# number of analysis frames
n_frames = int(fix((signal_len - overlap_length) / frame_shift))
# load the matrices used for embedding
with open(WATERMARK_U_FILE, 'rb') as f:
Uw = pickle.load(f)
with open(WATERMARK_D_FILE, 'rb') as f:
D = pickle.load(f)
with open(WATERMARK_V_FILE, 'rb') as f:
Vw = pickle.load(f)
# extract the complex cepstrum of every frame
pointer = 0
wmed_ceps_mat = np.zeros((n_frames, FRAME_LENGTH))
"""Defines LightCurve, KeplerLightCurve, and TessLightCurve."""
import os
import datetime
import logging
import warnings
import collections
import numpy as np
from scipy.signal import savgol_filter
from scipy.interpolate import interp1d
import matplotlib
from matplotlib import pyplot as plt
from copy import deepcopy
from astropy.table import Table, Column, MaskedColumn
from astropy.io import fits
from astropy.time import Time, TimeDelta
from astropy import units as u
from astropy.units import Quantity
from astropy.timeseries import TimeSeries, aggregate_downsample
from astropy.table import vstack
from astropy.stats import calculate_bin_edges
from astropy.utils.decorators import deprecated, deprecated_renamed_argument
from astropy.utils.exceptions import AstropyUserWarning
from . import PACKAGEDIR, MPLSTYLE
from .utils import (
running_mean,
bkjd_to_astropy_time,
btjd_to_astropy_time,
validate_method,
_query_solar_system_objects,
)
from .utils import LightkurveWarning, LightkurveDeprecationWarning
__all__ = ["LightCurve", "KeplerLightCurve", "TessLightCurve", "FoldedLightCurve"]
log = logging.getLogger(__name__)
_HAS_VAR_BINS = 'time_bin_end' in aggregate_downsample.__kwdefaults__
def _to_unitless_day(data):
if isinstance(data, Quantity):
return data.to(u.day).value
elif not np.isscalar(data):
return np.asarray([_to_unitless_day(item) for item in data]).flatten()
else:
return data
class QColumn(Column):
"""(Temporary) workaround to provide ``.value`` alias to raw data, so as to match ``Quantity``.
"""
@property
def value(self):
return self.data
class QMaskedColumn(MaskedColumn):
"""(Temporary) workaround to provide ``.value`` alias to raw data, so as to match ``Quantity``.
"""
@property
def value(self):
return self.data
class QTimeSeries(TimeSeries):
def _convert_col_for_table(self, col):
"""
Ensure resulting column has ``.value`` accessor to raw data, irrespective of type of input.
It won't be needed once https://github.com/astropy/astropy/pull/10962 is in astropy release
and Lightkurve requires the corresponding astropy release (4.3).
"""
# string-typed columns should not have a unit, or it will make convert_col_for_table crash!
# see https://github.com/lightkurve/lightkurve/pull/980#issuecomment-806178939
if hasattr(col, 'dtype'):
if hasattr(col, 'unit') and col.dtype.kind in {'U', 'S'}:
del col.unit
# ignore "dropping mask in Quantity column" warning issued up until AstroPy v4.3.1
with warnings.catch_warnings():
warnings.filterwarnings("ignore", message=".*dropping mask.*")
col = super()._convert_col_for_table(col)
if (
isinstance(col, Column)
and getattr(col, "unit", None) is None
and (not hasattr(col, "value"))
):
# the logic is similar to those in the grandparent QTable for Quantity
if isinstance(col, MaskedColumn):
qcol = QMaskedColumn(
data=col.data,
name=col.name,
dtype=col.dtype,
description=col.description,
mask=col.mask,
fill_value=col.fill_value,
format=col.format,
meta=col.meta,
copy=False,
)
else:
qcol = QColumn(
data=col.data,
name=col.name,
dtype=col.dtype,
description=col.description,
format=col.format,
meta=col.meta,
copy=False,
)
qcol.info = col.info
qcol.info.indices = col.info.indices
col = qcol
return col
class LightCurve(QTimeSeries):
"""
Subclass of AstroPy `~astropy.table.Table` guaranteed to have *time*, *flux*, and *flux_err* columns.
Compared to the generic `~astropy.timeseries.TimeSeries` class, `LightCurve`
ensures that each object has `time`, `flux`, and `flux_err` columns.
These three columns are special for two reasons:
1. they are the key columns upon which all light curve operations operate;
2. they are always present (though they may be populated with ``NaN`` values).
`LightCurve` objects also provide user-friendly attribute access to
columns and meta data.
Parameters
----------
data : numpy ndarray, dict, list, `~astropy.table.Table`, or table-like object, optional
Data to initialize time series. This does not need to contain the times
or fluxes, which can be provided separately, but if it does contain the
times and fluxes they should be in columns called ``'time'``,
``'flux'``, and ``'flux_err'`` to be automatically recognized.
time : `~astropy.time.Time` or iterable
Time values. They can either be given directly as a
`~astropy.time.Time` array or as any iterable that initializes the
`~astropy.time.Time` class.
flux : `~astropy.units.Quantity` or iterable
Flux values for every time point.
flux_err : `~astropy.units.Quantity` or iterable
Uncertainty on each flux data point.
**kwargs : dict
Additional keyword arguments are passed to `~astropy.table.QTable`.
Attributes
----------
meta : `dict`
meta data associated with the lightcurve. The header of the underlying FITS file (if applicable)
is stored in this dictionary. By convention, keys in this dictionary are usually in uppercase.
Notes
-----
*Attribute access*: You can access a column or a ``meta`` value directly as an attribute.
>>> lc.flux # shortcut for lc['flux'] # doctest: +SKIP
>>> lc.sector # shortcut for lc.meta['SECTOR'] # doctest: +SKIP
>>> lc.flux = lc.flux * 1.05 # update the values of a column. # doctest: +SKIP
In case the given name is both a column name and a key in ``meta``, the column will be returned.
Note that you *cannot* create a new column using the attribute interface. If you do so,
a new attribute is created instead, and a warning is raised.
If you do create such attributes on purpose, please note that the attributes are not carried
over when the lightcurve object is copied, or a new lightcurve object is derived
based on a copy, e.g., ``normalize()``.
Examples
--------
>>> import lightkurve as lk
>>> lc = lk.LightCurve(time=[1, 2, 3, 4], flux=[0.98, 1.02, 1.03, 0.97])
>>> lc.time
<Time object: scale='tdb' format='jd' value=[1. 2. 3. 4.]>
>>> lc.flux
<Quantity [0.98, 1.02, 1.03, 0.97]>
>>> lc.bin(time_bin_size=2, time_bin_start=0.5).flux
<Quantity [1., 1.]>
"""
# The constructor of the `TimeSeries` base class will enforce the presence
# of these columns:
_required_columns = ["time", "flux", "flux_err"]
# The following keywords were removed in Lightkurve v2.0.
# Their use will trigger a warning.
_deprecated_keywords = (
"targetid",
"label",
"time_format",
"time_scale",
"flux_unit",
)
_deprecated_column_keywords = [
"centroid_col",
"centroid_row",
"cadenceno",
"quality",
]
# If an iterable is passed for ``time``, we will initialize an AstroPy
# ``Time`` object using the following format and scale:
_default_time_format = "jd"
_default_time_scale = "tdb"
# To emulate pandas, we do not support creating new columns or meta data
# fields via attribute assignment, and raise a warning in __setattr__ when
# a new attribute is created. We need to relax this warning during the
# initial construction of the object using `_new_attributes_relax`.
_new_attributes_relax = True
# cf. issue #925
__array_priority__ = 100_000
def __init__(self, data=None, *args, time=None, flux=None, flux_err=None, **kwargs):
# Delay checking for required columns until the end
self._required_columns_relax = True
# Lightkurve v1.x supported passing time, flux, and flux_err as
# positional arguments. We support it here for backwards compatibility.
if len(args) in [1, 2]:
warnings.warn(
"passing flux as a positional argument is deprecated"
", please use ``flux=...`` instead.",
LightkurveDeprecationWarning,
)
time = data
flux = args[0]
data = None
if len(args) == 2:
flux_err = args[1]
# For backwards compatibility with Lightkurve v1.x,
# we support passing deprecated keywords via **kwargs.
deprecated_kws = {}
for kw in self._deprecated_keywords:
if kw in kwargs:
deprecated_kws[kw] = kwargs.pop(kw)
deprecated_column_kws = {}
for kw in self._deprecated_column_keywords:
if kw in kwargs:
deprecated_column_kws[kw] = kwargs.pop(kw)
# If `time` is passed as keyword argument, we populate it with integer numbers
if data is None or "time" not in data.keys():
if time is None and flux is not None:
time = np.arange(len(flux))
# We are tolerant of missing time format
if time is not None and not isinstance(time, (Time, TimeDelta)):
# Lightkurve v1.x supported specifying the time_format
# as a constructor kwarg
time = Time(
time,
format=deprecated_kws.get("time_format", self._default_time_format),
scale=deprecated_kws.get("time_scale", self._default_time_scale),
)
# Also be tolerant of missing time format if time is passed via `data`
if data and "time" in data.keys():
if not isinstance(data["time"], (Time, TimeDelta)):
data["time"] = Time(
data["time"],
format=deprecated_kws.get("time_format", self._default_time_format),
scale=deprecated_kws.get("time_scale", self._default_time_scale),
)
# Allow overriding the required columns
self._required_columns = kwargs.pop("_required_columns", self._required_columns)
# Call the SampledTimeSeries constructor.
# Disable required columns for now; we'll check those later.
tmp = self._required_columns
self._required_columns = []
super().__init__(data=data, time=time, **kwargs)
self._required_columns = tmp
# For some operations, an empty time series needs to be created, then
# columns added one by one. We should check that when columns are added
# manually, time is added first and is of the right type.
if data is None and time is None and flux is None and flux_err is None:
self._required_columns_relax = True
return
# Load `time`, `flux`, and `flux_err` from the table as local variable names
time = self.columns["time"] # super().__init__() guarantees this is a column
if "flux" in self.colnames:
if flux is None:
flux = self.columns["flux"]
else:
raise TypeError(
f"'flux' has been given both in the `data` table and as a keyword argument"
)
if "flux_err" in self.colnames:
if flux_err is None:
flux_err = self.columns["flux_err"]
else:
raise TypeError(
f"'flux_err' has been given both in the `data` table and as a keyword argument"
)
# Ensure `flux` and `flux_err` are populated with NaNs if missing
if flux is None and time is not None:
flux = np.empty(len(time))
flux[:] = np.nan
if not isinstance(flux, Quantity):
flux = Quantity(flux, deprecated_kws.get("flux_unit"))
if flux_err is None:
flux_err = np.empty(len(flux))
flux_err[:] = np.nan
if not isinstance(flux_err, Quantity):
flux_err = Quantity(flux_err, flux.unit)
# Backwards compatibility with Lightkurve v1.x
# Ensure attributes are set if passed via deprecated kwargs
for kw in deprecated_kws:
if kw not in self.meta:
self.meta[kw.upper()] = deprecated_kws[kw]
# Ensure all required columns are in the right order
with self._delay_required_column_checks():
for idx, col in enumerate(self._required_columns):
if col in self.colnames:
self.remove_column(col)
self.add_column(locals()[col], index=idx, name=col)
# Ensure columns are set if passed via deprecated kwargs
for kw in deprecated_column_kws:
if kw not in self.meta and kw not in self.columns:
self.add_column(deprecated_column_kws[kw], name=kw)
# Ensure flux and flux_err have the same units
if self["flux"].unit != self["flux_err"].unit:
raise ValueError("flux and flux_err must have the same units")
self._new_attributes_relax = False
self._required_columns_relax = False
self._check_required_columns()
def __getattr__(self, name, **kwargs):
"""Expose all columns and meta keywords as attributes."""
if name in self.__dict__:
return self.__dict__[name]
elif name in self.__class__.__dict__:
return self.__class__.__dict__[name].__get__(self)
elif name in self.columns:
return self[name]
elif "_meta" in self.__dict__:
if name in self.__dict__["_meta"]:
return self.__dict__["_meta"][name]
elif name.upper() in self.__dict__["_meta"]:
return self.__dict__["_meta"][name.upper()]
raise AttributeError(f"object has no attribute {name}")
def __setattr__(self, name, value, **kwargs):
"""To get copied, attributes have to be stored in the meta dictionary!"""
to_set_as_attr = False
if name in self.__dict__:
to_set_as_attr = True
elif name == "time":
self["time"] = value # astropy will convert value to Time if needed
elif ("columns" in self.__dict__) and (name in self.__dict__["columns"]):
self.replace_column(name, value)
elif "_meta" in self.__dict__:
if name in self.__dict__["_meta"]:
self.__dict__["_meta"][name] = value
elif name.upper() in self.__dict__["_meta"]:
self.__dict__["_meta"][name.upper()] = value
else:
to_set_as_attr = True
else:
to_set_as_attr = True
if to_set_as_attr:
if (
name not in self.__dict__
and not name.startswith("_")
and not self._new_attributes_relax
and name != 'meta'
):
warnings.warn(
(
"Lightkurve doesn't allow columns or meta values to be created via a new attribute name."
"A new attribute is created. It will not be carried over when the object is copied."
" - see https://docs.lightkurve.org/reference/api/lightkurve.LightCurve.html"
),
UserWarning,
stacklevel=2,
)
super().__setattr__(name, value, **kwargs)
def _repr_simple_(self) -> str:
"""Returns a simple __repr__.
Used by `LightCurveCollection`.
"""
result = f"<{self.__class__.__name__}"
if "LABEL" in self.meta:
result += f" LABEL=\"{self.meta.get('LABEL')}\""
for kw in ["QUARTER", "CAMPAIGN", "SECTOR", "AUTHOR", "FLUX_ORIGIN"]:
if kw in self.meta:
result += f" {kw}={self.meta.get(kw)}"
result += ">"
return result
def _base_repr_(self, html=False, descr_vals=None, **kwargs):
"""Defines the description shown by `__repr__` and `_html_repr_`."""
if descr_vals is None:
descr_vals = [self.__class__.__name__]
if self.masked:
descr_vals.append("masked=True")
descr_vals.append("length={}".format(len(self)))
if "LABEL" in self.meta:
descr_vals.append(f"LABEL=\"{self.meta.get('LABEL')}\"")
for kw in ["QUARTER", "CAMPAIGN", "SECTOR", "AUTHOR", "FLUX_ORIGIN"]:
if kw in self.meta:
descr_vals.append(f"{kw}={self.meta.get(kw)}")
return super()._base_repr_(html=html, descr_vals=descr_vals, **kwargs)
# Define `time`, `flux`, `flux_err` as class attributes to enable IDE
# of these required columns auto-completion.
@property
def time(self) -> Time:
"""Time values stored as an AstroPy `~astropy.time.Time` object."""
return self["time"]
@time.setter
def time(self, time):
self["time"] = time
@property
def flux(self) -> Quantity:
"""Brightness values stored as an AstroPy `~astropy.units.Quantity` object."""
return self["flux"]
@flux.setter
def flux(self, flux):
self["flux"] = flux
@property
def flux_err(self) -> Quantity:
"""Brightness uncertainties stored as an AstroPy `~astropy.units.Quantity` object."""
return self["flux_err"]
@flux_err.setter
def flux_err(self, flux_err):
self["flux_err"] = flux_err
def select_flux(self, flux_column, flux_err_column=None):
"""Assign a different column to be the flux column.
This method returns a copy of the LightCurve in which the ``flux``
and ``flux_err`` columns have been replaced by the values contained
in a different column.
Parameters
----------
flux_column : str
Name of the column that should become the 'flux' column.
flux_err_column : str or `None`
Name of the column that should become the 'flux_err' column.
By default, the column will be used that is obtained by adding the
suffix "_err" to the value of ``flux_column``. If such a
column does not exist, ``flux_err`` will be populated with NaN values.
Returns
-------
lc : LightCurve
Copy of the ``LightCurve`` object with the new flux values assigned.
Examples
--------
You can use this function to change the flux data on which most Lightkurve
features operate. For example, to view a periodogram based on the "sap_flux"
column in a TESS light curve, use::
>>> lc.select_flux("sap_flux").to_periodogram("lombscargle").plot() # doctest: +SKIP
"""
# Input validation
if flux_column not in self.columns:
raise ValueError(f"'{flux_column}' is not a column")
if flux_err_column and flux_err_column not in self.columns:
raise ValueError(f"'{flux_err_column}' is not a column")
lc = self.copy()
lc["flux"] = lc[flux_column]
if flux_err_column: # not None
lc["flux_err"] = lc[flux_err_column]
else:
# if `flux_err_column` is unspecified, we attempt to use
# f"{flux_column}_err" if it exists
flux_err_column = f"{flux_column}_err"
if flux_err_column in lc.columns:
lc["flux_err"] = lc[flux_err_column]
else:
lc["flux_err"][:] = np.nan
lc.meta['FLUX_ORIGIN'] = flux_column
normalized_new_flux = lc["flux"].unit is None or lc["flux"].unit is u.dimensionless_unscaled
# Note: here we assume unitless flux means it's normalized
# it's not exactly true in many constructed lightcurves in unit test
# but the assumption should hold for any real world use cases, e.g. TESS QLP
if normalized_new_flux:
lc.meta["NORMALIZED"] = normalized_new_flux
else:
# remove it altogether.
# Setting to False would suffice;
# but in typical non-normalized LC, the header will not be there at all.
lc.meta.pop("NORMALIZED", None)
return lc
# Define deprecated attributes for compatibility with Lightkurve v1.x:
@property
@deprecated(
"2.0", alternative="time.format", warning_type=LightkurveDeprecationWarning
)
def time_format(self):
return self.time.format
@property
@deprecated(
"2.0", alternative="time.scale", warning_type=LightkurveDeprecationWarning
)
def time_scale(self):
return self.time.scale
@property
@deprecated("2.0", alternative="time", warning_type=LightkurveDeprecationWarning)
def astropy_time(self):
return self.time
@property
@deprecated(
"2.0", alternative="flux.unit", warning_type=LightkurveDeprecationWarning
)
def flux_unit(self):
return self.flux.unit
@property
@deprecated("2.0", alternative="flux", warning_type=LightkurveDeprecationWarning)
def flux_quantity(self):
return self.flux
@property
@deprecated(
"2.0",
alternative="fits.open(lc.filename)",
warning_type=LightkurveDeprecationWarning,
)
def hdu(self):
return fits.open(self.filename)
@property
@deprecated("2.0", warning_type=LightkurveDeprecationWarning)
def SAP_FLUX(self):
"""A copy of the light curve in which `lc.flux = lc.sap_flux`
and `lc.flux_err = lc.sap_flux_err`. It is provided for backwards-
compatibility with Lightkurve v1.x and will be removed soon."""
lc = self.copy()
lc["flux"] = lc["sap_flux"]
lc["flux_err"] = lc["sap_flux_err"]
return lc
@property
@deprecated("2.0", warning_type=LightkurveDeprecationWarning)
def PDCSAP_FLUX(self):
"""A copy of the light curve in which `lc.flux = lc.pdcsap_flux`
and `lc.flux_err = lc.pdcsap_flux_err`. It is provided for backwards-
compatibility with Lightkurve v1.x and will be removed soon."""
lc = self.copy()
lc["flux"] = lc["pdcsap_flux"]
lc["flux_err"] = lc["pdcsap_flux_err"]
return lc
def __add__(self, other):
newlc = self.copy()
if isinstance(other, LightCurve):
if len(self) != len(other):
raise ValueError(
"Cannot add LightCurve objects because "
"they do not have equal length ({} vs {})."
"".format(len(self), len(other))
)
if np.any(self.time != other.time):
warnings.warn(
"Two LightCurve objects with inconsistent time "
"values are being added.",
LightkurveWarning,
)
newlc.flux = self.flux + other.flux
newlc.flux_err = np.hypot(self.flux_err, other.flux_err)
else:
newlc.flux = self.flux + other
return newlc
def __radd__(self, other):
return self.__add__(other)
def __sub__(self, other):
return self.__add__(-1 * other)
def __rsub__(self, other):
return (-1 * self).__add__(other)
def __mul__(self, other):
newlc = self.copy()
if isinstance(other, LightCurve):
if len(self) != len(other):
raise ValueError(
"Cannot multiply LightCurve objects because "
"they do not have equal length ({} vs {})."
"".format(len(self), len(other))
)
if np.any(self.time != other.time):
warnings.warn(
"Two LightCurve objects with inconsistent time "
"values are being multiplied.",
LightkurveWarning,
)
newlc.flux = self.flux * other.flux
# Applying standard uncertainty propagation, cf.
# https://en.wikipedia.org/wiki/Propagation_of_uncertainty#Example_formulae
newlc.flux_err = abs(newlc.flux) * np.hypot(
self.flux_err / self.flux, other.flux_err / other.flux
)
elif isinstance(
other, (u.UnitBase, u.FunctionUnitBase)
): # cf. astropy/issues/6517
newlc.flux = other * self.flux
newlc.flux_err = other * self.flux_err
else:
newlc.flux = other * self.flux
newlc.flux_err = abs(other) * self.flux_err
return newlc
def __rmul__(self, other):
return self.__mul__(other)
def __truediv__(self, other):
return self.__mul__(1.0 / other)
def __rtruediv__(self, other):
newlc = self.copy()
if isinstance(other, LightCurve):
if len(self) != len(other):
raise ValueError(
"Cannot divide LightCurve objects because "
"they do not have equal length ({} vs {})."
"".format(len(self), len(other))
)
if np.any(self.time != other.time):
warnings.warn(
"Two LightCurve objects with inconsistent time "
"values are being divided.",
LightkurveWarning,
)
newlc.flux = other.flux / self.flux
newlc.flux_err = abs(newlc.flux) * np.hypot(
self.flux_err / self.flux, other.flux_err / other.flux
)
else:
newlc.flux = other / self.flux
newlc.flux_err = abs((other * self.flux_err) / (self.flux ** 2))
return newlc
def __div__(self, other):
return self.__truediv__(other)
def __rdiv__(self, other):
return self.__rtruediv__(other)
def show_properties(self):
"""Prints a description of all non-callable attributes.
Prints in order of type (ints, strings, lists, arrays, others).
"""
attrs = {}
deprecated_properties = list(self._deprecated_keywords)
deprecated_properties += [
"flux_quantity",
"SAP_FLUX",
"PDCSAP_FLUX",
"astropy_time",
"hdu",
]
for attr in dir(self):
if not attr.startswith("_") and attr not in deprecated_properties:
try:
res = getattr(self, attr)
except Exception:
continue
if callable(res):
continue
attrs[attr] = {"res": res}
if isinstance(res, int):
attrs[attr]["print"] = "{}".format(res)
attrs[attr]["type"] = "int"
elif isinstance(res, np.ndarray):
attrs[attr]["print"] = "array {}".format(res.shape)
attrs[attr]["type"] = "array"
elif isinstance(res, list):
attrs[attr]["print"] = "list length {}".format(len(res))
attrs[attr]["type"] = "list"
elif isinstance(res, str):
if res == "":
attrs[attr]["print"] = "{}".format("None")
else:
attrs[attr]["print"] = "{}".format(res)
attrs[attr]["type"] = "str"
elif attr == "wcs":
attrs[attr]["print"] = "astropy.wcs.wcs.WCS"
attrs[attr]["type"] = "other"
else:
attrs[attr]["print"] = "{}".format(type(res))
attrs[attr]["type"] = "other"
output = Table(names=["Attribute", "Description"], dtype=[object, object])
idx = 0
types = ["int", "str", "list", "array", "other"]
for typ in types:
for attr, dic in attrs.items():
if dic["type"] == typ:
output.add_row([attr, dic["print"]])
idx += 1
output.pprint(max_lines=-1, max_width=-1)
def append(self, others, inplace=False):
"""Append one or more other `LightCurve` object(s) to this one.
Parameters
----------
others : `LightCurve`, or list of `LightCurve`
Light curve(s) to be appended to the current one.
inplace : bool
If True, change the current `LightCurve` instance in place instead
of creating and returning a new one. Defaults to False.
Returns
-------
new_lc : `LightCurve`
Light curve which has the other light curves appended to it.
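Examples
--------
A minimal sketch with illustrative values:
>>> import lightkurve as lk
>>> lc1 = lk.LightCurve(time=[1, 2, 3], flux=[1.0, 1.0, 1.0])
>>> lc2 = lk.LightCurve(time=[4, 5, 6], flux=[1.0, 1.0, 1.0])
>>> lc1.append(lc2)  # doctest: +SKIP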
"""
if inplace:
raise ValueError(
"the `inplace` parameter is no longer supported "
"as of Lightkurve v2.0"
)
if not hasattr(others, "__iter__"):
others = (others,)
# Re-use LightCurveCollection.stitch() to avoid code duplication
from .collections import LightCurveCollection # avoid circular import
return LightCurveCollection((self, *others)).stitch(corrector_func=None)
def flatten(
self,
window_length=101,
polyorder=2,
return_trend=False,
break_tolerance=5,
niters=3,
sigma=3,
mask=None,
**kwargs,
):
"""Removes the low frequency trend using scipy's Savitzky-Golay filter.
This method wraps `scipy.signal.savgol_filter`.
Parameters
----------
window_length : int
The length of the filter window (i.e. the number of coefficients).
``window_length`` must be a positive odd integer.
polyorder : int
The order of the polynomial used to fit the samples. ``polyorder``
must be less than window_length.
return_trend : bool
If `True`, the method will return a tuple of two elements
(flattened_lc, trend_lc) where trend_lc is the removed trend.
break_tolerance : int
If there are large gaps in time, flatten will split the flux into
several sub-lightcurves and apply `savgol_filter` to each
individually. A gap is defined as a period in time larger than
`break_tolerance` times the median gap. To disable this feature,
set `break_tolerance` to None.
niters : int
Number of iterations of sigma clipping and flattening. If greater than
one, the flattening is repeated, removing outliers each time.
sigma : int
Number of sigma above which to remove outliers from the flattened light curve.
mask : boolean array with length of self.time
Boolean array to mask data with before flattening. Flux values where
mask is True will not be used to flatten the data. An interpolated
result will be provided for these points. Use this mask to remove
data you want to preserve, e.g. transits.
**kwargs : dict
Dictionary of arguments to be passed to `scipy.signal.savgol_filter`.
Returns
-------
flatten_lc : `LightCurve`
New light curve object with long-term trends removed.
If ``return_trend`` is set to ``True``, this method will also return:
trend_lc : `LightCurve`
New light curve object containing the trend that was removed.
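Examples
--------
A minimal sketch (``lc`` is assumed to be an existing `LightCurve`):
>>> flat_lc, trend_lc = lc.flatten(window_length=401, return_trend=True)  # doctest: +SKIP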
"""
if mask is None:
mask = np.ones(len(self.time), dtype=bool)
else:
# Deep copy ensures we don't change the original.
mask = deepcopy(~mask)
# Add NaNs & outliers to the mask
extra_mask = np.isfinite(self.flux)
extra_mask &= np.nan_to_num(np.abs(self.flux - np.nanmedian(self.flux))) <= (
np.nanstd(self.flux) * sigma
)
# In astropy>=5.0, extra_mask is a masked array
if hasattr(extra_mask, 'mask'):
mask &= extra_mask.filled(False)
else: # support astropy<5.0
mask &= extra_mask
for _ in range(niters):
if break_tolerance is None:
break_tolerance = np.nan
if polyorder >= window_length:
polyorder = window_length - 1
log.warning(
"polyorder must be smaller than window_length, "
"using polyorder={}.".format(polyorder)
)
# Split the lightcurve into segments by finding large gaps in time
dt = self.time.value[mask][1:] - self.time.value[mask][0:-1]
with warnings.catch_warnings(): # Ignore warnings due to NaNs
warnings.simplefilter("ignore", RuntimeWarning)
cut = np.where(dt > break_tolerance * np.nanmedian(dt))[0] + 1
low = np.append([0], cut)
high = np.append(cut, len(self.time[mask]))
# Then, apply the savgol_filter to each segment separately
trend_signal = Quantity(np.zeros(len(self.time[mask])), unit=self.flux.unit)
for l, h in zip(low, high):
# Reduce `window_length` and `polyorder` for short segments;
# this prevents `savgol_filter` from raising an exception
# If the segment is too short, just take the median
if np.any([window_length > (h - l), (h - l) < break_tolerance]):
trend_signal[l:h] = np.nanmedian(self.flux[mask][l:h])
else:
# Scipy outputs a warning here that is not useful, will be fixed in version 1.2
with warnings.catch_warnings():
warnings.simplefilter("ignore", FutureWarning)
trsig = savgol_filter(
x=self.flux.value[mask][l:h],
window_length=window_length,
polyorder=polyorder,
**kwargs,
)
trend_signal[l:h] = Quantity(trsig, trend_signal.unit)
# Ignore outliers; note we add `1e-14` below to avoid detecting
# outliers which are merely caused by numerical noise.
mask1 = np.nan_to_num(np.abs(self.flux[mask] - trend_signal)) < (
np.nanstd(self.flux[mask] - trend_signal) * sigma
+ Quantity(1e-14, self.flux.unit)
)
f = interp1d(
self.time.value[mask][mask1],
trend_signal[mask1],
fill_value="extrapolate",
)
trend_signal = Quantity(f(self.time.value), self.flux.unit)
# In astropy>=5.0, mask1 is a masked array
if hasattr(mask1, 'mask'):
mask[mask] &= mask1.filled(False)
else: # support astropy<5.0
mask[mask] &= mask1
flatten_lc = self.copy()
with warnings.catch_warnings():
# ignore invalid division warnings
warnings.simplefilter("ignore", RuntimeWarning)
flatten_lc.flux = flatten_lc.flux / trend_signal.value
flatten_lc.flux_err = flatten_lc.flux_err / trend_signal.value
if return_trend:
trend_lc = self.copy()
trend_lc.flux = trend_signal
return flatten_lc, trend_lc
return flatten_lc
@deprecated_renamed_argument(
"transit_midpoint",
"epoch_time",
"2.0",
warning_type=LightkurveDeprecationWarning,
)
@deprecated_renamed_argument(
"t0", "epoch_time", "2.0", warning_type=LightkurveDeprecationWarning
)
def fold(
self,
period=None,
epoch_time=None,
epoch_phase=0,
wrap_phase=None,
normalize_phase=False,
):
"""Returns a `FoldedLightCurve` object folded on a period and epoch.
This method is identical to AstroPy's `~astropy.timeseries.TimeSeries.fold()`
method, except it returns a `FoldedLightCurve` object which offers
convenient plotting methods.
Parameters
----------
period : float `~astropy.units.Quantity`
The period to use for folding. If a ``float`` is passed we'll
assume it is in units of days.
epoch_time : `~astropy.time.Time`
The time to use as the reference epoch, at which the relative time
offset / phase will be ``epoch_phase``. Defaults to the first time
in the time series.
epoch_phase : float or `~astropy.units.Quantity`
Phase of ``epoch_time``. If ``normalize_phase`` is `True`, this
should be a dimensionless value, while if ``normalize_phase`` is
``False``, this should be a `~astropy.units.Quantity` with time
units. Defaults to 0.
wrap_phase : float or `~astropy.units.Quantity`
The value of the phase above which values are wrapped back by one
period. If ``normalize_phase`` is `True`, this should be a
dimensionless value, while if ``normalize_phase`` is ``False``,
this should be a `~astropy.units.Quantity` with time units.
Defaults to half the period, so that the resulting time series goes
from ``-period / 2`` to ``period / 2`` (if ``normalize_phase`` is
`False`) or -0.5 to 0.5 (if ``normalize_phase`` is `True`).
normalize_phase : bool
If `False` phase is returned as `~astropy.time.TimeDelta`,
otherwise as a dimensionless `~astropy.units.Quantity`.
Returns
-------
folded_lightcurve : `FoldedLightCurve`
The folded light curve object in which the ``time`` column
holds the phase values.
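Examples
--------
A minimal sketch (the period is an illustrative value, assumed in days):
>>> folded_lc = lc.fold(period=3.5)  # doctest: +SKIP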
"""
# Lightkurve v1.x assumed that `period` was given in days if no unit
# was specified. We maintain this behavior for backwards-compatibility.
if period is not None and not isinstance(period, Quantity):
period *= u.day
if epoch_time is not None and not isinstance(epoch_time, Time):
epoch_time = Time(
epoch_time, format=self.time.format, scale=self.time.scale
)
if (
epoch_phase is not None
and not isinstance(epoch_phase, Quantity)
and not normalize_phase
):
epoch_phase *= u.day
if wrap_phase is not None and not isinstance(wrap_phase, Quantity):
wrap_phase *= u.day
# Warn if `epoch_time` appears to use the wrong format
if epoch_time is not None and epoch_time.value > 2450000:
if self.time.format == "bkjd":
warnings.warn(
"`epoch_time` appears to be given in JD, "
"however the light curve time uses BKJD "
"(i.e. JD - 2454833).",
LightkurveWarning,
)
elif self.time.format == "btjd":
warnings.warn(
"`epoch_time` appears to be given in JD, "
"however the light curve time uses BTJD "
"(i.e. JD - 2457000).",
LightkurveWarning,
)
ts = super().fold(
period=period,
epoch_time=epoch_time,
epoch_phase=epoch_phase,
wrap_phase=wrap_phase,
normalize_phase=normalize_phase,
)
# The folded time would pass the `TimeSeries` validation check if
# `normalize_phase=True`, so creating a `FoldedLightCurve` object
# requires the following three-step workaround:
# 1. Give the folded light curve a valid time column again
with ts._delay_required_column_checks():
folded_time = ts.time.copy()
ts.remove_column("time")
ts.add_column(self.time, name="time", index=0)
# 2. Create the folded object
lc = FoldedLightCurve(data=ts)
# 3. Restore the folded time
with lc._delay_required_column_checks():
lc.remove_column("time")
lc.add_column(folded_time, name="time", index=0)
# Add extra column and meta data specific to FoldedLightCurve
lc.add_column(
self.time.copy(), name="time_original", index=len(self._required_columns)
)
lc.meta["PERIOD"] = period
lc.meta["EPOCH_TIME"] = epoch_time
lc.meta["EPOCH_PHASE"] = epoch_phase
lc.meta["WRAP_PHASE"] = wrap_phase
lc.meta["NORMALIZE_PHASE"] = normalize_phase
lc.sort("time")
return lc
def normalize(self, unit="unscaled"):
"""Returns a normalized version of the light curve.
The normalized light curve is obtained by dividing the ``flux`` and
``flux_err`` object attributes by the median flux.
Optionally, the result will be multiplied by 1e2 (if `unit='percent'`),
1e3 (`unit='ppt'`), or 1e6 (`unit='ppm'`).
Parameters
----------
unit : 'unscaled', 'percent', 'ppt', 'ppm'
The desired relative units of the normalized light curve;
'ppt' means 'parts per thousand', 'ppm' means 'parts per million'.
Examples
--------
>>> import lightkurve as lk
>>> lc = lk.LightCurve(time=[1, 2, 3], flux=[25945.7, 25901.5, 25931.2], flux_err=[6.8, 4.6, 6.2])
>>> normalized_lc = lc.normalize()
>>> normalized_lc.flux
<Quantity [1.00055917, 0.99885466, 1. ]>
>>> normalized_lc.flux_err
<Quantity [0.00026223, 0.00017739, 0.00023909]>
Returns
-------
normalized_lightcurve : `LightCurve`
A new light curve object in which ``flux`` and ``flux_err`` have
been divided by the median flux.
Warns
-----
LightkurveWarning
If the median flux is negative or within half a standard deviation
from zero.
"""
validate_method(unit, ["unscaled", "percent", "ppt", "ppm"])
median_flux = np.nanmedian(self.flux)
std_flux = np.nanstd(self.flux)
# If the median flux is within half a standard deviation from zero, the
# light curve is likely zero-centered and normalization makes no sense.
if (median_flux == 0) or (
np.isfinite(std_flux) and (np.abs(median_flux) < 0.5 * std_flux)
):
warnings.warn(
"The light curve appears to be zero-centered "
"(median={:.2e} +/- {:.2e}); `normalize()` will divide "
"the light curve by a value close to zero, which is "
"probably not what you want."
"".format(median_flux, std_flux),
LightkurveWarning,
)
# If the median flux is negative, normalization will invert the light
# curve and makes no sense.
if median_flux < 0:
warnings.warn(
"The light curve has a negative median flux ({:.2e});"
" `normalize()` will therefore divide by a negative "
"number and invert the light curve, which is probably"
"not what you want".format(median_flux),
LightkurveWarning,
)
# Create a new light curve instance and normalize its values
lc = self.copy()
lc.flux = lc.flux / median_flux
lc.flux_err = lc.flux_err / median_flux
if not lc.flux.unit:
lc.flux *= u.dimensionless_unscaled
if not lc.flux_err.unit:
lc.flux_err *= u.dimensionless_unscaled
# Set the desired relative (dimensionless) units
if unit == "percent":
lc.flux = lc.flux.to(u.percent)
lc.flux_err = lc.flux_err.to(u.percent)
elif unit in ("ppt", "ppm"):
lc.flux = lc.flux.to(unit)
lc.flux_err = lc.flux_err.to(unit)
lc.meta["NORMALIZED"] = True
return lc
def remove_nans(self, column: str = "flux"):
"""Removes cadences where ``column`` is a NaN.
Parameters
----------
column : str
Column to check for NaNs. Defaults to ``'flux'``.
Returns
-------
clean_lightcurve : `LightCurve`
A new light curve object from which NaNs fluxes have been removed.
Examples
--------
>>> import lightkurve as lk
>>> import numpy as np
>>> lc = lk.LightCurve({'time': [1, 2, 3], 'flux': [1., np.nan, 1.]})
>>> lc.remove_nans()
<LightCurve length=2>
time flux flux_err
<BLANKLINE>
Time float64 float64
---- ------- --------
1.0 1.0 nan
3.0 1.0 nan
"""
return self[~np.isnan(self[column])] # This will return a sliced copy
def fill_gaps(self, method: str = "gaussian_noise"):
"""Fill in gaps in time.
By default, the gaps will be filled with random white Gaussian noise
distributed according to
:math:`\mathcal{N} (\mu=\overline{\mathrm{flux}}, \sigma=\mathrm{CDPP})`.
No other methods are supported at this time.
Parameters
----------
method : string {'gaussian_noise'}
Method to use for gap filling. Fills with Gaussian noise by default.
Returns
-------
filled_lightcurve : `LightCurve`
A new light curve object in which all NaN values and gaps in time
have been filled.
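Examples
--------
A minimal sketch (``lc`` is assumed to be an existing `LightCurve`):
>>> filled_lc = lc.fill_gaps()  # doctest: +SKIP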
"""
lc = self.copy().remove_nans()
newdata = {}
# Find missing time points
# Most precise method, taking into account time variation due to orbit
if hasattr(lc, "cadenceno"):
dt = lc.time.value - np.median(np.diff(lc.time.value)) * lc.cadenceno.value
ncad = np.arange(lc.cadenceno.value[0], lc.cadenceno.value[-1] + 1, 1)
in_original = np.in1d(ncad, lc.cadenceno.value)
ncad = ncad[~in_original]
ndt = np.interp(ncad, lc.cadenceno.value, dt)
ncad = np.append(ncad, lc.cadenceno.value)
ndt = np.append(ndt, dt)
ncad, ndt = ncad[np.argsort(ncad)], ndt[np.argsort(ncad)]
ntime = ndt + np.median(np.diff(lc.time.value)) * ncad
newdata["cadenceno"] = ncad
else:
# Less precise method
dt = np.nanmedian(lc.time.value[1::] - lc.time.value[:-1:])
ntime = [lc.time.value[0]]
for t in lc.time.value[1::]:
prevtime = ntime[-1]
while (t - prevtime) > 1.2 * dt:
ntime.append(prevtime + dt)
prevtime = ntime[-1]
ntime.append(t)
ntime = np.asarray(ntime, float)
in_original = np.in1d(ntime, lc.time.value)
# Fill in time points
newdata["time"] = Time(ntime, format=lc.time.format, scale=lc.time.scale)
f = np.zeros(len(ntime))
f[in_original] = np.copy(lc.flux)
fe = np.zeros(len(ntime))
fe[in_original] = np.copy(lc.flux_err)
# Temporary workaround for issue #1172. TODO: remove the `if` statement
# below once we adopt AstroPy >=5.0.3 as a minimum dependency.
if hasattr(lc.flux_err, 'mask'):
fe[~in_original] = np.interp(ntime[~in_original], lc.time.value, lc.flux_err.unmasked)
else:
fe[~in_original] = np.interp(ntime[~in_original], lc.time.value, lc.flux_err)
if method == "gaussian_noise":
try:
std = lc.estimate_cdpp().to(lc.flux.unit).value
except Exception:
std = np.nanstd(lc.flux.value)
f[~in_original] = np.random.normal(
np.nanmean(lc.flux.value), std, (~in_original).sum()
)
else:
raise NotImplementedError("No such method as {}".format(method))
newdata["flux"] = Quantity(f, lc.flux.unit)
newdata["flux_err"] = Quantity(fe, lc.flux_err.unit)
if hasattr(lc, "quality"):
quality = np.zeros(len(ntime), dtype=lc.quality.dtype)
quality[in_original] = np.copy(lc.quality)
quality[~in_original] += 65536
newdata["quality"] = quality
"""
# TODO: add support for other columns
for column in lc.columns:
if column in ("time", "flux", "flux_err", "quality"):
continue
old_values = lc[column]
new_values = np.empty(len(ntime), dtype=old_values.dtype)
new_values[~in_original] = np.nan
new_values[in_original] = np.copy(old_values)
newdata[column] = new_values
"""
return LightCurve(data=newdata, meta=self.meta)
def remove_outliers(
self, sigma=5.0, sigma_lower=None, sigma_upper=None, return_mask=False, **kwargs
):
"""Removes outlier data points using sigma-clipping.
This method returns a new `LightCurve` object from which data points
are removed if their flux values are greater or smaller than the median
flux by at least ``sigma`` times the standard deviation.
Sigma-clipping works by iterating over data points, each time rejecting
values that are discrepant by more than a specified number of standard
deviations from a center value. If the data contains invalid values
(NaNs or infs), they are automatically masked before performing the
sigma clipping.
.. note::
This function is a convenience wrapper around
`astropy.stats.sigma_clip()` and provides the same functionality.
Any extra arguments passed to this method will be passed on to
``sigma_clip``.
Parameters
----------
sigma : float
The number of standard deviations to use for both the lower and
upper clipping limit. These limits are overridden by
``sigma_lower`` and ``sigma_upper``, if input. Defaults to 5.
sigma_lower : float or None
The number of standard deviations to use as the lower bound for
the clipping limit. Can be set to float('inf') in order to avoid
clipping outliers below the median at all. If `None` then the
value of ``sigma`` is used. Defaults to `None`.
sigma_upper : float or None
The number of standard deviations to use as the upper bound for
the clipping limit. Can be set to float('inf') in order to avoid
clipping outliers above the median at all. If `None` then the
value of ``sigma`` is used. Defaults to `None`.
return_mask : bool
Whether or not to return a mask (i.e. a boolean array) indicating
which data points were removed. Entries marked as `True` in the
mask are considered outliers. This mask is not returned by default.
**kwargs : dict
Dictionary of arguments to be passed to `astropy.stats.sigma_clip`.
Returns
-------
clean_lc : `LightCurve`
A new light curve object from which outlier data points have been
removed.
outlier_mask : NumPy array, optional
Boolean array flagging which cadences were removed.
Only returned if `return_mask=True`.
Examples
--------
This example generates a new light curve in which all points
that are more than 1 standard deviation from the median are removed::
>>> lc = LightCurve(time=[1, 2, 3, 4, 5], flux=[1, 1000, 1, -1000, 1])
>>> lc_clean = lc.remove_outliers(sigma=1)
>>> lc_clean.time
<Time object: scale='tdb' format='jd' value=[1. 3. 5.]>
>>> lc_clean.flux
<Quantity [1., 1., 1.]>
Instead of specifying `sigma`, you may specify separate `sigma_lower`
and `sigma_upper` parameters to remove only outliers above or below
the median. For example::
>>> lc = LightCurve(time=[1, 2, 3, 4, 5], flux=[1, 1000, 1, -1000, 1])
>>> lc_clean = lc.remove_outliers(sigma_lower=float('inf'), sigma_upper=1)
>>> lc_clean.time
<Time object: scale='tdb' format='jd' value=[1. 3. 4. 5.]>
>>> lc_clean.flux
<Quantity [ 1., 1., -1000., 1.]>
Optionally, you may use the `return_mask` parameter to return a boolean
array which flags the outliers identified by the method. For example::
>>> lc_clean, mask = lc.remove_outliers(sigma=1, return_mask=True)
>>> mask
array([False, True, False, True, False])
"""
# Importing `sigma_clip` at module level is slow, so we use a local
# import here.
from astropy.stats.sigma_clipping import sigma_clip
# First, we create the outlier mask using AstroPy's sigma_clip function
with warnings.catch_warnings(): # Ignore warnings due to NaNs or Infs
warnings.simplefilter("ignore")
outlier_mask = sigma_clip(
data=self.flux,
sigma=sigma,
sigma_lower=sigma_lower,
sigma_upper=sigma_upper,
**kwargs,
).mask
# Second, we return the masked light curve and optionally the mask itself
if return_mask:
return self.copy()[~outlier_mask], outlier_mask
return self.copy()[~outlier_mask]
@deprecated_renamed_argument(
"binsize",
new_name=None,
since="2.0",
warning_type=LightkurveDeprecationWarning,
alternative="time_bin_size",
)
def bin(
self,
time_bin_size=None,
time_bin_start=None,
time_bin_end=None,
n_bins=None,
aggregate_func=None,
bins=None,
binsize=None,
):
"""Bins a lightcurve in equally-spaced bins in time.
If the original light curve contains flux uncertainties (``flux_err``),
the binned lightcurve will report the root-mean-square error.
If no uncertainties are included, the binned curve will return the
standard deviation of the data.
Parameters
----------
time_bin_size : `~astropy.units.Quantity` or `~astropy.time.TimeDelta`, optional
The time interval for the binned time series - this is either a scalar
value (in which case all time bins will be assumed to have the same
duration) or an array of values (in which case each time bin can
have a different duration). If this argument is provided,
``time_bin_end`` should not be provided.
(Default: 0.5 days; default unit: days.)
time_bin_start : `~astropy.time.Time` or iterable, optional
The start time for the binned time series - this can be either given
directly as a `~astropy.time.Time` array or as any iterable that
initializes the `~astropy.time.Time` class. This can also be a scalar
value if ``time_bin_size`` is provided. Defaults to the first
time in the sampled time series.
time_bin_end : `~astropy.time.Time` or iterable, optional
The times of the end of each bin - this can be either given directly as
a `~astropy.time.Time` array or as any iterable that initializes the
`~astropy.time.Time` class. This can only be given if ``time_bin_start``
is an array of values. If ``time_bin_end`` is a scalar, time bins are
assumed to be contiguous, such that the end of each bin is the start
of the next one, and ``time_bin_end`` gives the end time for the last
bin. If ``time_bin_end`` is an array, the time bins do not need to be
contiguous. If this argument is provided, ``time_bin_size`` should not
be provided. This option, like the iterable form of ``time_bin_start``,
requires Astropy 5.0.
n_bins : int, optional
The number of bins to use. Defaults to the number needed to fit all
the original points. Note that this will create this number of bins
of length ``time_bin_size`` independent of the light curve length.
aggregate_func : callable, optional
The function to use for combining points in the same bin. Defaults
to np.nanmean.
bins : int, iterable or str, optional
If an int, this gives the number of bins to divide the light curve into.
In contrast to ``n_bins`` this adjusts the length of ``time_bin_size``
to accommodate the input time series length.
If it is an iterable of ints, it specifies the indices of the bin edges.
If a string, it must be one of 'blocks', 'knuth', 'scott' or 'freedman'
defining a method of automatically determining an optimal bin size.
See `~astropy.stats.histogram` for a description of each method.
Note that 'blocks' is not a useful method for regularly sampled data.
binsize : int
In Lightkurve v1.x, the default behavior of `bin()` was to create
bins which contained an equal number of data points in each bin.
This type of binning is discouraged because it usually makes more sense to
create equally-sized bins in time duration, which is the new default
behavior in Lightkurve v2.x. Nevertheless, this `binsize` parameter
allows users to simulate the old behavior of Lightkurve v1.x.
For ease of implementation, setting this parameter is identical to passing
``time_bin_size = lc.time[binsize] - lc.time[0]``, which means that
the bins are not guaranteed to contain an identical number of
data points.
Returns
-------
binned_lc : `LightCurve`
A new light curve which has been binned.
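Examples
--------
A minimal sketch binning an (illustrative) light curve into 2-day bins::
>>> import astropy.units as u
>>> lc = LightCurve(time=[1, 2, 3, 4, 5], flux=[1., 2., 3., 4., 5.])
>>> binned_lc = lc.bin(time_bin_size=2 * u.day)  # doctest: +SKIP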
"""
kwargs = dict()
if binsize is not None and bins is not None:
raise ValueError("Only one of ``bins`` and ``binsize`` can be specified.")
elif (binsize is not None or bins is not None) and (
time_bin_size is not None or n_bins is not None
):
raise ValueError(
"``bins`` or ``binsize`` conflicts with "
"``n_bins`` or ``time_bin_size``."
)
elif bins is not None:
if (bins not in ('blocks', 'knuth', 'scott', 'freedman') and
not np.issubdtype(np.array(bins).dtype, np.integer)):
raise TypeError("``bins`` must have integer type.")
elif (isinstance(bins, str) or np.size(bins) != 1) and not _HAS_VAR_BINS:
raise ValueError("Sequence or method for ``bins`` requires Astropy 5.0.")
if time_bin_start is None:
time_bin_start = self.time[0]
if not isinstance(time_bin_start, (Time, TimeDelta)):
if isinstance(self.time, TimeDelta):
time_bin_start = TimeDelta(
time_bin_start, format=self.time.format, scale=self.time.scale
)
else:
time_bin_start = Time(
time_bin_start, format=self.time.format, scale=self.time.scale
)
# Backwards compatibility with Lightkurve v1.x
if time_bin_size is None:
if bins is not None:
if np.size(bins) == 1 and _HAS_VAR_BINS:
# This actually calculates equal-length bins just as the method below;
# should it instead set equal-number bins with binsize=int(len(self) / bins)?
# Get start times in mjd and convert back to original format
bin_starts = calculate_bin_edges(self.time.mjd, bins=bins)[:-1]
time_bin_start = Time(Time(bin_starts, format='mjd'), format=self.time.format)
elif np.size(bins) == 1:
warnings.warn(
'"classic" `bins` require Astropy 5.0; will use constant lengths in time.',
LightkurveWarning)
# Odd memory error in np.searchsorted with pytest-memtest?
if self.time[0] >= time_bin_start:
i = len(self.time)
else:
i = len(self.time) - np.searchsorted(self.time, time_bin_start)
time_bin_size = ((self.time[-1] - time_bin_start) * i /
((i - 1) * bins)).to(u.day)
else:
time_bin_start = self.time[bins[:-1]]
kwargs['time_bin_end'] = self.time[bins[1:]]
elif binsize is not None:
if _HAS_VAR_BINS:
time_bin_start = self.time[::binsize]
else:
warnings.warn(
'`binsize` requires Astropy 5.0 to guarantee equal number of points; '
'will use estimated time lengths for bins.', LightkurveWarning)
if self.time[0] >= time_bin_start:
i = 0
else:
i = np.searchsorted(self.time, time_bin_start)
time_bin_size = (self.time[i + binsize] - self.time[i]).to(u.day)
else:
time_bin_size = 0.5 * u.day
elif not isinstance(time_bin_size, Quantity):
time_bin_size *= u.day
# Call AstroPy's aggregate_downsample
with warnings.catch_warnings():
# ignore uninteresting empty slice warnings
warnings.simplefilter("ignore", (RuntimeWarning, AstropyUserWarning))
ts = aggregate_downsample(
self,
time_bin_size=time_bin_size,
n_bins=n_bins,
time_bin_start=time_bin_start,
aggregate_func=aggregate_func,
**kwargs
)
# If `flux_err` is populated, assume the errors combine as the root-mean-square
if np.any(np.isfinite(self.flux_err)):
rmse_func = (
lambda x: np.sqrt(np.nansum(x ** 2)) / len(np.atleast_1d(x))
if np.any(np.isfinite(x))
else np.nan
)
ts_err = aggregate_downsample(
self,
time_bin_size=time_bin_size,
n_bins=n_bins,
time_bin_start=time_bin_start,
aggregate_func=rmse_func,
)
ts["flux_err"] = ts_err["flux_err"]
# If `flux_err` is unavailable, populate `flux_err` as nanstd(flux)
else:
ts_err = aggregate_downsample(
self,
time_bin_size=time_bin_size,
n_bins=n_bins,
time_bin_start=time_bin_start,
aggregate_func=np.nanstd,
)
ts["flux_err"] = ts_err["flux"]
# Prepare a LightCurve object by ensuring there is a time column
ts._required_columns = []
ts.add_column(ts.time_bin_start + ts.time_bin_size / 2.0, name="time")
# Ensure the required columns appear in the correct order
for idx, colname in enumerate(self.__class__._required_columns):
tmpcol = ts[colname]
ts.remove_column(colname)
ts.add_column(tmpcol, name=colname, index=idx)
return self.__class__(ts, meta=self.meta)
def estimate_cdpp(
self, transit_duration=13, savgol_window=101, savgol_polyorder=2, sigma=5.0
) -> float:
"""Estimate the CDPP noise metric using the Savitzky-Golay (SG) method.
A common estimate of the noise in a lightcurve is the scatter that
remains after all long term trends have been removed. This is the idea
behind the Combined Differential Photometric Precision (CDPP) metric.
The official Kepler Pipeline computes this metric using a wavelet-based
algorithm to calculate the signal-to-noise of the specific waveform of
transits of various durations. In this implementation, we use the
simpler "sgCDPP proxy algorithm" discussed by Gilliland et al
(2011ApJS..197....6G) and Van Cleve et al (2016PASP..128g5002V).
The steps of this algorithm are:
1. Remove low frequency signals using a Savitzky-Golay filter with
window length `savgol_window` and polynomial order `savgol_polyorder`.
2. Remove outliers by rejecting data points which are separated from
the mean by `sigma` times the standard deviation.
3. Compute the standard deviation of a running mean with
a configurable window length equal to `transit_duration`.
We use a running mean (as opposed to block averaging) to strongly
attenuate the signal above 1/transit_duration whilst retaining
the original frequency sampling. Block averaging would set the Nyquist
limit to 1/transit_duration.
Parameters
----------
transit_duration : int, optional
The transit duration in units of number of cadences. This is the
length of the window used to compute the running mean. The default
is 13, which corresponds to a 6.5 hour transit in data sampled at
30-min cadence.
savgol_window : int, optional
Width of the Savitzky-Golay filter in cadences (odd number).
Default value 101 (2.0 days in Kepler Long Cadence mode).
savgol_polyorder : int, optional
Polynomial order of the Savitzky-Golay filter.
The recommended value is 2.
sigma : float, optional
The number of standard deviations to use for clipping outliers.
The default is 5.
Returns
-------
cdpp : float
Savitzky-Golay CDPP noise metric in units of parts-per-million (ppm).
Notes
-----
This implementation is adapted from the Matlab version used by
<NAME> but lacks the normalization factor used there:
svn+ssh://murzim/repo/so/trunk/Develop/jvc/common/compute_SG_noise.m
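Examples
--------
A hedged sketch on a downloaded light curve (requires network access)::
>>> import lightkurve as lk
>>> lc = lk.search_lightcurve("Kepler-10", quarter=3).download()  # doctest: +SKIP
>>> lc.estimate_cdpp()  # doctest: +SKIP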
"""
if not isinstance(transit_duration, int):
raise ValueError(
"transit_duration must be an integer in units "
"number of cadences, got {}.".format(transit_duration)
)
detrended_lc = self.flatten(
window_length=savgol_window, polyorder=savgol_polyorder
)
cleaned_lc = detrended_lc.remove_outliers(sigma=sigma)
with warnings.catch_warnings(): # ignore "already normalized" message
warnings.filterwarnings("ignore", message=".*already.*")
normalized_lc = cleaned_lc.normalize("ppm")
mean = running_mean(data=normalized_lc.flux, window_size=transit_duration)
return np.std(mean)
def query_solar_system_objects(
self,
cadence_mask="outliers",
radius=None,
sigma=3,
location=None,
cache=True,
return_mask=False,
):
"""Returns a list of asteroids or comets which affected the light curve.
Light curves of stars or galaxies are frequently affected by solar
system bodies (e.g. asteroids, comets, planets). These objects can move
across a target's photometric aperture mask on time scales of hours to
days. When they pass through a mask, they tend to cause a brief spike
in the brightness of the target. They can also cause dips by moving
through a local background aperture mask (if any is used).
The artificial spikes and dips introduced by asteroids are frequently
confused with stellar flares, planet transits, etc. This method helps
to identify false signals injected by asteroids by providing a list of
the solar system objects (name, brightness, time) that passed in the
vicinity of the target during the span of the light curve.
This method queries the `SkyBot API <http://vo.imcce.fr/webservices/skybot/>`_,
which returns a list of asteroids/comets/planets given a location, time,
and search cone.
Notes
-----
* This method will use the `ra` and `dec` properties of the `LightCurve`
object to determine the position of the search cone.
* The size of the search cone is 15 spacecraft pixels by default. You
can change this by passing the `radius` parameter (unit: degrees).
* By default, this method will only search points in time during which the light
curve showed 3-sigma outliers in flux. You can override this behavior
and search for specific times by passing `cadence_mask`. See examples for details.
Parameters
----------
cadence_mask : str, or boolean array with length of self.time
mask in time to select which frames or points should be searched for SSOs.
Default "outliers" will search for SSOs at points that are `sigma` from the mean.
"all" will search all cadences. Alternatively, pass a boolean array with values of "True"
for times to search for SSOs.
radius : optional, float
Radius in degrees to search for bodies. If None, will search for
SSOs within 15 pixels.
sigma : optional, float
If `cadence_mask` is set to `"outliers"`, `sigma` will be used to identify
outliers.
location : optional, str
Spacecraft location. Options include `'kepler'` and `'tess'`. Default: `self.mission`
cache : optional, bool
If True will cache the search result in the astropy cache. Set to False
to request the search again.
return_mask: optional, bool
If True will return a boolean mask in time alongside the result
Returns
-------
result : `pandas.DataFrame`
DataFrame object which lists the Solar System objects in frames
that were identified to contain SSOs. Returns `None` if no objects
were found.
Examples
--------
Find if there are SSOs affecting the lightcurve for the given time frame:
>>> df_sso = lc.query_solar_system_objects(cadence_mask=(lc.time.value >= 2014.1) & (lc.time.value <= 2014.9)) # doctest: +SKIP
Find if there are SSOs affecting the lightcurve for all times, but it will be much slower:
>>> df_sso = lc.query_solar_system_objects(cadence_mask='all') # doctest: +SKIP
"""
for attr in ["ra", "dec"]:
if not hasattr(self, "{}".format(attr)):
raise ValueError("Input does not have a `{}` attribute.".format(attr))
# Validate `cadence_mask`
if isinstance(cadence_mask, str):
if cadence_mask == "outliers":
cadence_mask = self.remove_outliers(sigma=sigma, return_mask=True)[1]
elif cadence_mask == "all":
cadence_mask = np.ones(len(self.time)).astype(bool)
else:
raise ValueError("invalid `cadence_mask` string argument")
elif isinstance(cadence_mask, collections.abc.Sequence):
cadence_mask = np.array(cadence_mask)
elif isinstance(cadence_mask, bool):
# Boundary case: an expression like ``(True)`` is evaluated by Python
# as a plain bool rather than a single-element tuple, so wrap it.
cadence_mask = np.array([cadence_mask])
elif not isinstance(cadence_mask, np.ndarray):
raise ValueError("the `cadence_mask` argument is missing or invalid")
# Avoid searching times with NaN flux; this is necessary because e.g.
# `remove_outliers` includes NaNs in its mask.
if hasattr(self.flux, 'mask'):
# Temporary workaround for issue #1172. TODO: remove this `if` statement
# once we adopt AstroPy >=5.0.3 as a minimum dependency
cadence_mask &= ~np.isnan(self.flux.unmasked)
else:
cadence_mask &= ~np.isnan(self.flux)
# Validate `location`
if location is None:
if hasattr(self, "mission") and self.mission:
location = self.mission.lower()
else:
raise ValueError("you must pass a value for `location`.")
# Validate `radius`
if radius is None:
# 15 pixels has been chosen as a reasonable default.
# Comets have long tails which have tripped up users.
if (location == "kepler") | (location == "k2"):
radius = (4 * 15) * u.arcsecond.to(u.deg)
elif location == "tess":
radius = (27 * 15) * u.arcsecond.to(u.deg)
else:
radius = 15 * u.arcsecond.to(u.deg)
res = _query_solar_system_objects(
ra=self.ra,
dec=self.dec,
times=self.time.jd[cadence_mask],
location=location,
radius=radius,
cache=cache,
)
if return_mask:
return res, np.in1d(self.time.jd, res.epoch)
return res
def _create_plot(
self,
method="plot",
column="flux",
ax=None,
normalize=False,
xlabel=None,
ylabel=None,
title="",
style="lightkurve",
show_colorbar=True,
colorbar_label="",
offset=None,
clip_outliers=False,
**kwargs,
) -> matplotlib.axes.Axes:
"""Implements `plot()`, `scatter()`, and `errorbar()` to avoid code duplication.
Parameters
----------
method : str
One of 'plot', 'scatter', or 'errorbar'.
column : str
Name of data column to plot. Default `flux`.
ax : `~matplotlib.axes.Axes`
A matplotlib axes object to plot into. If no axes is provided,
a new one will be generated.
normalize : bool
Normalize the lightcurve before plotting?
xlabel : str
X axis label.
ylabel : str
Y axis label.
title : str
Title shown at the top using matplotlib `set_title`.
style : str
Path or URL to a matplotlib style file, or name of one of
matplotlib's built-in stylesheets (e.g. 'ggplot').
Lightkurve's custom stylesheet is used by default.
show_colorbar : boolean
Show the colorbar if colors are given using the `c` argument?
colorbar_label : str
Label to show next to the colorbar (if `c` is given).
offset : float
Offset value to apply to the Y axis values before plotting. Use this
to avoid light curves from overlapping on the same plot. By default,
no offset is applied.
clip_outliers : bool
If ``True``, clip the y-axis limits to the central 95% percentile range of the data.
kwargs : dict
Dictionary of arguments to be passed to Matplotlib's `plot`,
`scatter`, or `errorbar` methods.
Returns
-------
ax : `~matplotlib.axes.Axes`
The matplotlib axes object.
"""
# Configure the default style
if style is None or style == "lightkurve":
style = MPLSTYLE
# Default xlabel
if xlabel is None:
if not hasattr(self.time, "format"):
xlabel = "Phase"
elif self.time.format == "bkjd":
xlabel = "Time - 2454833 [BKJD days]"
elif self.time.format == "btjd":
xlabel = "Time - 2457000 [BTJD days]"
elif self.time.format == "jd":
xlabel = "Time [JD]"
else:
xlabel = "Time"
# Default ylabel
if ylabel is None:
if "flux" == column:
ylabel = "Flux"
else:
ylabel = f"{column}"
if normalize or (column == "flux" and self.meta.get("NORMALIZED")):
ylabel = "Normalized " + ylabel
elif (self[column].unit) and (self[column].unit.to_string() != ""):
ylabel += f" [{self[column].unit.to_string('latex_inline')}]"
# Default legend label
if "label" not in kwargs:
kwargs["label"] = self.meta.get("LABEL")
# Workaround for AstroPy v5.0.0 issue #12481: the 'c' argument
# in matplotlib's scatter does not work with masked quantities.
if "c" in kwargs and hasattr(kwargs["c"], 'mask'):
kwargs["c"] = kwargs["c"].unmasked
flux = self[column]
try:
flux_err = self[f"{column}_err"]
except KeyError:
flux_err = np.full(len(flux), np.nan)
# Second workaround for AstroPy v5.0.0 issue #12481:
# matplotlib does not work well with `MaskedNDArray` arrays.
if hasattr(flux, 'mask'):
flux = flux.filled(np.nan)
if hasattr(flux_err, 'mask'):
flux_err = flux_err.filled(np.nan)
# Normalize the data if requested
if normalize:
# ignore "light curve is already normalized" message because
# the user explicitely asked for normalization here
with warnings.catch_warnings():
warnings.filterwarnings("ignore", message=".*already.*")
if column == "flux":
lc_normed = self.normalize()
else:
# Code below is a temporary hack because `normalize()`
# does not have a `column` argument yet
lc_tmp = self.copy()
lc_tmp["flux"] = flux
lc_tmp["flux_err"] = flux_err
lc_normed = lc_tmp.normalize()
flux, flux_err = lc_normed.flux, lc_normed.flux_err
# Apply offset if requested
if offset:
flux = flux.copy() + offset * flux.unit
# Make the plot
with plt.style.context(style):
if ax is None:
fig, ax = plt.subplots(1)
if method == "scatter":
sc = ax.scatter(self.time.value, flux, **kwargs)
# Colorbars should only be plotted if the user specifies, and there is
# a color specified that is not a string (e.g. 'C1') and is iterable.
if (
show_colorbar
and ("c" in kwargs)
and (not isinstance(kwargs["c"], str))
and hasattr(kwargs["c"], "__iter__")
):
cbar = plt.colorbar(sc, ax=ax)
cbar.set_label(colorbar_label)
cbar.ax.yaxis.set_tick_params(tick1On=False, tick2On=False)
cbar.ax.minorticks_off()
elif method == "errorbar":
if np.any(~np.isnan(flux_err)):
ax.errorbar(
x=self.time.value, y=flux.value, yerr=flux_err.value, **kwargs
)
else:
log.warning(f"Column `{column}` has no associated errors.")
else:
ax.plot(self.time.value, flux.value, **kwargs)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
# Show the legend if labels were set
legend_labels = ax.get_legend_handles_labels()
if np.sum([len(a) for a in legend_labels]) != 0:
ax.legend(loc="best")
if clip_outliers and len(flux) > 0:
ymin, ymax = np.percentile(flux.value, [2.5, 97.5])
margin = 0.05 * (ymax - ymin)
ax.set_ylim(ymin - margin, ymax + margin)
return ax
def plot(self, **kwargs) -> matplotlib.axes.Axes:
"""Plot the light curve using Matplotlib's `~matplotlib.pyplot.plot` method.
Parameters
----------
column : str
Name of data column to plot. Default `flux`.
ax : `~matplotlib.axes.Axes`
A matplotlib axes object to plot into. If no axes is provided,
a new one will be generated.
normalize : bool
Normalize the lightcurve before plotting?
xlabel : str
X axis label.
ylabel : str
Y axis label.
title : str
Title shown at the top using matplotlib `set_title`.
style : str
Path or URL to a matplotlib style file, or name of one of
matplotlib's built-in stylesheets (e.g. 'ggplot').
Lightkurve's custom stylesheet is used by default.
show_colorbar : boolean
Show the colorbar if colors are given using the `c` argument?
colorbar_label : str
Label to show next to the colorbar (if `c` is given).
offset : float
Offset value to apply to the Y axis values before plotting. Use this
to avoid light curves from overlapping on the same plot. By default,
no offset is applied.
kwargs : dict
Dictionary of arguments to be passed to `matplotlib.pyplot.plot`.
Returns
-------
ax : `~matplotlib.axes.Axes`
The matplotlib axes object.
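Examples
--------
A minimal sketch with illustrative values::
>>> lc = LightCurve(time=[1, 2, 3], flux=[1., 1.02, 0.99])
>>> ax = lc.plot()  # doctest: +SKIP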
"""
return self._create_plot(method="plot", **kwargs)
def scatter(
self, colorbar_label="", show_colorbar=True, **kwargs
) -> matplotlib.axes.Axes:
"""Plots the light curve using Matplotlib's `~matplotlib.pyplot.scatter` method.
Parameters
----------
column : str
Name of data column to plot. Default `flux`.
ax : `~matplotlib.axes.Axes`
A matplotlib axes object to plot into. If no axes is provided,
a new one will be generated.
normalize : bool
Normalize the lightcurve before plotting?
xlabel : str
X axis label.
ylabel : str
Y axis label.
title : str
Title shown at the top using matplotlib `set_title`.
style : str
Path or URL to a matplotlib style file, or name of one of
matplotlib's built-in stylesheets (e.g. 'ggplot').
Lightkurve's custom stylesheet is used by default.
show_colorbar : boolean
Show the colorbar if colors are given using the `c` argument?
colorbar_label : str
Label to show next to the colorbar (if `c` is given).
offset : float
Offset value to apply to the Y axis values before plotting. Use this
to avoid light curves from overlapping on the same plot. By default,
no offset is applied.
kwargs : dict
Dictionary of arguments to be passed to `matplotlib.pyplot.scatter`.
Returns
-------
ax : `~matplotlib.axes.Axes`
The matplotlib axes object.
"""
return self._create_plot(
method="scatter",
colorbar_label=colorbar_label,
show_colorbar=show_colorbar,
**kwargs,
)
def errorbar(self, linestyle="", **kwargs) -> matplotlib.axes.Axes:
"""Plots the light curve using Matplotlib's `~matplotlib.pyplot.errorbar` method.
Parameters
----------
linestyle : str
Connect the error bars using a line?
column : str
Name of data column to plot. Default `flux`.
ax : `~matplotlib.axes.Axes`
A matplotlib axes object to plot into. If no axes is provided,
a new one will be generated.
normalize : bool
Normalize the lightcurve before plotting?
xlabel : str
X axis label.
ylabel : str
Y axis label.
title : str
Title shown at the top using matplotlib `set_title`.
style : str
Path or URL to a matplotlib style file, or name of one of
matplotlib's built-in stylesheets (e.g. 'ggplot').
Lightkurve's custom stylesheet is used by default.
show_colorbar : boolean
Show the colorbar if colors are given using the `c` argument?
colorbar_label : str
Label to show next to the colorbar (if `c` is given).
offset : float
Offset value to apply to the Y axis values before plotting. Use this
to avoid light curves from overlapping on the same plot. By default,
no offset is applied.
kwargs : dict
Dictionary of arguments to be passed to `matplotlib.pyplot.errorbar`.
Returns
-------
ax : `~matplotlib.axes.Axes`
The matplotlib axes object.
"""
if "ls" not in kwargs:
kwargs["linestyle"] = linestyle
return self._create_plot(method="errorbar", **kwargs)
def interact_bls(
self,
notebook_url="localhost:8888",
minimum_period=None,
maximum_period=None,
resolution=2000,
):
"""Display an interactive Jupyter Notebook widget to find planets.
The Box Least Squares (BLS) periodogram is a statistical tool used
for detecting transiting exoplanets and eclipsing binaries in
light curves. This method will display a Jupyter Notebook Widget
which enables the BLS algorithm to be used interactively.
Behind the scenes, the widget uses the AstroPy implementation of BLS [1]_.
This feature only works inside an active Jupyter Notebook.
It requires Bokeh v1.0 (or later). An error message will be shown
if these dependencies are not available.
Parameters
----------
notebook_url: str
Location of the Jupyter notebook page (default: "localhost:8888")
When showing Bokeh applications, the Bokeh server must be
explicitly configured to allow connections originating from
different URLs. This parameter defaults to the standard notebook
host and port. If you are running on a different location, you
will need to supply this value for the application to display
properly. If no protocol is supplied in the URL, e.g. if it is
of the form "localhost:8888", then "http" will be used.
minimum_period : float or None
Minimum period to assess the BLS to. If None, default value of 0.3 days
will be used.
maximum_period : float or None
Maximum period to evaluate the BLS to. If None, the time coverage of the
lightcurve / 2 will be used.
resolution : int
Number of points to use in the BLS panel. Lower this value for faster
but less accurate performance. You can also vary this value using the
widget's Resolution Slider.
Examples
--------
Load the light curve for Kepler-10, remove long-term trends, and
display the BLS tool as follows:
>>> import lightkurve as lk
>>> lc = lk.search_lightcurve('kepler-10', quarter=3).download() # doctest: +SKIP
>>> lc = lc.normalize().flatten() # doctest: +SKIP
>>> lc.interact_bls() # doctest: +SKIP
References
----------
.. [1] https://docs.astropy.org/en/stable/timeseries/bls.html
"""
from .interact_bls import show_interact_widget
return show_interact_widget(
self,
notebook_url=notebook_url,
minimum_period=minimum_period,
maximum_period=maximum_period,
resolution=resolution,
)
def to_table(self) -> Table:
return Table(self)
@deprecated(
"2.0",
message="`to_timeseries()` has been deprecated. `LightCurve` is a "
"sub-class of Astropy TimeSeries as of Lightkurve v2.0 "
"and no longer needs to be converted.",
warning_type=LightkurveDeprecationWarning,
)
def to_timeseries(self):
return self
@staticmethod
def from_timeseries(ts):
"""Creates a new `LightCurve` from an AstroPy
`~astropy.timeseries.TimeSeries` object.
Parameters
----------
ts : `~astropy.timeseries.TimeSeries`
The AstroPy TimeSeries object. The object must contain columns
named 'time', 'flux', and 'flux_err'.
"""
return LightCurve(
time=ts["time"].value, flux=ts["flux"], flux_err=ts["flux_err"]
)
def to_stingray(self):
"""Returns a `stingray.Lightcurve` object.
This feature requires `Stingray <https://stingraysoftware.github.io/>`_
to be installed (e.g. ``pip install stingray``). An `ImportError` will
be raised if this package is not available.
Returns
-------
lightcurve : `stingray.Lightcurve`
A `stingray.Lightcurve` object.
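Examples
--------
A hedged sketch (requires the optional ``stingray`` package)::
>>> lc = LightCurve(time=[1, 2, 3], flux=[1., 1., 1.])
>>> stingray_lc = lc.to_stingray()  # doctest: +SKIP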
"""
try:
from stingray import Lightcurve as StingrayLightcurve
except ImportError:
raise ImportError(
"You need to install Stingray to use "
"the LightCurve.to_stringray() method."
)
return StingrayLightcurve(
time=self.time.value,
counts=self.flux,
err=self.flux_err,
input_counts=False,
)
@staticmethod
def from_stingray(lc):
"""Create a new `LightCurve` from a `stingray.Lightcurve`.
Parameters
----------
lc : `stingray.Lightcurve`
A stingray Lightcurve object.
"""
return LightCurve(time=lc.time, flux=lc.counts, flux_err=lc.counts_err)
def to_csv(self, path_or_buf=None, **kwargs):
"""Writes the light curve to a CSV file.
This method will convert the light curve into the Comma-Separated Values
(CSV) text format. By default this method will return the result as a
string, but you can also write the string directly to disk by providing
a file name or handle via the `path_or_buf` parameter.
Parameters
----------
path_or_buf : string or file handle
File path or object. By default, the result is returned as a string.
**kwargs : dict
Dictionary of arguments to be passed to `TimeSeries.write()`.
Returns
-------
csv : str or None
Returns a csv-formatted string if ``path_or_buf=None``.
Returns `None` otherwise.
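Examples
--------
A minimal sketch returning the CSV text as a string::
>>> lc = LightCurve(time=[1, 2, 3], flux=[1., 1., 1.])
>>> csv_string = lc.to_csv()  # doctest: +SKIP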
"""
use_stringio = False
if path_or_buf is None:
use_stringio = True
from io import StringIO
path_or_buf = StringIO()
result = self.write(path_or_buf, format="ascii.csv", **kwargs)
if use_stringio:
return path_or_buf.getvalue()
return result
def to_pandas(self, **kwargs):
"""Converts the light curve to a Pandas `~pandas.DataFrame` object.
The data frame will be indexed by `time` using values corresponding
to the light curve's time format. This is different from the
default behavior of `Table.to_pandas()` in AstroPy, which converts
time values into ISO timestamps.
Returns
-------
dataframe : `pandas.DataFrame`
A data frame indexed by `time`.
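Examples
--------
A minimal sketch; the resulting data frame is indexed by time value::
>>> lc = LightCurve(time=[1, 2, 3], flux=[1., 1., 1.])
>>> df = lc.to_pandas()  # doctest: +SKIP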
"""
df = super().to_pandas(**kwargs)
# Default AstroPy behavior is to change the time column into ``np.datetime64``
# We override it here because it confuses Kepler/TESS users who are used
# to working in BTJD and BKJD rather than ISO timestamps.
df.index = self.time.value
df.index.name = "time"
return df
def to_excel(self, path_or_buf, **kwargs) -> None:
"""Shorthand for `to_pandas().to_excel()`.
Parameters
----------
path_or_buf : string or file handle
File path or object.
**kwargs : dict
Dictionary of arguments to be passed to `to_pandas().to_excel(**kwargs)`.
"""
try:
import openpyxl # optional dependency
except ModuleNotFoundError:
raise ModuleNotFoundError(
"You need to install `openpyxl` to use this feature, e.g. use `pip install openpyxl`."
)
self.to_pandas().to_excel(path_or_buf, **kwargs)
def to_periodogram(self, method="lombscargle", **kwargs):
"""Converts the light curve to a `~lightkurve.periodogram.Periodogram`
power spectrum object.
This method will call either
`LombScarglePeriodogram.from_lightcurve() <lightkurve.periodogram.LombScarglePeriodogram.from_lightcurve>` or
`BoxLeastSquaresPeriodogram.from_lightcurve() <lightkurve.periodogram.BoxLeastSquaresPeriodogram.from_lightcurve>`,
which in turn wrap `astropy`'s `~astropy.timeseries.LombScargle` and `~astropy.timeseries.BoxLeastSquares`.
Optional keywords accepted if ``method='lombscargle'`` are:
``minimum_frequency``, ``maximum_frequency``, ``mininum_period``,
``maximum_period``, ``frequency``, ``period``, ``nterms``,
``nyquist_factor``, ``oversample_factor``, ``freq_unit``,
``normalization``, ``ls_method``.
Optional keywords accepted if ``method='bls'`` are
``minimum_period``, ``maximum_period``, ``period``,
``frequency_factor``, ``duration``.
Parameters
----------
method : {'lombscargle', 'boxleastsquares', 'ls', 'bls'}
Use the Lomb Scargle or Box Least Squares (BLS) method to
extract the power spectrum. Defaults to ``'lombscargle'``.
``'ls'`` and ``'bls'`` are shorthands for ``'lombscargle'``
and ``'boxleastsquares'``.
kwargs : dict
Keyword arguments passed to either
`LombScarglePeriodogram <lightkurve.periodogram.LombScarglePeriodogram.from_lightcurve>` or
`BoxLeastSquaresPeriodogram <lightkurve.periodogram.BoxLeastSquaresPeriodogram.from_lightcurve>`.
Returns
-------
Periodogram : `~lightkurve.periodogram.Periodogram` object
The power spectrum object extracted from the light curve.
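Examples
--------
A hedged sketch extracting Lomb-Scargle and BLS periodograms from an
illustrative light curve::
>>> lc = LightCurve(time=[1, 2, 3, 4, 5], flux=[1., 1.1, 1., 0.9, 1.])
>>> pg = lc.to_periodogram(method="lombscargle")  # doctest: +SKIP
>>> bls = lc.to_periodogram(method="bls")  # doctest: +SKIP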
"""
supported_methods = ["ls", "bls", "lombscargle", "boxleastsquares"]
method = validate_method(method.replace(" ", ""), supported_methods)
if method in ["bls", "boxleastsquares"]:
from .periodogram import BoxLeastSquaresPeriodogram
return BoxLeastSquaresPeriodogram.from_lightcurve(lc=self, **kwargs)
else:
from .periodogram import LombScarglePeriodogram
return LombScarglePeriodogram.from_lightcurve(lc=self, **kwargs)
def to_seismology(self, **kwargs):
"""Returns a `~lightkurve.seismology.Seismology` object for estimating
quick-look asteroseismic quantities.
All `**kwargs` will be passed to the `to_periodogram()` method.
Returns
-------
seismology : `~lightkurve.seismology.Seismology` object
Object which can be used to estimate quick-look asteroseismic quantities.
"""
from .seismology import Seismology
return Seismology.from_lightcurve(self, **kwargs)
def to_fits(
self, path=None, overwrite=False, flux_column_name="FLUX", **extra_data
):
"""Converts the light curve to a FITS file in the Kepler/TESS file format.
The FITS file will be returned as a `~astropy.io.fits.HDUList` object.
If a `path` is specified then the file will also be written to disk.
Parameters
----------
path : str or None
Location where the FITS file will be written, which is optional.
overwrite : bool
Whether or not to overwrite the file, if `path` is set.
flux_column_name : str
The column name in the FITS file where the light curve flux data
should be stored. Typical values are `FLUX` or `SAP_FLUX`.
extra_data : dict
Extra keywords or columns to include in the FITS file.
Arguments of type str, int, float, or bool will be stored as
keywords in the primary header.
Arguments of type np.array or list will be stored as columns
in the first extension.
Returns
-------
hdu : `~astropy.io.fits.HDUList`
Returns an `~astropy.io.fits.HDUList` object.
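Examples
--------
A hedged sketch writing the light curve to a hypothetical path::
>>> lc = LightCurve(time=[1, 2, 3], flux=[1., 1., 1.])
>>> hdulist = lc.to_fits(path="lc.fits", overwrite=True)  # doctest: +SKIP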
"""
typedir = {
int: "J",
str: "A",
float: "D",
bool: "L",
np.int32: "J",
np.int32: "K",
np.float32: "E",
np.float64: "D",
}
def _header_template(extension):
"""Returns a template `fits.Header` object for a given extension."""
template_fn = os.path.join(
PACKAGEDIR, "data", "lc-ext{}-header.txt".format(extension)
)
return fits.Header.fromtextfile(template_fn)
def _make_primary_hdu(extra_data=None):
"""Returns the primary extension (#0)."""
if extra_data is None:
extra_data = {}
hdu = fits.PrimaryHDU()
# Copy the default keywords from a template file from the MAST archive
tmpl = _header_template(0)
for kw in tmpl:
hdu.header[kw] = (tmpl[kw], tmpl.comments[kw])
# Override the defaults where necessary
from . import __version__
default = {
"ORIGIN": "Unofficial data product",
"DATE": datetime.datetime.now().strftime("%Y-%m-%d"),
"CREATOR": "lightkurve.LightCurve.to_fits()",
"PROCVER": str(__version__),
}
for kw in default:
hdu.header["{}".format(kw).upper()] = default[kw]
if default[kw] is None:
log.warning("Value for {} is None.".format(kw))
for kw in extra_data:
if isinstance(extra_data[kw], (str, float, int, bool, type(None))):
hdu.header["{}".format(kw).upper()] = extra_data[kw]
if extra_data[kw] is None:
log.warning("Value for {} is None.".format(kw))
return hdu
def _make_lightcurve_extension(extra_data=None):
"""Create the 'LIGHTCURVE' extension (i.e. extension #1)."""
# Turn the data arrays into fits columns and initialize the HDU
if extra_data is None:
extra_data = {}
cols = []
if ~np.asarray(["TIME" in k.upper() for k in extra_data.keys()]).any():
cols.append(
fits.Column(
name="TIME",
format="D",
unit=self.time.format,
array=self.time.value,
)
)
if ~np.asarray(
[flux_column_name in k.upper() for k in extra_data.keys()]
).any():
cols.append(
fits.Column(
name=flux_column_name, format="E", unit="e-/s", array=self.flux
)
)
if hasattr(self, 'flux_err'):
if (flux_column_name.upper() + "_ERR") not in extra_data.keys():
cols.append(
fits.Column(
name=flux_column_name.upper() + "_ERR",
format="E",
unit="e-/s",
array=self.flux_err,
)
)
if hasattr(self, 'cadenceno'):
if ~np.asarray(
["CADENCENO" in k.upper() for k in extra_data.keys()]
).any():
cols.append(
fits.Column(name="CADENCENO", format="J", array=self.cadenceno)
)
for kw in extra_data:
if isinstance(extra_data[kw], (np.ndarray, list)):
cols.append(
fits.Column(
name="{}".format(kw).upper(),
format=typedir[np.asarray(extra_data[kw]).dtype.type],
array=extra_data[kw],
)
)
if "SAP_QUALITY" not in extra_data:
cols.append(
fits.Column(
name="SAP_QUALITY", format="J", array=np.zeros(len(self.flux))
)
)
coldefs = fits.ColDefs(cols)
hdu = fits.BinTableHDU.from_columns(coldefs)
hdu.header["EXTNAME"] = "LIGHTCURVE"
return hdu
def _hdulist(**extra_data):
"""Returns an astropy.io.fits.HDUList object."""
list_out = fits.HDUList(
[
_make_primary_hdu(extra_data=extra_data),
_make_lightcurve_extension(extra_data=extra_data),
]
)
return list_out
hdu = _hdulist(**extra_data)
if path is not None:
hdu.writeto(path, overwrite=overwrite, checksum=True)
return hdu
def to_corrector(self, method="sff", **kwargs):
"""Returns a corrector object to remove instrument systematics.
Parameters
----------
method : string
Currently, "sff" and "cbv" are supported. This will return a
`~correctors.SFFCorrector` and `~correctors.CBVCorrector`
class instance respectively.
**kwargs : dict
Extra keyword arguments to be passed to the corrector class.
Returns
-------
corrector : `~correctors.corrector.Corrector`
Instance of a Corrector class, which typically provides
`~correctors.corrector.Corrector.correct()`
and `~correctors.corrector.Corrector.diagnose()` methods.
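Examples
--------
A hedged sketch, assuming a K2-like light curve ``lc``::
>>> corrector = lc.to_corrector(method="sff")  # doctest: +SKIP
>>> corrected_lc = corrector.correct()  # doctest: +SKIP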
"""
if method == "pld":
raise ValueError(
"The 'pld' method can only be used on "
"`TargetPixelFile` objects, not `LightCurve` objects."
)
method = validate_method(method, supported_methods=["sff", "cbv"])
if method == "sff":
from .correctors import SFFCorrector
return SFFCorrector(self, **kwargs)
elif method == "cbv":
from .correctors import CBVCorrector
return CBVCorrector(self, **kwargs)
@deprecated_renamed_argument(
"t0", "epoch_time", "2.0", warning_type=LightkurveDeprecationWarning
)
def plot_river(
self,
period,
epoch_time=None,
ax=None,
bin_points=1,
minimum_phase=-0.5,
maximum_phase=0.5,
method="mean",
**kwargs,
) -> matplotlib.axes.Axes:
"""Plot the light curve as a river plot.
A river plot uses colors to represent the light curve values in
chronological order, relative to the period of an interesting signal.
Each row in the plot represents a full period cycle, and each column
represents a fixed phase. This type of plot is often used to visualize
Transit Timing Variations (TTVs) in the light curves of exoplanets, but
it can be used to visualize periodic signals of any origin.
All extra keywords supplied are passed on to Matplotlib's
`~matplotlib.pyplot.pcolormesh` function.
Parameters
----------
ax : `~matplotlib.axes.Axes`
The matplotlib axes object.
period : float
Period at which to fold the light curve.
epoch_time : float
Phase mid point for plotting. Defaults to the first time value.
bin_points : int
How many points should be in each bin.
minimum_phase : float
The minimum phase to plot.
maximum_phase : float
The maximum phase to plot.
method : str
The river method. Choose from `'mean'` or `'median'` or `'sigma'`.
If `'mean'` or `'median'`, the plot will display the average value in each bin.
If `'sigma'`, the plot will display the average in the bin divided by
the error in each bin, in order to show the data in terms of standard
deviation.
kwargs : dict
Dictionary of arguments to be passed on to Matplotlib's
`~matplotlib.pyplot.pcolormesh` function.
Returns
-------
ax : `~matplotlib.axes.Axes`
The matplotlib axes object.
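Examples
--------
A hedged sketch, assuming a light curve ``lc`` containing a periodic
signal at a hypothetical 2.5-day period::
>>> ax = lc.plot_river(period=2.5, epoch_time=2454833.0)  # doctest: +SKIP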
"""
if hasattr(self, "time_original"): # folded light curve
time = self.time_original
else:
time = self.time
# epoch_time defaults to the first time value
if epoch_time is None:
epoch_time = time[0]
# Lightkurve v1.x assumed that `period` was given in days if no unit
# was specified. We maintain this behavior for backwards-compatibility.
if period is not None and not isinstance(period, Quantity):
period *= u.day
if epoch_time is not None and not isinstance(epoch_time, (Time, Quantity)):
epoch_time = Time(epoch_time, format=time.format, scale=time.scale)
method = validate_method(method, supported_methods=["mean", "median", "sigma"])
if (bin_points == 1) and (method in ["mean", "median"]):
bin_func = lambda y, e: (y[0], e[0])
elif (bin_points == 1) and (method in ["sigma"]):
bin_func = lambda y, e: ((y[0] - 1) / e[0], np.nan)
elif method == "mean":
bin_func = lambda y, e: (np.nanmean(y), np.nansum(e ** 2) ** 0.5 / len(e))
elif method == "median":
bin_func = lambda y, e: (np.nanmedian(y), np.nansum(e ** 2) ** 0.5 / len(e))
elif method == "sigma":
bin_func = lambda y, e: (
(np.nanmean(y) - 1) / (np.nansum(e ** 2) ** 0.5 / len(e)),
np.nan,
)
s = np.argsort(time.value)
x, y, e = time.value[s], self.flux[s], self.flux_err[s]
med = np.nanmedian(self.flux)
e /= med
y /= med
# Here `ph` is the phase of each time point x
# cyc is the number of cycles that have occurred at each time point x
# since the phase 0 before x[0]
n = int(
period.value
/ np.nanmedian(np.diff(x))
* (maximum_phase - minimum_phase)
/ bin_points
)
if n == 1:
bin_points = int(maximum_phase - minimum_phase) / (
2 / int(period.value / np.nanmedian(np.diff(x)))
)
warnings.warn(
"`bin_points` is too high to plot a phase curve, resetting to {}".format(
bin_points
),
LightkurveWarning,
)
n = 2
phase = (epoch_time.value % period.value) / period.value
ph = ((x - (phase * period.value)) / period.value) % 1
cyc = np.asarray(
(x - ((x - phase * period.value) % period.value)) / period.value, int
)
cyc -= np.min(cyc)
ph[ph > 0.5] -= 1
ar = np.empty((n, np.max(cyc) + 1))
ar[:] = np.nan
bs = np.linspace(minimum_phase, maximum_phase, n + 1)
cycs = np.arange(0, np.max(cyc) + 2)
ph_masks = [(ph > bs[jdx]) & (ph <= bs[jdx + 1]) for jdx in range(n)]
qual_mask = np.isfinite(y)
for cyc1 in np.unique(cyc):
cyc_mask = cyc == cyc1
if not np.any(cyc_mask):
continue
for jdx, ph_mask in enumerate(ph_masks):
if not np.any(cyc_mask & ph_mask & qual_mask):
ar[jdx, cyc1] = np.nan
else:
ar[jdx, cyc1] = bin_func(
y[cyc_mask & ph_mask], e[cyc_mask & ph_mask]
)[0]
# If the method is average we need to denormalize the plot
if method in ["mean", "median"]:
median = np.nanmedian(self.flux.value)
if hasattr(median, 'mask'):
median = median.filled(np.nan)
ar *= median
d = np.max(
[
np.abs(np.nanmedian(ar) - np.nanpercentile(ar, 5)),
np.abs(np.nanmedian(ar) - np.nanpercentile(ar, 95)),
]
)
vmin = kwargs.pop("vmin", np.nanmedian(ar) - d)
vmax = kwargs.pop("vmax", np.nanmedian(ar) + d)
if method in ["mean", "median"]:
cmap = kwargs.pop("cmap", "viridis")
elif method == "sigma":
cmap = kwargs.pop("cmap", "coolwarm")
with plt.style.context(MPLSTYLE):
if ax is None:
_, ax = plt.subplots(figsize=(12, cyc.max() * 0.1))
im = ax.pcolormesh(
bs, cycs, ar.T, vmin=vmin, vmax=vmax, cmap=cmap, **kwargs
)
cbar = plt.colorbar(im, ax=ax)
if method in ["mean", "median"]:
unit = "[Normalized Flux]"
if self.flux.unit is not None:
if self.flux.unit != u.dimensionless_unscaled:
unit = "[{}]".format(self.flux.unit.to_string("latex"))
if bin_points == 1:
cbar.set_label("Flux {}".format(unit))
else:
cbar.set_label("Average Flux in Bin {}".format(unit))
elif method == "sigma":
if bin_points == 1:
cbar.set_label(
"Flux in units of Standard Deviation "
"$(f - \overline{f})/(\sigma_f)$"
)
else:
cbar.set_label(
"Average Flux in Bin in units of Standard Deviation "
"$(f - \overline{f})/(\sigma_f)$"
)
ax.set_xlabel("Phase")
ax.set_ylabel("Cycle")
ax.set_ylim(cyc.max(), 0)
ax.set_title(self.meta.get("LABEL"))
a = cyc.max() * 0.1 / 12.0
b = (cyc.max() - cyc.min()) / (bs.max() - bs.min())
ax.set_aspect(a / b)
return ax
def create_transit_mask(self, period, transit_time, duration):
"""Returns a boolean array that is ``True`` during transits and
``False`` elsewhere.
This method supports multi-planet systems by allowing ``period``,
``transit_time``, and ``duration`` to be array-like lists of parameters.
Parameters
----------
period : `~astropy.units.Quantity`, float, or array-like
Period(s) of the transits.
duration : `~astropy.units.Quantity`, float, or array-like
Duration(s) of the transits.
transit_time : `~astropy.time.Time`, float, or array-like
Transit midpoint(s) of the transits.
Returns
-------
transit_mask : np.array of bool
Mask that flags transits. Mask is ``True`` where there are transits.
Examples
--------
You can create a transit mask for a single-planet system as follows::
>>> import lightkurve as lk
>>> lc = lk.LightCurve({'time': [1, 2, 3, 4, 5], 'flux': [1, 1, 1, 1, 1]})
>>> lc.create_transit_mask(transit_time=2., period=2., duration=0.1)
array([False, True, False, True, False])
The method accepts lists of parameters to support multi-planet systems::
>>> lc.create_transit_mask(transit_time=[2., 3.], period=[2., 10.], duration=[0.1, 0.1])
array([False, True, True, True, False])
"""
# Convert Quantity objects to floats in units "day"
period = _to_unitless_day(period)
duration = _to_unitless_day(duration)
# If ``transit_time`` is a ``Quantity``, attempt converting it to a ``Time`` object
if isinstance(transit_time, Quantity):
transit_time = Time(transit_time, format=self.time.format, scale=self.time.scale)
# Ensure all parameters are 1D-arrays
period = np.atleast_1d(period)
duration = np.atleast_1d(duration)
transit_time = np.atleast_1d(transit_time)
# Make sure all params have the same number of entries
n_planets = len(period)
if any(len(param) != n_planets for param in [duration, transit_time]):
raise ValueError(
"period, duration, and transit_time must have "
"the same number of values."
)
# Initialize an all-False cadence mask
in_transit = np.zeros(len(self), dtype=bool)
# Create the transit mask
for per, dur, tt in zip(period, duration, transit_time):
if isinstance(tt, Time):
# If a `Time` is passed, ensure it has the right format & scale
tt = Time(tt, format=self.time.format, scale=self.time.scale).value
hp = per / 2.0
in_transit |= np.abs((self.time.value - tt + hp) % per - hp) < 0.5 * dur
return in_transit
def search_neighbors(
self, limit: int = 10, radius: float = 3600.0, **search_criteria
):
"""Search the data archive at MAST for the most nearby light curves.
By default, the 10 nearest neighbors located within 3600 arcseconds
are returned. You can override these defaults by changing the `limit`
and `radius` parameters.
If the LightCurve object is a Kepler, K2, or TESS light curve,
the default behavior of this method is to only return light curves
obtained during the exact same quarter, campaign, or sector.
This is useful to enable coeval light curves to be inspected for
spurious noise signals in common between multiple neighboring targets.
You can override this default behavior by passing a `mission`,
`quarter`, `campaign`, or `sector` argument yourself.
Please refer to the docstring of `search_lightcurve` for a complete
list of search parameters accepted.
Parameters
----------
limit : int
Maximum number of results to return.
radius : float or `astropy.units.Quantity` object
Conesearch radius. If a float is given it will be assumed to be in
units of arcseconds.
**search_criteria : kwargs
Extra criteria to be passed to `search_lightcurve`.
Returns
-------
result : :class:`SearchResult` object
Object detailing the neighbor light curves found, sorted by
distance from the current light curve.
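Examples
--------
A hedged sketch, assuming a light curve ``lc`` downloaded from MAST
(requires network access)::
>>> neighbors = lc.search_neighbors(limit=5, radius=1800.)  # doctest: +SKIP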
"""
# Local import to avoid circular dependency
from .search import search_lightcurve
# By default, only return results from the same sector/quarter/campaign
if (
"mission" not in search_criteria
and "sector" not in search_criteria
and "quarter" not in search_criteria
and "campaign" not in search_criteria
):
mission = self.meta.get("MISSION", None)
if mission == "TESS":
search_criteria["sector"] = self.sector
elif mission == "Kepler":
search_criteria["quarter"] = self.quarter
elif mission == "K2":
search_criteria["campaign"] = self.campaign
# Note: we increase `limit` by one below to account for the fact that the
# current light curve will be returned by the search operation
log.info(
f"Started searching for up to {limit} neighbors within {radius} arcseconds."
)
result = search_lightcurve(
f"{self.ra} {self.dec}", radius=radius, limit=limit + 1, **search_criteria
)
# Filter by distance > 0 to avoid returning the current light curve
result = result[result.distance > 0]
log.info(f"Found {len(result)} neighbors.")
return result
def head(self, n: int = 5):
"""Return the first n rows.
Parameters
----------
n : int
Number of rows to return.
Returns
-------
lc : LightCurve
Light curve containing the first n rows.
"""
return self[:n]
def tail(self, n: int = 5):
"""Return the last n rows.
Parameters
----------
n : int
Number of rows to return.
Returns
-------
lc : LightCurve
Light curve containing the last n rows.
"""
return self[-n:]
def truncate(self, before: float = None, after: float = None, column: str = "time"):
"""Truncates the light curve before and after some time value.
Parameters
----------
before : float
Truncate all rows before this time value.
after : float
Truncate all rows after this time value.
column : str, optional
The name of the column on which the truncation is based. Defaults to 'time'.
Returns
-------
truncated_lc : LightCurve
The truncated light curve.
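Examples
--------
A minimal sketch keeping only rows with ``2 <= time <= 4``::
>>> lc = LightCurve(time=[1, 2, 3, 4, 5], flux=[1., 1., 1., 1., 1.])
>>> truncated_lc = lc.truncate(before=2, after=4)  # doctest: +SKIP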
"""
def _to_unitless(data):
return np.asarray(getattr(data, "value", data))
mask = np.ones(len(self), dtype=bool)
if before is not None:
mask &= _to_unitless(getattr(self, column)) >= before
if after is not None:
mask &= _to_unitless(getattr(self, column)) <= after
return self[mask]
class FoldedLightCurve(LightCurve):
"""Subclass of `LightCurve` in which the ``time`` parameter represents phase values.
Compared to the `~lightkurve.lightcurve.LightCurve` base class, this class
has extra meta data entries (``period``, ``epoch_time``, ``epoch_phase``,
``wrap_phase``, ``normalize_phase``), an extra column (``time_original``),
extra properties (``phase``, ``odd_mask``, ``even_mask``),
and implements different plotting defaults.
"""
@property
def phase(self):
"""Alias for `LightCurve.time`."""
return self.time
@property
def odd_mask(self):
"""Boolean mask which flags the odd-numbered cycles (1, 3, 5, etc).
This is useful for studying every second occurrence of a signal.
For example, in exoplanet searches, comparisons of odd and even transits
can help confirm the planetary nature of a signal. Differences in the
depth, duration, or shape of the odd- and even-numbered transits would
indicate that the 'transits' are being caused by a near-equal mass
eclipsing background binary, rather than a true transiting exoplanet.
Examples
--------
You can visualize the odd- and even-centered transits separately as
follows:
>>> f = lc.fold(...) # doctest: +SKIP
>>> f[f.odd_mask].scatter() # doctest: +SKIP
>>> f[f.even_mask].scatter() # doctest: +SKIP
"""
cycle = (
self.time_original - self.time.value * (self.period) - self.period * 0.5
) / (self.period * 2)
return (cycle.value % 1) < 0.5
@property
def even_mask(self):
"""Boolean mask which flags the even-numbered cycles (2, 4, 6, etc).
See the documentation of `odd_mask` for examples.
"""
return ~self.odd_mask
def _set_xlabel(self, kwargs):
"""Helper function for plot, scatter, and errorbar.
Ensures the xlabel is correctly set for folded light curves.
"""
if "xlabel" not in kwargs:
kwargs["xlabel"] = "Phase"
if isinstance(self.time, TimeDelta):
kwargs["xlabel"] += f" [{self.time.format.upper()}]"
return kwargs
def plot(self, **kwargs):
"""Plot the folded light curve using matplotlib's
`~matplotlib.pyplot.plot` method.
See `LightCurve.plot` for details on the accepted arguments.
Parameters
----------
kwargs : dict
Dictionary of arguments to be passed to `LightCurve.plot`.
Returns
-------
ax : `~matplotlib.axes.Axes`
The matplotlib axes object.
"""
kwargs = self._set_xlabel(kwargs)
return super(FoldedLightCurve, self).plot(**kwargs)
def scatter(self, **kwargs):
"""Plot the folded light curve using matplotlib's `~matplotlib.pyplot.scatter` method.
See `LightCurve.scatter` for details on the accepted arguments.
Parameters
----------
kwargs : dict
Dictionary of arguments to be passed to `LightCurve.scatter`.
Returns
-------
ax : `~matplotlib.axes.Axes`
The matplotlib axes object.
"""
kwargs = self._set_xlabel(kwargs)
return super(FoldedLightCurve, self).scatter(**kwargs)
def errorbar(self, **kwargs):
"""Plot the folded light curve using matplotlib's
`~matplotlib.pyplot.errorbar` method.
        See `LightCurve.errorbar` for details on the accepted arguments.
Parameters
----------
kwargs : dict
            Dictionary of arguments to be passed to `LightCurve.errorbar`.
Returns
-------
ax : `~matplotlib.axes.Axes`
The matplotlib axes object.
"""
kwargs = self._set_xlabel(kwargs)
return super(FoldedLightCurve, self).errorbar(**kwargs)
def plot_river(self, **kwargs):
"""Plot the folded light curve in a river style.
See `~LightCurve.plot_river` for details on the accepted arguments.
Parameters
----------
kwargs : dict
Dictionary of arguments to be passed to `~LightCurve.plot_river`.
Returns
-------
ax : `~matplotlib.axes.Axes`
The matplotlib axes object.
"""
ax = super(FoldedLightCurve, self).plot_river(
period=self.period, epoch_time=self.epoch_time, **kwargs
)
return ax
class KeplerLightCurve(LightCurve):
"""Subclass of :class:`LightCurve <lightkurve.lightcurve.LightCurve>`
to represent data from NASA's Kepler and K2 mission."""
_deprecated_keywords = (
"targetid",
"label",
"time_format",
"time_scale",
"flux_unit",
"quality_bitmask",
"channel",
"campaign",
"quarter",
"mission",
"ra",
"dec",
)
_default_time_format = "bkjd"
@classmethod
def read(cls, *args, **kwargs):
"""Returns a `KeplerLightCurve` by reading the given file.
Parameters
----------
filename : str
Local path or remote url of a Kepler light curve FITS file.
flux_column : str, optional
The column in the FITS file to be read as `flux`. Defaults to 'pdcsap_flux'.
Typically 'pdcsap_flux' or 'sap_flux'.
quality_bitmask : str or int, optional
Bitmask (integer) which identifies the quality flag bitmask that should
be used to mask out bad cadences. If a string is passed, it has the
following meaning:
* "none": no cadences will be ignored
* "default": cadences with severe quality issues will be ignored
* "hard": more conservative choice of flags to ignore
This is known to remove good data.
* "hardest": removes all data that has been flagged
This mask is not recommended.
See the :class:`KeplerQualityFlags <lightkurve.utils.KeplerQualityFlags>` class for details on the bitmasks.
format : str, optional
The format of the Kepler FITS file. Should be one of 'kepler', 'k2sff', 'everest'. Defaults to 'kepler'.
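
        Examples
        --------
        A hypothetical usage sketch (the file names are illustrative):

        >>> lc = KeplerLightCurve.read("kplr-llc.fits")  # doctest: +SKIP
        >>> lc = KeplerLightCurve.read("k2sff-lc.fits", format="k2sff")  # doctest: +SKIP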
"""
# Default to Kepler file format
if kwargs.get("format") is None:
kwargs["format"] = "kepler"
return super().read(*args, **kwargs)
def to_fits(
self,
path=None,
overwrite=False,
flux_column_name="FLUX",
aperture_mask=None,
**extra_data,
):
"""Writes the KeplerLightCurve to a FITS file.
Parameters
----------
path : string, default None
            File path. If `None`, an `~astropy.io.fits.HDUList` object is returned.
        overwrite : bool
            Whether or not to overwrite the file.
flux_column_name : str
The name of the label for the FITS extension, e.g. SAP_FLUX or FLUX
aperture_mask : array-like
Optional 2D aperture mask to save with this lightcurve object, if
defined. The mask can be either a boolean mask or an integer mask
mimicking the Kepler/TESS convention; boolean masks are
automatically converted to the Kepler/TESS conventions
extra_data : dict
Extra keywords or columns to include in the FITS file.
Arguments of type str, int, float, or bool will be stored as
keywords in the primary header.
Arguments of type np.array or list will be stored as columns
in the first extension.
Returns
-------
        hdu : `~astropy.io.fits.HDUList`
            Returns an `~astropy.io.fits.HDUList` object if ``path`` is `None`.
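
        Examples
        --------
        A hypothetical usage sketch (the output path is illustrative):

        >>> lc.to_fits(path="kepler-lc.fits", overwrite=True)  # doctest: +SKIP
        >>> hdul = lc.to_fits()  # no path given: returns an HDUList  # doctest: +SKIP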
"""
kepler_specific_data = {
"TELESCOP": "KEPLER",
"INSTRUME": "Kepler Photometer",
"OBJECT": "{}".format(self.targetid),
"KEPLERID": self.targetid,
"CHANNEL": self.channel,
"MISSION": self.mission,
"RA_OBJ": self.ra,
"DEC_OBJ": self.dec,
"EQUINOX": 2000,
"DATE-OBS": Time(self.time[0] + 2454833.0, format=("jd")).isot,
"SAP_QUALITY": self.quality,
"MOM_CENTR1": self.centroid_col,
"MOM_CENTR2": self.centroid_row,
}
        for kw in kepler_specific_data:
            # Only add the default keyword if the user has not already
            # provided it via ``extra_data`` (case-insensitive check).
            if not any(kw.lower() == k.lower() for k in extra_data):
                extra_data[kw] = kepler_specific_data[kw]
hdu = super(KeplerLightCurve, self).to_fits(
path=None, overwrite=overwrite, **extra_data
)
hdu[0].header["QUARTER"] = self.meta.get("QUARTER")
hdu[0].header["CAMPAIGN"] = self.meta.get("CAMPAIGN")
hdu = _make_aperture_extension(hdu, aperture_mask)
if path is not None:
hdu.writeto(path, overwrite=overwrite, checksum=True)
else:
return hdu
class TessLightCurve(LightCurve):
"""Subclass of :class:`LightCurve <lightkurve.lightcurve.LightCurve>`
to represent data from NASA's TESS mission."""
_deprecated_keywords = (
"targetid",
"label",
"time_format",
"time_scale",
"flux_unit",
"quality_bitmask",
"sector",
"camera",
"ccd",
"mission",
"ra",
"dec",
)
_default_time_format = "btjd"
@classmethod
def read(cls, *args, **kwargs):
"""Returns a `TessLightCurve` by reading the given file.
Parameters
----------
filename : str
Local path or remote url of a TESS light curve FITS file.
flux_column : str, optional
The column in the FITS file to be read as `flux`. Defaults to 'pdcsap_flux'.
Typically 'pdcsap_flux' or 'sap_flux'.
quality_bitmask : str or int, optional
Bitmask (integer) which identifies the quality flag bitmask that should
be used to mask out bad cadences. If a string is passed, it has the
following meaning:
* "none": no cadences will be ignored
* "default": cadences with severe quality issues will be ignored
* "hard": more conservative choice of flags to ignore
This is known to remove good data.
* "hardest": removes all data that has been flagged
This mask is not recommended.
See the :class:`TessQualityFlags <lightkurve.utils.TessQualityFlags>` class for details on the bitmasks.
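
        Examples
        --------
        A hypothetical usage sketch (the file name is illustrative):

        >>> lc = TessLightCurve.read("tess-s0001-lc.fits", quality_bitmask="hard")  # doctest: +SKIP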
"""
# Default to TESS file format
if kwargs.get("format") is None:
kwargs["format"] = "tess"
return super().read(*args, **kwargs)
def to_fits(
self,
path=None,
overwrite=False,
flux_column_name="FLUX",
aperture_mask=None,
**extra_data,
):
"""Writes the KeplerLightCurve to a FITS file.
Parameters
----------
path : string, default None
            File path. If `None`, an `~astropy.io.fits.HDUList` object is returned.
        overwrite : bool
            Whether or not to overwrite the file.
flux_column_name : str
The name of the label for the FITS extension, e.g. SAP_FLUX or FLUX
aperture_mask : array-like
Optional 2D aperture mask to save with this lightcurve object, if
defined. The mask can be either a boolean mask or an integer mask
mimicking the Kepler/TESS convention; boolean masks are
automatically converted to the Kepler/TESS conventions
extra_data : dict
Extra keywords or columns to include in the FITS file.
Arguments of type str, int, float, or bool will be stored as
keywords in the primary header.
Arguments of type np.array or list will be stored as columns
in the first extension.
Returns
-------
        hdu : `~astropy.io.fits.HDUList`
            Returns an `~astropy.io.fits.HDUList` object if ``path`` is `None`.
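
        Examples
        --------
        A hypothetical usage sketch (the output path is illustrative):

        >>> lc.to_fits(path="tess-lc.fits", overwrite=True)  # doctest: +SKIP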
"""
tess_specific_data = {
"OBJECT": "{}".format(self.targetid),
"MISSION": self.meta.get("MISSION"),
"RA_OBJ": self.meta.get("RA"),
"TELESCOP": self.meta.get("MISSION"),
"CAMERA": self.meta.get("CAMERA"),
"CCD": self.meta.get("CCD"),
"SECTOR": self.meta.get("SECTOR"),
"TARGETID": self.meta.get("TARGETID"),
"DEC_OBJ": self.meta.get("DEC"),
"MOM_CENTR1": self.centroid_col,
"MOM_CENTR2": self.centroid_row,
}
        for kw in tess_specific_data:
            # Only add the default keyword if the user has not already
            # provided it via ``extra_data`` (case-insensitive check).
            if not any(kw.lower() == k.lower() for k in extra_data):
                extra_data[kw] = tess_specific_data[kw]
hdu = super(TessLightCurve, self).to_fits(
path=None, overwrite=overwrite, **extra_data
)
# We do this because the TESS file format is subtly different in the
# name of this column.
hdu[1].columns.change_name("SAP_QUALITY", "QUALITY")
hdu = _make_aperture_extension(hdu, aperture_mask)
if path is not None:
hdu.writeto(path, overwrite=overwrite, checksum=True)
else:
return hdu
# Helper functions
def _boolean_mask_to_bitmask(aperture_mask):
"""Takes in an aperture_mask and returns a Kepler-style bitmask
Parameters
----------
aperture_mask : array-like
2D aperture mask. The mask can be either a boolean mask or an integer
mask mimicking the Kepler/TESS convention; boolean or boolean-like masks
are converted to the Kepler/TESS conventions. Kepler bitmasks are
returned unchanged except for possible datatype conversion.
Returns
-------
bitmask : numpy uint8 array
A bitmask incompletely mimicking the Kepler/TESS convention: Bit 2,
value = 3, means "pixel was part of the custom aperture". The other
bits have no meaning and are currently assigned a value of 1.
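
    Examples
    --------
    A minimal sketch (boolean input; per the convention above, aperture
    pixels map to the value 3):

    >>> bitmask = _boolean_mask_to_bitmask(np.array([[True, False]]))  # doctest: +SKIP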
"""
# Masks can either be boolean input or Kepler pipeline style
clean_mask = np.nan_to_num(aperture_mask)
    contains_bit2 = (clean_mask.astype(int) & 2).any()
all_zeros_or_ones = (clean_mask.dtype in ["float", "int"]) & (
        (set(np.unique(clean_mask)) - {0, 1}) == set()
    )