# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/03_Train Flyvec.ipynb (unless otherwise specified).
__all__ = ['BIN', 'init', 'main']
# Cell
import os
from pathlib import Path
import ctypes
from ctypes import *
import array as arr
import sys
import argparse
import time
import numpy
import flyvec.path_fixes as pf
import subprocess as sp
# Cell
# Load binaries
BIN = pf.CU_BIN
# Cell
from fastcore.script import *
@call_parse
def init():
sp.call(["sh", str(BIN / "short_make")], cwd=str(BIN))
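# Cell
# A minimal sketch (not from the original notebook) of how the two input arrays
# consumed by `main` below could be prepared. The sentences, token IDs, and file
# names are illustrative assumptions only.
def _example_prepare_inputs(out_dir="."):
    "Write toy encodings (np.int32 token IDs) and offsets (np.uint64, first value 0) arrays."
    sentences = [[3, 17, 5, 9], [2, 8, 11]]  # two "sentences" of token IDs
    encodings = numpy.array([t for s in sentences for t in s], dtype=numpy.int32)
    # offsets[i] marks where chunk i starts; one trailing entry marks the end
    offsets = numpy.cumsum([0] + [len(s) for s in sentences]).astype(numpy.uint64)
    numpy.save(str(Path(out_dir) / "encodings.npy"), encodings)
    numpy.save(str(Path(out_dir) / "offsets.npy"), offsets)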
# Cell
@call_parse
def main(
    encodings_source:Param("""Path to the tokens encoded as IDs (a .npy array, dtype should be np.int32)""", str),
    offsets_source:Param("""Path to the offsets array; each value indicates where a chunk of text starts (dtype should be np.uint64, first value should be 0)""", str),
    output_dir:Param("""Directory in which to save the checkpoints. Created if it does not exist""", str)=".",
    save_every:Param("""How many epochs to run before saving a checkpoint""", int)=1,
    ckpt_prefix:Param("""Prefix used to name each checkpoint. Additional parameter choices are inserted into the checkpoint name.""", str)="flyvec_model",
    starting_checkpoint:Param("""Path to a .npy file of a saved checkpoint""", str)=None,
    W:Param("Size of the W-gram sliding window used to train the word vectors", int)=11,
    hid:Param("Number of hidden units (neurons). Do not change", int)=400,
    initial_learning_rate:Param("Initial learning rate", float)=0.002,
    delta:Param("Parameter delta from the learning-rule equation", float)=0.,
    mu:Param("If no checkpoint is provided, use this as the mean for the random normal initialization of the synapses", float)=0.,
    sigma:Param("If no checkpoint is provided, use this as the stdev for the random normal initialization of the synapses", float)=0.02,
    Nep:Param("Maximum number of epochs; fewer if starting from a checkpoint", int)=15,
    batch_size:Param("Minibatch size", int)=10000,
    prec:Param("Precision epsilon, used to avoid dividing by 0", float)=1.0E-30,
):
# Obsolete Parameters
stride = int(1)
IM_HEIGHT = int(1)
IM_WIDTH = int(11)
Nchannels = int(1)
# Unexposed Parameters
frequency_scaling = 1 # 1 or 0, true or false
Lmid = 1.0
Lbase = 1.0
sparse_input = 1 # 1 or 0, true or false, make input sparse and only store column
# Load object files
model_descriptor = ctypes.CDLL(str(BIN / 'model_descriptor.so'))
model_arrays = ctypes.CDLL(str(BIN / 'model_arrays.so'))
cuda_helpers = ctypes.CDLL(str(BIN / 'cuda_helpers.so'))
model = ctypes.CDLL(str(BIN / 'cuda_funcs.so'))
prune_input = ctypes.CDLL(str(BIN / 'prune_input.so'))
# m = 2
# p = 2
# parser.add_argument("--stride", default=1, type=int, help="Stride. Do not change.")
# parser.add_argument("--IM_HEIGHT", default=1, type=int, help="Height of the image data. Obsolete.")
# parser.add_argument("--IM_WIDTH", default=11, type=int, help="Width of the image data. Obsolete.")
# parser.add_argument("--Nchannels", default=1, type=int, help="Number of channels in the image data. Obsolete.")
# parser.add_argument("--m", default=2, type=int, help="Parameter from equation. Obsolete.")
# parser.add_argument("--p", default=2, type=int, help="Parameter from equation. Obsolete.")
    # Declare C function signatures (ctypes argument and return types)
    py_prune_input_data = prune_input.prune_input_data
    py_prune_input_data.argtypes = [c_void_p, c_uint64, c_uint64]
    py_getnum_samples = prune_input.getnum_samples
    py_getnum_samples.argtypes = [c_void_p, c_uint64, c_uint64]
    py_getnum_samples.restype = c_uint64
    py_compute_offset_phrases = prune_input.compute_offset_phrases
    py_compute_offset_phrases.argtypes = [c_void_p, c_void_p, c_uint64, c_uint64]
    py_model_create_descriptor = model_descriptor.model_create_descriptor
    py_model_create_descriptor.argtypes = [c_int32]
    py_model_create_descriptor.restype = ctypes.c_void_p
    py_set_model_param_int = model_descriptor.set_model_param_int
    py_set_model_param_float = model_descriptor.set_model_param_float
    py_compute_model_derived_parameters = model_descriptor.compute_model_derived_parameters
    py_set_model_param_int.argtypes = [c_uint64, c_char_p, c_void_p]
    py_set_model_param_float.argtypes = [c_float, c_char_p, c_void_p]
    py_compute_model_derived_parameters.argtypes = [c_void_p]
    py_copy_model_params = model_descriptor.copy_model_params
    py_copy_model_params.argtypes = [c_void_p, c_int32, c_int32]
    py_model_create_arrays = model_arrays.model_create_arrays
    py_model_create_arrays.restype = ctypes.c_void_p
    py_model_create_arrays.argtypes = [c_int32]
    py_model_arrays_allocate_memory = model_arrays.model_arrays_allocate_memory
    py_model_arrays_allocate_memory.argtypes = [c_void_p, c_void_p, c_int32]
    py_model_arrays_reshuffle_indices = model_arrays.do_reshuffle_indices
    py_model_arrays_reshuffle_indices.argtypes = [c_void_p, c_void_p, c_int32]
    py_compute_inverse_word_frequency = model_arrays.compute_inverse_word_frequency
    py_compute_inverse_word_frequency.argtypes = [c_void_p, c_void_p, c_void_p, c_uint64, c_int32]
    python_get_cuda_pinned_memory = cuda_helpers.do_gpu_cudaHostAlloc
    python_get_cuda_pinned_memory.restype = ctypes.c_void_p
    python_get_cuda_managed_memory = cuda_helpers.do_gpu_cudaMallocManaged
    python_get_cuda_managed_memory.restype = ctypes.c_void_p
    py_run_epoch_INPUT_AS_IMAGE = model.launch_epoch_INPUT_AS_IMAGE
    py_run_epoch_INPUT_AS_IMAGE.argtypes = [c_void_p, c_void_p, c_int32, c_uint64, c_uint64]
    py_run_epoch_INPUT_AS_FLOAT = model.launch_epoch_INPUT_AS_FLOAT
    py_run_epoch_INPUT_AS_FLOAT.argtypes = [c_void_p, c_void_p, c_int32, c_uint64, c_uint64]
    py_run_epoch_INPUT_AS_INT = model.launch_epoch_INPUT_AS_INT
    py_run_epoch_INPUT_AS_INT.argtypes = [c_void_p, c_void_p, c_int32, c_uint64, c_uint64]
    py_model_arrays_get_data_pointer = model_arrays.get_data_pointer
    py_model_arrays_get_data_pointer.restype = ctypes.c_void_p
    py_model_arrays_get_data_pointer.argtypes = [c_char_p, c_void_p, c_int32]
num_gpus = cuda_helpers.get_cuda_num_devices()
print('num_gpus=', num_gpus)
print("py_model_create_arrays and descriptor...")
MODEL_DSCR = py_model_create_descriptor( c_int32(num_gpus) )
MODEL_DATA = py_model_create_arrays( c_int32(num_gpus) )
# Create output directory
output_dir = Path(output_dir)
if not output_dir.exists(): output_dir.mkdir(parents=True)
OUTPUT_NAME=f"{ckpt_prefix}_H_{hid}_W_{W}_LR_{initial_learning_rate}_"
OUTPUT = str(output_dir / OUTPUT_NAME)
# Allocate memory for initial encodings
input_data_on_disk_encoding = numpy.load(encodings_source, mmap_mode='r')
Ns_1 = input_data_on_disk_encoding.shape[0]
    # The full input cannot be pushed to the GPU; keep it in pinned host memory instead
INPUT = python_get_cuda_pinned_memory(ctypes.c_uint64(Ns_1*ctypes.sizeof(ctypes.c_int32)))
#create numpy array to store input data, use memory allocated for INPUT
INPUT_data_pointer = ctypes.cast(INPUT,ctypes.POINTER(ctypes.c_int32))
INPUT_np_array = numpy.ctypeslib.as_array(INPUT_data_pointer,shape=(Ns_1,))
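    # INPUT was allocated with cudaHostAlloc (pinned, page-locked host memory);
    # wrapping the raw pointer with numpy.ctypeslib.as_array gives a zero-copy
    # numpy view, so the copy below fills the pinned buffer directly and
    # host-to-GPU transfers can use fast DMA.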
print('Copying input...')
numpy.copyto(INPUT_np_array,input_data_on_disk_encoding)
vocabulary_size = numpy.uint64(numpy.max(INPUT_np_array)+1)
N = numpy.uint64(vocabulary_size*2)
print(f"Determined a vocabulary size of {vocabulary_size}")
input_data_on_disk_offsets = numpy.load(offsets_source,mmap_mode='r')
Number_of_sentences = input_data_on_disk_offsets.shape[0] - 1
Number_of_phrases = prune_input.getnum_samples(ctypes.c_void_p(input_data_on_disk_offsets.__array_interface__['data'][0]),
c_uint64(Number_of_sentences), c_uint64(W) )
print('Number of phrases: ', Number_of_phrases)
#allocate memory for offsets for phrases of size W
INPUT_phrases_offsets = python_get_cuda_pinned_memory(ctypes.c_uint64((Number_of_phrases+1)*ctypes.sizeof(ctypes.c_int64) ))
#compute offsets
py_compute_offset_phrases(c_void_p(INPUT_phrases_offsets),
c_void_p(input_data_on_disk_offsets.__array_interface__['data'][0]), c_uint64(Number_of_sentences),
c_uint64(W))
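    # Presumably every W-token sliding window ("phrase") within a chunk becomes one
    # training sample: e.g. with W=3, a chunk spanning offsets [0, 5) contributes
    # windows starting at tokens 0, 1, and 2. The exact counting is delegated to the
    # C helpers getnum_samples / compute_offset_phrases.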
Ns_1 = Number_of_phrases
model_descriptor.print_model_params(ctypes.c_void_p(MODEL_DSCR))
py_set_model_param_int(IM_HEIGHT,b'IM_HEIGHT',ctypes.c_void_p(MODEL_DSCR))
py_set_model_param_int(IM_WIDTH,b'IM_WIDTH',ctypes.c_void_p(MODEL_DSCR))
py_set_model_param_int(Nchannels,b'Nchannels',ctypes.c_void_p(MODEL_DSCR))
py_set_model_param_int(ctypes.c_uint64(Ns_1),b'Ns_1',ctypes.c_void_p(MODEL_DSCR))
#
py_set_model_param_int(W, b'W', ctypes.c_void_p(MODEL_DSCR))
py_set_model_param_int(stride, b'ST', ctypes.c_void_p(MODEL_DSCR))
py_set_model_param_int(hid, b'hid', ctypes.c_void_p(MODEL_DSCR))
py_set_model_param_int(batch_size, b'Num', ctypes.c_void_p(MODEL_DSCR))
py_set_model_param_int(c_uint64(vocabulary_size), b'vocabulary_size',ctypes.c_void_p(MODEL_DSCR))
py_set_model_param_int(sparse_input, b'sparse_input',ctypes.c_void_p(MODEL_DSCR))
py_set_model_param_int(frequency_scaling, b'frequency_scaling',ctypes.c_void_p(MODEL_DSCR))
py_set_model_param_float(c_float(initial_learning_rate), b'initial_learning_rate', ctypes.c_void_p(MODEL_DSCR))
py_set_model_param_float(c_float(delta), b'delta', ctypes.c_void_p(MODEL_DSCR))
py_set_model_param_float(c_float(prec), b'prec', ctypes.c_void_p(MODEL_DSCR))
py_set_model_param_float(c_float(mu), b'mu', ctypes.c_void_p(MODEL_DSCR))
py_set_model_param_float(c_float(sigma), b'sigma', ctypes.c_void_p(MODEL_DSCR))
py_set_model_param_float(c_float(Lmid), b'Lmid', ctypes.c_void_p(MODEL_DSCR))
py_set_model_param_float(c_float(Lbase), b'Lbase', ctypes.c_void_p(MODEL_DSCR))
py_compute_model_derived_parameters(ctypes.c_void_p(MODEL_DSCR))
for gpu in range(1,num_gpus,1):
py_copy_model_params( ctypes.c_void_p(MODEL_DSCR), c_int32(0), c_int32(gpu) )
model_descriptor.print_model_params(ctypes.c_void_p(MODEL_DSCR))
py_model_arrays_allocate_memory(ctypes.c_void_p(MODEL_DSCR), ctypes.c_void_p(MODEL_DATA),c_int32(num_gpus))
print('setting INPUT pointer')
for gpu in range(0,num_gpus,1):
model_arrays.set_up_INPUT_pointer(b'INPUT', ctypes.c_void_p(MODEL_DATA), ctypes.c_void_p(MODEL_DSCR), ctypes.c_void_p(INPUT),ctypes.c_void_p(INPUT_phrases_offsets), b'i4',c_int32(gpu))
    if frequency_scaling == 1:
print('Computing inverse word frequency...')
for gpu in range(0,num_gpus,1):
py_compute_inverse_word_frequency(ctypes.c_void_p(MODEL_DATA), ctypes.c_void_p(MODEL_DSCR), ctypes.c_void_p(INPUT), c_uint64(input_data_on_disk_encoding.shape[0]), c_int32(gpu) )
#model_arrays.push_INPUT_memory_to_GPU(ctypes.c_void_p(MODEL_DATA), ctypes.c_void_p(MODEL_DSCR),c_int32(0), b'i4')
#initialize SYNAPSES
print('Setting initial weights...')
epoch_start = 0
if starting_checkpoint is not None:
        R = numpy.load(starting_checkpoint, mmap_mode='r')
#!/usr/bin/python
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import os
import math
def naca0012(x):
return 0.6 * (-0.1015 * x ** 4 + 0.2843 * x ** 3 - 0.3576 \
* x ** 2 - 0.1221 * x + 0.2969 * x ** 0.5)
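# naca0012 is the NACA four-digit thickness distribution
#   y_t = 5t * (0.2969*sqrt(x) - 0.1221*x - 0.3576*x**2 + 0.2843*x**3 - 0.1015*x**4)
# with t = 0.12 (hence the factor 0.6); the x and x**2 coefficients differ slightly
# from the textbook 0.1260 and 0.3516, apparently a closed-trailing-edge variant.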
def simpleIteration(alpha, beta, gamma, x):
out = (alpha[1:-1, 1:-1] * (x[1:-1, 2:] + x[1:-1, 0:-2]) + \
gamma[1:-1, 1:-1] * (x[2:, 1:-1] + x[0:-2, 1:-1]) - \
beta[1:-1, 1:-1] / 2.0 * (x[2:, 2:] + x[0:-2, 0:-2] - x[0:-2, 2:] \
- x[2:, 0:-2])) / (2.0 * (alpha[1:-1, 1:-1] + gamma[1:-1, 1:-1]))
return out
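# simpleIteration performs one point-Jacobi sweep of the elliptic (Winslow-type)
# grid-generation equations
#   alpha * x_xixi - 2 * beta * x_xieta + gamma * x_etaeta = 0   (and likewise for y),
# discretized with central differences and solved for the centre node of each stencil.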
def isConverg(dx, dy, errlimit):
errx = np.abs(dx[1:-1, 1:-1]).max()
erry = np.abs(dy[1:-1, 1:-1]).max()
return (errx < errlimit) & (erry < errlimit)
class Ogrid():
    '''
    This is an O-grid generator with:
    - Slitting, far-field, airfoil, and in-field boundaries
    - Radius of the far field
    - Maximum number of iterations and the error limit
    - Number of points on the airfoil surface
    '''
def __init__(self, N, radius, iterlimit, errlimit):
self.N = N
self.radius = radius
self.iterlimit = iterlimit
self.errlimit = errlimit
def initialGrid(self):
n = self.N
r = self.radius
theta = np.array([2 * math.pi * (i - 1) / (n - 1) for i in range(0, n + 2)])
x = np.zeros((n, n + 2))
y = np.zeros((n, n + 2))
# Airfoil boundary
x[0, 1:-1] = 0.5 * (1 + np.cos(theta[1:-1]))
y[0, 1: int((n + 1) / 2)] = - naca0012(x[0, 1:int((n + 1) / 2)])
y[0, int((n + 1) / 2):-1] = naca0012(x[0, int((n + 1) / 2):-1])
# Far field boundary
x[-1, 1:-1] = r * np.cos(- theta[1:-1])
y[-1, 1:-1] = r * np.sin(- theta[1:-1])
# In field boundary
for i in range(1, n - 1):
x[i, 1:-1] = x[0, 1:-1] + (x[-1, 1:-1] - x[0, 1:-1]) * i / (n - 1)
y[i, 1:-1] = y[0, 1:-1] + (y[-1, 1:-1] - y[0, 1:-1]) * i / (n - 1)
        # Slitting (branch cut): the ghost columns wrap values across the O-grid cut
x[:, 0], x[:, -1] = x[:, -3], x[:, 2]
y[:, 0], y[:, -1] = y[:, -3], y[:, 2]
return x, y
def gridGenerate(self):
# Generate grid using simple iteration
# Initial variables
n = self.N
alpha = np.zeros((n, n+2))
beta = np.zeros((n, n+2))
gamma = np.zeros((n, n+2))
x1 = np.zeros((n, n+2))
x2 = np.zeros((n, n+2))
y1 = np.zeros((n, n+2))
y2 = np.zeros((n, n+2))
x, y = self.initialGrid()
        for i in range(0, self.iterlimit):
            xs, ys = x.copy(), y.copy()  # important: without .copy(), xs/ys would alias x/y and change with them
x1[1:-1, 1:-1] = (xs[2:, 1:-1] - xs[0:-2, 1:-1]) / 2.0
y1[1:-1, 1:-1] = (ys[2:, 1:-1] - ys[0:-2, 1:-1]) / 2.0
x2[1:-1, 1:-1] = (xs[1:-1, 2:] - xs[1:-1, 0:-2]) / 2.0
y2[1:-1, 1:-1] = (ys[1:-1, 2:] - ys[1:-1, 0:-2]) / 2.0
# Calculate alpha, beta and gamma
alpha[1:-1, 1:-1] = x1[1:-1, 1:-1] ** 2 + y1[1:-1, 1:-1] ** 2
beta[1:-1, 1:-1] = x2[1:-1, 1:-1] * x1[1:-1, 1:-1] + y2[1:-1, 1:-1] * y1[1:-1, 1:-1]
gamma[1:-1, 1:-1] = x2[1:-1, 1:-1] ** 2 + y2[1:-1, 1:-1] ** 2
# Using simple iteration equation
x[1:-1, 1:-1] = simpleIteration(alpha, beta, gamma, xs)
y[1:-1, 1:-1] = simpleIteration(alpha, beta, gamma, ys)
# Slitting
x[:, 0], x[:, -1] = x[:, -3], x[:, 2]
y[:, 0], y[:, -1] = y[:, -3], y[:, 2]
if isConverg(x - xs, y - ys, self.errlimit):
return x, y
            elif i == (self.iterlimit - 1):
return x, y
def gridExport(self, x, y):
# Export grid to a dat file
np.savetxt('Result' + os.sep + 'o-grid.dat', np.transpose([x[:, 1:-1].flatten(), y[:, 1:-1].flatten()]), \
header='variables=x,y\nzone i=101,j=101,F=POINT', delimiter='\t', comments='')
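# Usage sketch (assumed entry point; parameter values are illustrative only):
#   grid = Ogrid(N=101, radius=10, iterlimit=10000, errlimit=1e-6)
#   x, y = grid.gridGenerate()
#   grid.gridExport(x, y)  # writes Result/o-grid.dat (the Result folder must exist)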
class GridSolver():
'''
    This is an O-grid solver with:
- Grid coordinates input
- Freestream velocity input
- Phi, u, v, Cp output
'''
def __init__(self, x, y, Vf, errlimit, iterlimit):
self.x = x
self.y = y
self.Vf = Vf
self.errlimit = errlimit
self.iterlimit = iterlimit
def phiSolver(self):
Vf = self.Vf
errlimit = self.errlimit
iterlimit = self.iterlimit
x = self.x
y = self.y
phi, phii, ak = 0*x.copy(), 0*x.copy(), 0*x.copy()
phi[:, 1:-1] = Vf * x[:, 1:-1]
for i in range(0, iterlimit):
phi[:, 0], phi[:, -1] = phi[:, -3], phi[:, 2]
phis = phi.copy()
x1 = (x[0, 2] - x[0, 0]) / 2.0
y1 = (y[0, 2] - y[0, 0]) / 2.0
x2 = (-x[2, 1] + 4.0 * x[1, 1] - 3.0 * x[0, 1]) / 2.0
y2 = (-y[2, 1] + 4.0 * y[1, 1] - 3.0 * y[0, 1]) / 2.0
phi[0, 1] = (4 * phi[1, 1] - phi[2, 1] - (y2 - x2) / (y1 - x1) * (phi[0, 2] - phi[0, 0])) / 3.0
phi[0, -1] = phi[0, 1]
x1 = (x[0, 3:-1] - x[0, 1:-3]) / 2.0
y1 = (y[0, 3:-1] - y[0, 1:-3]) / 2.0
x2 = (-x[2, 2:-2] + 4.0 * x[1, 2:-2] - 3.0 * x[0, 2:-2]) / 2.0
y2 = (-y[2, 2:-2] + 4.0 * y[1, 2:-2] - 3.0 * y[0, 2:-2]) / 2.0
phii[0, 1:-3] = (phi[0, 3:-1] - phi[0, 1:-3]) / 2.0
ak[0, 1:-3] = (x1 * x2 + y1 * y2) / (x1 ** 2 + y1 ** 2)
phi[0, 2:-2] = - (2.0 * phii[0, 1:-3] * ak[0, 1:-3] - 4.0 * phi[1, 2:-2] + phi[2, 2:-2]) / 3.0
phi[1:-1, 0], phi[1:-1, -1] = phi[1:-1, -3], phi[1:-1, 2]
alpha = ((x[2:, 1:-1] - x[0:-2, 1:-1]) ** 2 + (y[2:, 1:-1] - y[0:-2, 1:-1]) ** 2) / 4.0
beta = -(((x[2:, 1:-1] - x[0:-2, 1:-1]) * (x[1:-1, 2:] - x[1:-1, 0:-2])) + \
((y[2:, 1:-1] - y[0:-2, 1:-1]) * (y[1:-1, 2:] - y[1:-1, 0:-2]))) / 4.0
gamma = ((x[1:-1, 2:] - x[1:-1, 0:-2]) ** 2 + (y[1:-1, 2:] - y[1:-1, 0:-2]) ** 2) / 4.0
phi[1:-1, 1:-1] = 0.5 * (alpha * (phi[1:-1, 2:] + phi[1:-1, 0:-2]) + gamma * \
(phi[2:, 1:-1] + phi[0:-2, 1:-1]) + 0.5 * beta * (phi[2:, 2:] + phi[0:-2, 0:-2] -\
phi[0:-2, 2:] - phi[2:, 0:-2])) / (alpha + gamma)
            # assumed convergence check; the original source is truncated here
            if (np.abs((phi - phis)[1:-1, 1:-1]) < errlimit).all():
                break
"""module for complete independent games"""
import functools
import itertools
import numpy as np
from gameanalysis import rsgame
from gameanalysis import utils
class _MatrixGame(rsgame._CompleteGame): # pylint: disable=protected-access
"""Matrix game representation
This represents a complete independent game more compactly than a Game, but
only works for complete independent games.
Parameters
----------
role_names : (str,)
The name of each role.
strat_names : ((str,),)
The name of each strategy per role.
payoff_matrix : ndarray
The matrix of payoffs for an asymmetric game. The last axis is the
payoffs for each player, the first axes are the strategies for each
player. matrix.shape[:-1] must correspond to the number of strategies
for each player. matrix.ndim - 1 must equal matrix.shape[-1].
"""
def __init__(self, role_names, strat_names, payoff_matrix):
super().__init__(role_names, strat_names, np.ones(len(role_names), int))
self._payoff_matrix = payoff_matrix
self._payoff_matrix.setflags(write=False)
self._prof_offset = np.zeros(self.num_strats, int)
self._prof_offset[self.role_starts] = 1
self._prof_offset.setflags(write=False)
self._payoff_view = self._payoff_matrix.view()
self._payoff_view.shape = (self.num_profiles, self.num_roles)
def payoff_matrix(self):
"""Return the payoff matrix"""
return self._payoff_matrix.view()
@utils.memoize
def min_strat_payoffs(self):
"""Returns the minimum payoff for each role"""
mpays = np.empty(self.num_strats)
for role, (pays, min_pays, strats) in enumerate(
zip(
np.rollaxis(self._payoff_matrix, -1),
np.split(mpays, self.role_starts[1:]),
self.num_role_strats,
)
):
np.rollaxis(pays, role).reshape((strats, -1)).min(1, min_pays)
mpays.setflags(write=False)
return mpays
@utils.memoize
def max_strat_payoffs(self):
"""Returns the minimum payoff for each role"""
mpays = np.empty(self.num_strats)
for role, (pays, max_pays, strats) in enumerate(
zip(
np.rollaxis(self._payoff_matrix, -1),
np.split(mpays, self.role_starts[1:]),
self.num_role_strats,
)
):
np.rollaxis(pays, role).reshape((strats, -1)).max(1, max_pays)
mpays.setflags(write=False)
return mpays
@functools.lru_cache(maxsize=1)
def payoffs(self):
profiles = self.profiles()
payoffs = np.zeros(profiles.shape)
payoffs[profiles > 0] = self._payoff_matrix.flat
return payoffs
def compress_profile(self, profile):
"""Compress profile in array of ints
Normal profiles are an array of number of players playing a strategy.
Since matrix games always have one player per role, this compresses
each roles counts into a single int representing the played strategy
per role.
"""
        utils.check(self.is_profile(profile).all(), "must pass valid profiles")
profile = np.asarray(profile, int)
return np.add.reduceat(
np.cumsum(self._prof_offset - profile, -1), self.role_starts, -1
)
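    # Worked example (illustrative): with two roles of 3 and 2 strategies,
    # role_starts = [0, 3] and _prof_offset = [1, 0, 0, 1, 0]. Profile
    # [0, 1, 0, 1, 0] gives cumsum(_prof_offset - profile) = [1, 0, 0, 0, 0],
    # and reduceat at [0, 3] yields [1, 0]: the played strategy index per role.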
def uncompress_profile(self, comp_prof):
"""Uncompress a profile"""
comp_prof = np.asarray(comp_prof, int)
utils.check(
np.all(comp_prof >= 0) and np.all(comp_prof < self.num_role_strats),
"must pass valid compressed profiles",
)
profile = np.zeros(comp_prof.shape[:-1] + (self.num_strats,), int)
inds = (
comp_prof.reshape((-1, self.num_roles))
+ self.role_starts
+ self.num_strats * np.arange(int(np.prod(comp_prof.shape[:-1])))[:, None]
)
profile.flat[inds] = 1
return profile
def get_payoffs(self, profiles):
"""Returns an array of profile payoffs"""
profiles = np.asarray(profiles, int)
ids = self.profile_to_id(profiles)
payoffs = np.zeros_like(profiles, float)
payoffs[profiles > 0] = self._payoff_view[ids].flat
return payoffs
def deviation_payoffs(
self, mixture, *, jacobian=False, **_
): # pylint: disable=too-many-locals
"""Computes the expected value of each pure strategy played against all
opponents playing mix.
Parameters
----------
mixture : ndarray
The mix all other players are using
jacobian : bool
If true, the second returned argument will be the jacobian of the
deviation payoffs with respect to the mixture. The first axis is
the deviating strategy, the second axis is the strategy in the mix
the jacobian is taken with respect to.
"""
rmixes = []
for role, rmix in enumerate(np.split(mixture, self.role_starts[1:])):
shape = [1] * self.num_roles
shape[role] = -1
rmixes.append(rmix.reshape(shape))
devpays = np.empty(self.num_strats)
for role, (out, strats) in enumerate(
zip(np.split(devpays, self.role_starts[1:]), self.num_role_strats)
):
pays = self._payoff_matrix[..., role].copy()
for rmix in (m for r, m in enumerate(rmixes) if r != role):
pays *= rmix
np.rollaxis(pays, role).reshape((strats, -1)).sum(1, out=out)
if not jacobian:
return devpays
jac = np.zeros((self.num_strats, self.num_strats))
for role, (jout, rstrats) in enumerate(
zip(np.split(jac, self.role_starts[1:]), self.num_role_strats)
):
for dev, (out, dstrats) in enumerate(
zip(np.split(jout, self.role_starts[1:], 1), self.num_role_strats)
):
if role == dev:
continue
pays = self._payoff_matrix[..., role].copy()
for rmix in (m for r, m in enumerate(rmixes) if r not in {role, dev}):
pays *= rmix
np.rollaxis(np.rollaxis(pays, role), dev + (role > dev), 1).reshape(
(rstrats, dstrats, -1)
).sum(2, out=out)
return devpays, jac
def restrict(self, restriction):
base = rsgame.empty_copy(self).restrict(restriction)
matrix = self._payoff_matrix
for i, mask in enumerate(np.split(restriction, self.role_starts[1:])):
matrix = matrix[(slice(None),) * i + (mask,)]
return _MatrixGame(base.role_names, base.strat_names, matrix.copy())
def _add_constant(self, constant):
return _MatrixGame(
self.role_names, self.strat_names, self._payoff_matrix + constant
)
def _multiply_constant(self, constant):
return _MatrixGame(
self.role_names, self.strat_names, self._payoff_matrix * constant
)
def _add_game(self, othr):
if not othr.is_complete():
return NotImplemented
try:
othr_mat = othr.payoff_matrix()
except AttributeError:
othr_mat = othr.get_payoffs(self.all_profiles())[
self.all_profiles() > 0
].reshape(self._payoff_matrix.shape)
return _MatrixGame(
self.role_names, self.strat_names, self._payoff_matrix + othr_mat
)
def _mat_to_json(self, matrix, role_index):
"""Convert a sub matrix into json representation"""
if role_index == self.num_roles:
return {role: float(pay) for role, pay in zip(self.role_names, matrix)}
strats = self.strat_names[role_index]
role_index += 1
return {
strat: self._mat_to_json(mat, role_index)
for strat, mat in zip(strats, matrix)
}
def to_json(self):
res = super().to_json()
res["payoffs"] = self._mat_to_json(self._payoff_matrix, 0)
res["type"] = "matrix.1"
return res
@utils.memoize
def __hash__(self):
return super().__hash__()
def __eq__(self, othr):
# pylint: disable-msg=protected-access
return (
super().__eq__(othr)
and
# Identical payoffs
np.allclose(self._payoff_matrix, othr._payoff_matrix)
)
def __repr__(self):
return "{}({})".format(self.__class__.__name__[1:], self.num_role_strats)
def matgame(payoff_matrix):
"""Create a game from a dense matrix with default names
Parameters
----------
payoff_matrix : ndarray-like
The matrix of payoffs for an asymmetric game.
"""
payoff_matrix = np.ascontiguousarray(payoff_matrix, float)
    return matgame_replace(
        rsgame.empty(
            np.ones(payoff_matrix.ndim - 1, int),
            np.array(payoff_matrix.shape[:-1], int),
        ),
        payoff_matrix,
    )
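# Usage sketch (illustrative): a two-role game with two strategies per role has a
# 2x2x2 payoff tensor whose last axis holds the payoff of each role.
#   game = matgame(np.random.rand(2, 2, 2))
#   devs = game.deviation_payoffs(np.array([0.5, 0.5, 0.5, 0.5]))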
import numpy as np
import pandas as pd
import random
import sys
import os
import math
import copy
import pickle
import pyclustering
from pyclustering.cluster import xmeans
from matplotlib import pyplot as plt
import seaborn as sns
sns.set_style(style="whitegrid")
import warnings
warnings.filterwarnings('ignore')
if int(sys.argv[1])==0:
if not os.path.exists("./figs"):
os.mkdir("./figs")
cluster_ls=[]
for i in range(50):
cluster_ls.append(i*10)
def cluster(data):
res=0
structure=0
data=np.array(data)
clans=[]
clusters=[]
structure_path=[]
for descent in [0,1]:
try:
init_center = xmeans.kmeans_plusplus_initializer(data[data[:,0]==descent], 2).initialize()
xm = xmeans.xmeans(data[data[:,0]==descent], init_center, ccore=False)
xm.process()
sizes = [len(cluster) for cluster in xm.get_clusters()]
centers=xm.get_centers()
clusters_candidate=xm.get_clusters()
for i in range(len(sizes)):
if sizes[i]>num_lineage/10:
clans.append(centers[i])
clusters.append(clusters_candidate[i])
# clans=xm.get_centers()
except:
continue
if len(clans)>0:
num_clans=len(clans)
clan_ls=[]
for i in range(num_clans):
mate=0
child=0
cur_mate=100
cur_child=100
for j in range(num_clans):
mate_cur=(clans[i][3]-clans[j][1])**2+(clans[i][4]-clans[j][2])**2
if mate_cur<cur_mate:
mate=j
cur_mate=mate_cur
if clans[i][0]==0:
clan_ls.append([i,mate,[i]])
else:
clan_ls.append([i,mate,[]])
for i in range(len(clan_ls)):
if clan_ls[i][2]==[]:
mates=[mate for mate in clan_ls if mate[1]==i]
children=[]
for mate in mates:
cur_child=100
for j in range(num_clans):
if clans[j][0]==1:
child_cur=(clans[i][1]-clans[j][1])**2+(clans[mate[0]][2]-clans[j][2])**2
if child_cur<cur_child:
child=j
cur_child=child_cur
children.append(child)
clan_ls[i][2]=children
counter=1
for clan in clan_ls:
if len(clan[2])==1:
clan[2]=clan[2][0]
elif len(clan[2])>1:
counter=counter*len(clan[2])
clan_ls_ls=[]
clan_ls_ori=copy.deepcopy(clan_ls[:])
for i in range(counter):
clan_ls=copy.deepcopy(clan_ls_ori[:])
for clan in clan_ls:
if type(clan[2])!=type(0):
if len(clan[2])==0:
clan[2]=-1
else:
clan[2]=clan[2][counter%len(clan[2])]
clan_ls_ls.append(clan_ls)
cur_man_cycle=0
cur_cycle=0
cur_woman_cycle=0
num_clans=0
for clan_ls in clan_ls_ls:
candidate=list(range(len(clans)))
while len(candidate)>0:
marriage_path=[]
cur=candidate[-1]
man_path=[cur]
vill_ls=[]
while True:
next=clan_ls[cur][2]
if next in man_path:
man_path=man_path[man_path.index(next):]
break
elif next == -1:
break
else:
man_path.append(next)
cur=next
cur_woman_cycle_cur=0
for clan in man_path:
if clan not in marriage_path:
cur_path=[clan]
cur=clan
while True:
next=clan_ls[clan_ls[cur][1]][2]
if next in cur_path:
marriage_path.extend(cur_path[cur_path.index(next):])
if len(cur_path[cur_path.index(next):])>cur_woman_cycle_cur:
cur_woman_cycle_cur=len(cur_path[cur_path.index(next):])
break
elif next == -1:
break
else:
cur_path.append(next)
cur=next
marriage_path=list(set(marriage_path))
candidate.pop()
for man in man_path:
if man not in marriage_path:
man_path.remove(man)
if man in candidate:
candidate.remove(man)
if len(marriage_path)>cur_cycle:
structure_path=marriage_path
cur_cycle=len(marriage_path)
cur_man_cycle=len(man_path)
cur_woman_cycle=cur_woman_cycle_cur
elif len(marriage_path)==cur_cycle and len(man_path)>cur_man_cycle:
structure_path=marriage_path
cur_cycle=len(marriage_path)
cur_man_cycle=len(man_path)
cur_woman_cycle=cur_woman_cycle_cur
elif len(marriage_path)==cur_cycle and len(man_path)==cur_man_cycle and cur_woman_cycle_cur>cur_woman_cycle:
structure_path=marriage_path
cur_cycle=len(marriage_path)
cur_man_cycle=len(man_path)
cur_woman_cycle=cur_woman_cycle_cur
else:
continue
rest=0
for man in marriage_path:
if len(clan_ls[man])==len(set(clan_ls[man])):
rest+=1
if rest>=cur_cycle/2:
rest=1
else:
rest=0
clan_ls=np.array(clan_ls)
ind_ls=[clan_ls[i,2] for i in list(set(clan_ls[:,1]))]
clan_ls=[list(clan) for clan in clan_ls if clan[0] in ind_ls]
if len(clan_ls)>num_clans:
num_clans=len(clan_ls)
if cur_cycle*cur_man_cycle*cur_woman_cycle!=0:
if cur_woman_cycle>1 and cur_man_cycle>1 and cur_cycle>3:
structure=4
elif cur_cycle==1:
structure=1
elif cur_cycle<=num_clans/3:
structure=5
elif cur_cycle==2:
structure=2
elif cur_woman_cycle>2 or cur_man_cycle>2:
structure=3
else:
structure=6
    # structure codes: 0=dead, 1=incest, 2=dual, 3=generalized, 4=restricted, 5=village division, 6=others
res=[cur_cycle,cur_man_cycle,cur_woman_cycle,rest]
return [structure,res,np.array(clusters)[structure_path],np.array(clans)[structure_path]]
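# cluster() returns [structure, [cycle, man_cycle, woman_cycle, rest], clusters, clans]:
# the inferred kinship-structure code (see the mapping above), cycle-length
# statistics of the marriage graph, and the clusters/centres lying on that path.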
class Village:
def __init__(self):
self.lineages=[]
self.population=0
class Lineage:
def __init__(self,trait,preference,descent,num_couple):
self.trait=trait
self.preference=preference
self.couple=num_couple
self.man=0
self.woman=0
self.father=trait
self.descent=descent
self.candidate=[]
def year(vill):
vill.population=0
lineages=[lineage for lineage in vill.lineages if lineage.couple>0]
traits=np.array([lineage.trait for lineage in lineages])
fathers=np.array([lineage.father for lineage in lineages])
preferences=np.array([lineage.preference for lineage in lineages])
for lineage in lineages:
distance=np.array([np.sum((traits-lineage.trait)**2,axis=1),np.sum((traits-lineage.preference)**2,axis=1),np.sum((preferences-lineage.trait)**2,axis=1),np.sum((fathers-lineage.trait)**2,axis=1),np.sum((traits-lineage.father)**2,axis=1)]).min(axis=0)
friend=np.sum(np.exp(-distance))/len(lineages)
rate=1/(1+coop*(1-friend))
couple=birth*lineage.couple
lineage.man=round(np.random.poisson(lam=couple)*rate)
lineage.woman=round(np.random.poisson(lam=couple)*rate)
lineage.couple=0
for lineage in lineages:
if min(lineage.man,lineage.woman)>2*initial_pop:
n=math.floor(math.log2(min(lineage.man,lineage.woman)/initial_pop))
lineage.man=round(lineage.man/2**n)
lineage.woman=round(lineage.woman/2**n)
for i in [0]*(2**n-1):
lineages.append(Lineage(np.copy(lineage.trait),np.copy(lineage.preference),lineage.descent,0))
lineages[-1].man=lineage.man
lineages[-1].woman=lineage.woman
lineages=[lineage for lineage in lineages if lineage.man*lineage.woman>0]
for lineage in lineages:
if random.random()<descent_mut:
lineage.descent=(lineage.descent+1)%2
lineage.trait+=np.random.uniform(-mutation,mutation,2)
lineage.preference+=np.random.uniform(-mutation,mutation,2)
preferences=np.array([lineage.preference for lineage in lineages])
for lineage in lineages:
enemy=np.sum(np.exp(-np.sum((preferences-lineage.preference)**2,axis=1)))/len(lineages)
rate=1/(1+conflict*enemy)
lineage.man=round(lineage.man*rate)
lineage.woman=round(lineage.woman*rate)
vill.population+=lineage.man+lineage.woman
vill.lineages=lineages[:]
def mating(vill):
lineages=vill.lineages
mates=np.array([mate.preference for mate in lineages])
for lineage in lineages:
if lineage.man>0:
dist=np.exp(-np.sum((mates-lineage.trait)**2,axis=1))
dist=dist/np.sum(dist)
mate = np.random.choice(lineages, p=dist)
mate.candidate.append(lineage)
for mate in lineages:
if mate.woman<1 or len(mate.candidate)==0:
mate.candidate=[]
continue
random.shuffle(mate.candidate)
for lineage in mate.candidate:
if mate.woman<1:
break
couple=min(lineage.man,mate.woman)
lineage.man-=couple
mate.woman-=couple
if lineage.descent==1:
lineages.append(Lineage(
                    np.array([lineage.trait[0], mate.trait[1]])
import tensorflow as tf
import numpy as np
import os.path
from cossmo.data_pipeline import convert_to_tf_example, \
read_single_cossmo_example, dynamic_bucket_data_pipeline
from cossmo.scoring_network import ScoringNetwork
from cossmo.output_networks import RaggedOutputNetwork
SEQ_LEN = 20
def sample_sequence(length):
    # np.random.randint's upper bound is exclusive, so 4 is needed to include 'T'
    return ''.join('ACGT'[k] for k in np.random.randint(0, 4, length))
class TestDataPipeline(tf.test.TestCase):
@staticmethod
def sample_example(n_alt_ss, event_type=None):
if event_type is None:
            event_type = ['acceptor', 'donor'][np.random.randint(0, 2)]
assert event_type in ('acceptor', 'donor')
const_exonic_seq = sample_sequence(SEQ_LEN)
const_intronic_seq = sample_sequence(SEQ_LEN)
alt_exonic_seq = [sample_sequence(SEQ_LEN) for _ in range(n_alt_ss)]
alt_intronic_seq = [sample_sequence(SEQ_LEN)
for _ in range(n_alt_ss)]
psi_distribution = np.random.rand(n_alt_ss, 1)
psi_distribution /= psi_distribution.sum(0, keepdims=True)
psi_std = np.random.rand(n_alt_ss, 1)
const_site_id = str(np.random.randint(1000))
const_site_pos = np.random.randint(100000)
        alt_ss_position = np.random.randint(0, 100000, n_alt_ss)
import os
import gdal
import numpy as np
import warnings
def main(input_folder, output_folder, Date):
# Do not show warnings
warnings.filterwarnings('ignore')
import pyWAPOR.ETLook as ETLook
import pyWAPOR.Functions.Processing_Functions as PF
import pyWAPOR.ETLook.outputs as out
# Define Date string
Date_str = "%d%02d%02d" %(Date.year, Date.month, Date.day)
# Input folder Date
input_folder_date = os.path.join(input_folder, Date_str)
############################ Define inputs ################################
#input_files
ALBEDO_filename = os.path.join(input_folder_date, "ALBEDO_%s.tif" %Date_str)
NDVI_filename = os.path.join(input_folder_date, "NDVI_%s.tif" %Date_str)
LST_filename = os.path.join(input_folder_date, "LST_%s.tif" %Date_str)
Time_filename = os.path.join(input_folder_date, "Time_%s.tif" %Date_str)
Lat_filename = os.path.join(input_folder_date, "Lat_%s.tif" %Date_str)
Lon_filename = os.path.join(input_folder_date, "Lon_%s.tif" %Date_str)
DEM_filename = os.path.join(input_folder_date, "DEM.tif")
Slope_filename = os.path.join(input_folder_date, "Slope.tif")
Aspect_filename = os.path.join(input_folder_date, "Aspect.tif")
LandMask_filename = os.path.join(input_folder_date, "LandMask.tif")
    Bulk_filename = os.path.join(input_folder_date, "Bulk_Stomatal_resistance.tif")
MaxObs_filename = os.path.join(input_folder_date, "Maximum_Obstacle_Height.tif")
Pair_24_0_filename = os.path.join(input_folder_date, "Pair_24_0_%s.tif" %Date_str)
Pair_inst_0_filename = os.path.join(input_folder_date, "Pair_inst_0_%s.tif" %Date_str)
Pair_inst_filename = os.path.join(input_folder_date, "Pair_inst_%s.tif" %Date_str)
Pre_filename = os.path.join(input_folder_date, "Precipitation_%s.tif" %Date_str)
Hum_24_filename = os.path.join(input_folder_date, "qv_24_%s.tif" %Date_str)
Hum_inst_filename = os.path.join(input_folder_date, "qv_inst_%s.tif" %Date_str)
Tair_24_filename = os.path.join(input_folder_date, "tair_24_%s.tif" %Date_str)
Tair_inst_filename = os.path.join(input_folder_date,"tair_inst_%s.tif" %Date_str)
Tair_amp_filename = os.path.join(input_folder_date, "Tair_amp_%s.tif" %Date_str)
Wind_24_filename = os.path.join(input_folder_date, "wind_24_%s.tif" %Date_str)
Wind_inst_filename = os.path.join(input_folder_date, "wind_inst_%s.tif" %Date_str)
WatCol_inst_filename = os.path.join(input_folder_date, "wv_inst_%s.tif" %Date_str)
Trans_24_filename = os.path.join(input_folder_date, "Trans_24_%s.tif" %Date_str)
############################ Define outputs ###############################
# Output folder Date
output_folder_date = os.path.join(output_folder, Date_str)
if not os.path.exists(output_folder_date):
os.makedirs(output_folder_date)
#output_files
vc_filename = os.path.join(output_folder_date, "vc_%s.tif" %Date_str)
lai_filename = os.path.join(output_folder_date, "LAI_%s.tif" %Date_str)
lai_eff_filename= os.path.join(output_folder_date, "LAI_eff_%s.tif" %Date_str)
sf_soil_filename = os.path.join(output_folder_date, "sf_soil_%s.tif" %Date_str)
lat_filename= os.path.join(output_folder_date, "lat_%s.tif" %Date_str)
slope_filename= os.path.join(output_folder_date, "slope_%s.tif" %Date_str)
aspect_filename = os.path.join(output_folder_date, "aspect_%s.tif" %Date_str)
ra_24_toa_filename = os.path.join(output_folder_date, "ra_24_toa_%s.tif" %Date_str)
ws_filename = os.path.join(output_folder_date, "ws_%s.tif" %Date_str)
diffusion_index_filename = os.path.join(output_folder_date, "diffusion_index_%s.tif" %Date_str)
ra_24_filename = os.path.join(output_folder_date, "ra_24_%s.tif" %Date_str)
stress_rad_filename = os.path.join(output_folder_date, "stress_rad_%s.tif" %Date_str)
p_air_24_filename = os.path.join(output_folder_date, "p_air_24_%s.tif" %Date_str)
vp_24_filename = os.path.join(output_folder_date, "vp_24_%s.tif" %Date_str)
svp_24_filename = os.path.join(output_folder_date, "svp_24_%s.tif" %Date_str)
vpd_24_filename = os.path.join(output_folder_date, "vpd_24_%s.tif" %Date_str)
stress_vpd_filename = os.path.join(output_folder_date, "stress_vpd_%s.tif" %Date_str)
stress_temp_filename = os.path.join(output_folder_date, "stress_temp_%s.tif" %Date_str)
r_canopy_0_filename= os.path.join(output_folder_date, "r_canopy_0_%s.tif" %Date_str)
t_air_k_24_filename = os.path.join(output_folder_date, "t_air_k_24_%s.tif" %Date_str)
l_net_filename = os.path.join(output_folder_date, "l_net_%s.tif" %Date_str)
int_mm_filename = os.path.join(output_folder_date, "int_mm_%s.tif" %Date_str)
lh_24_filename = os.path.join(output_folder_date, "lh_24_%s.tif" %Date_str)
int_wm2_filename = os.path.join(output_folder_date, "int_wm2_%s.tif" %Date_str)
rn_24_filename = os.path.join(output_folder_date, "rn_24_%s.tif" %Date_str)
rn_24_canopy_filename= os.path.join(output_folder_date, "rn_24_canopy_%s.tif" %Date_str)
t_air_k_i_filename = os.path.join(output_folder_date, "t_air_k_i_%s.tif" %Date_str)
vp_i_filename = os.path.join(output_folder_date, "vp_i_%s.tif" %Date_str)
ad_moist_i_filename= os.path.join(output_folder_date, "ad_moist_i_%s.tif" %Date_str)
ad_dry_i_filename = os.path.join(output_folder_date, "ad_dry_i_%s.tif" %Date_str)
ad_i_filename= os.path.join(output_folder_date, "ad_i_%s.tif" %Date_str)
u_b_i_bare_filename= os.path.join(output_folder_date, "u_b_i_bare_%s.tif" %Date_str)
lon_filename= os.path.join(output_folder_date, "lon_%s.tif" %Date_str)
ha_filename= os.path.join(output_folder_date, "ha_%s.tif" %Date_str)
ied_filename= os.path.join(output_folder_date, "ied_%s.tif" %Date_str)
h0_filename = os.path.join(output_folder_date, "h0_%s.tif" %Date_str)
h0ref_filename = os.path.join(output_folder_date, "h0ref_%s.tif" %Date_str)
m_filename = os.path.join(output_folder_date, "m_%s.tif" %Date_str)
rotm_filename = os.path.join(output_folder_date, "rotm_%s.tif" %Date_str)
Tl2_filename = os.path.join(output_folder_date, "Tl2_%s.tif" %Date_str)
B0c_filename = os.path.join(output_folder_date, "B0c_%s.tif" %Date_str)
Bhc_filename = os.path.join(output_folder_date, "Bhc_%s.tif" %Date_str)
Dhc_filename = os.path.join(output_folder_date, "Dhc_%s.tif" %Date_str)
ra_hor_clear_i_filename = os.path.join(output_folder_date, "ra_hor_clear_i_%s.tif" %Date_str)
emiss_atm_i_filename = os.path.join(output_folder_date, "emiss_atm_i_%s.tif" %Date_str)
rn_bare_filename = os.path.join(output_folder_date, "rn_bare_%s.tif" %Date_str)
rn_full_filename= os.path.join(output_folder_date, "rn_full_%s.tif" %Date_str)
u_b_i_full_filename = os.path.join(output_folder_date, "u_b_i_full_%s.tif" %Date_str)
u_star_i_bare_filename = os.path.join(output_folder_date, "u_star_i_bare_%s.tif" %Date_str)
u_star_i_full_filename = os.path.join(output_folder_date, "u_star_i_full_%s.tif" %Date_str)
u_i_soil_filename = os.path.join(output_folder_date, "u_i_soil_%s.tif" %Date_str)
ras_filename = os.path.join(output_folder_date, "ras_%s.tif" %Date_str)
raa_filename = os.path.join(output_folder_date, "raa_%s.tif" %Date_str)
rac_filename= os.path.join(output_folder_date, "rac_%s.tif" %Date_str)
t_max_bare_filename = os.path.join(output_folder_date, "t_max_bare_%s.tif" %Date_str)
t_max_full_filename= os.path.join(output_folder_date, "t_max_full_%s.tif" %Date_str)
w_i_filename = os.path.join(output_folder_date, "w_i_%s.tif" %Date_str)
t_dew_i_filename = os.path.join(output_folder_date, "t_dew_i_%s.tif" %Date_str)
t_wet_i_filename = os.path.join(output_folder_date, "t_wet_i_%s.tif" %Date_str)
t_wet_k_i_filename = os.path.join(output_folder_date, "t_wet_k_i_%s.tif" %Date_str)
lst_max_filename = os.path.join(output_folder_date, "lst_max_%s.tif" %Date_str)
se_root_filename = os.path.join(output_folder_date, "se_root_%s.tif" %Date_str)
stress_moist_filename= os.path.join(output_folder_date, "stress_moist_%s.tif" %Date_str)
r_canopy_0_filename= os.path.join(output_folder_date, "r_canopy_0_%s.tif" %Date_str)
r_canopy_filename= os.path.join(output_folder_date, "r_canopy_%s.tif" %Date_str)
z_obst_filename = os.path.join(output_folder_date, "z_obst_%s.tif" %Date_str)
z_oro_filename = os.path.join(output_folder_date, "z_oro_%s.tif" %Date_str)
z0m_filename = os.path.join(output_folder_date, "z0m_%s.tif" %Date_str)
ra_canopy_init_filename = os.path.join(output_folder_date, "ra_canopy_init_%s.tif" %Date_str)
u_b_24_filename = os.path.join(output_folder_date, "u_b_24_%s.tif" %Date_str)
disp_filename = os.path.join(output_folder_date, "disp_%s.tif" %Date_str)
u_star_24_init_filename = os.path.join(output_folder_date, "u_star_24_init_%s.tif" %Date_str)
ad_dry_24_filename = os.path.join(output_folder_date, "ad_dry_24_%s.tif" %Date_str)
ad_moist_24_filename = os.path.join(output_folder_date, "ad_moist_24_%s.tif" %Date_str)
ad_24_filename = os.path.join(output_folder_date, "ad_24_%s.tif" %Date_str)
psy_24_filename = os.path.join(output_folder_date, "psy_24_%s.tif" %Date_str)
ssvp_24_filename = os.path.join(output_folder_date, "ssvp_24_%s.tif" %Date_str)
t_24_init_filename = os.path.join(output_folder_date, "t_24_init_%s.tif" %Date_str)
h_canopy_24_init_filename= os.path.join(output_folder_date, "h_canopy_24_init_%s.tif" %Date_str)
t_24_filename= os.path.join(output_folder_date, "t_24_%s.tif" %Date_str)
t_24_mm_filename= os.path.join(output_folder_date, "t_24_mm_%s.tif" %Date_str)
sf_soil_filename= os.path.join(output_folder_date, "sf_soil_%s.tif" %Date_str)
rn_24_soil_filename= os.path.join(output_folder_date, "rn_24_soil_%s.tif" %Date_str)
r_soil_filename= os.path.join(output_folder_date, "r_soil_%s.tif" %Date_str)
ra_soil_init_filename= os.path.join(output_folder_date, "ra_soil_init_%s.tif" %Date_str)
u_b_24_filename= os.path.join(output_folder_date, "u_b_24_%s.tif" %Date_str)
u_star_24_soil_init_filename= os.path.join(output_folder_date, "u_star_24_soil_init_%s.tif" %Date_str)
g0_bs_filename= os.path.join(output_folder_date, "g0_bs_%s.tif" %Date_str)
g0_24_filename= os.path.join(output_folder_date, "g0_24_%s.tif" %Date_str)
e_24_init_filename= os.path.join(output_folder_date, "e_24_init_%s.tif" %Date_str)
h_soil_24_init_filename= os.path.join(output_folder_date, "h_soil_24_init_%s.tif" %Date_str)
e_24_filename= os.path.join(output_folder_date, "e_24_%s.tif" %Date_str)
e_24_mm_filename= os.path.join(output_folder_date, "e_24_mm_%s.tif" %Date_str)
et_24_mm_filename= os.path.join(output_folder_date, "et_24_mm_%s.tif" %Date_str)
rn_24_grass_filename= os.path.join(output_folder_date, "rn_24_grass_%s.tif" %Date_str)
et_ref_24_filename= os.path.join(output_folder_date, "et_ref_24_%s.tif" %Date_str)
et_ref_24_mm_filename= os.path.join(output_folder_date, "et_ref_24_mm_%s.tif" %Date_str)
########################## Open input rasters #############################
dest_lst = gdal.Open(LST_filename)
lst = dest_lst.GetRasterBand(1).ReadAsArray()
lst[lst == -9999] = np.nan
dest_albedo = gdal.Open(ALBEDO_filename)
r0 = dest_albedo.GetRasterBand(1).ReadAsArray()
r0[np.isnan(lst)] = np.nan
dest_ndvi = gdal.Open(NDVI_filename)
ndvi = dest_ndvi.GetRasterBand(1).ReadAsArray()
ndvi[np.isnan(lst)] = np.nan
desttime = gdal.Open(Time_filename)
dtime = desttime.GetRasterBand(1).ReadAsArray()
dtime[np.isnan(lst)] = np.nan
dest_lat = gdal.Open(Lat_filename)
lat_deg = dest_lat.GetRasterBand(1).ReadAsArray()
lat_deg[np.isnan(lst)] = np.nan
dest_lon = gdal.Open(Lon_filename)
lon_deg = dest_lon.GetRasterBand(1).ReadAsArray()
lon_deg[np.isnan(lst)] = np.nan
dest_dem = gdal.Open(DEM_filename)
z = dest_dem.GetRasterBand(1).ReadAsArray()
z[np.isnan(lst)] = np.nan
dest_slope = gdal.Open(Slope_filename)
slope_deg = dest_slope.GetRasterBand(1).ReadAsArray()
slope_deg[np.isnan(lst)] = np.nan
dest_aspect = gdal.Open(Aspect_filename)
aspect_deg = dest_aspect.GetRasterBand(1).ReadAsArray()
aspect_deg[np.isnan(lst)] = np.nan
dest_lm = gdal.Open(LandMask_filename)
land_mask = dest_lm.GetRasterBand(1).ReadAsArray()
land_mask[np.isnan(lst)] = np.nan
#dest_bulk = gdal.Open(Bulk_filename)
#bulk = dest_bulk.GetRasterBand(1).ReadAsArray()
dest_maxobs = gdal.Open(MaxObs_filename)
z_obst_max = dest_maxobs.GetRasterBand(1).ReadAsArray()
z_obst_max[np.isnan(lst)] = np.nan
dest_pairsea24 = gdal.Open(Pair_24_0_filename)
p_air_0_24 = dest_pairsea24.GetRasterBand(1).ReadAsArray()
p_air_0_24 = ETLook.meteo.air_pressure_kpa2mbar(p_air_0_24)
p_air_0_24[np.isnan(lst)] = np.nan
dest_pairseainst = gdal.Open(Pair_inst_0_filename)
p_air_0_i = dest_pairseainst.GetRasterBand(1).ReadAsArray()
p_air_0_i = ETLook.meteo.air_pressure_kpa2mbar(p_air_0_i)
p_air_0_i[np.isnan(lst)] = np.nan
dest_pairinst = gdal.Open(Pair_inst_filename)
p_air_i = dest_pairinst.GetRasterBand(1).ReadAsArray()
p_air_i = ETLook.meteo.air_pressure_kpa2mbar(p_air_i)
p_air_i[np.isnan(lst)] = np.nan
dest_precip = gdal.Open(Pre_filename)
P_24 = dest_precip.GetRasterBand(1).ReadAsArray()
P_24[np.isnan(lst)] = np.nan
dest_hum24 = gdal.Open(Hum_24_filename)
qv_24 = dest_hum24.GetRasterBand(1).ReadAsArray()
qv_24[np.isnan(lst)] = np.nan
dest_huminst = gdal.Open(Hum_inst_filename)
qv_i = dest_huminst.GetRasterBand(1).ReadAsArray()
qv_i[np.isnan(lst)] = np.nan
dest_tair24 = gdal.Open(Tair_24_filename)
t_air_k_24 = dest_tair24.GetRasterBand(1).ReadAsArray()
t_air_24 = ETLook.meteo.air_temperature_celcius(t_air_k_24)
#t_air_24 = ETLook.meteo.disaggregate_air_temperature_daily(t_air_24_coarse, z, z_coarse, lapse)
t_air_24[np.isnan(lst)] = np.nan
dest_tairinst = gdal.Open(Tair_inst_filename)
t_air_k_i = dest_tairinst.GetRasterBand(1).ReadAsArray()
t_air_i = ETLook.meteo.air_temperature_celcius(t_air_k_i)
t_air_i[np.isnan(lst)] = np.nan
dest_tairamp = gdal.Open(Tair_amp_filename)
t_amp_year = dest_tairamp.GetRasterBand(1).ReadAsArray()
t_amp_year[np.isnan(lst)] = np.nan
dest_wind24 = gdal.Open(Wind_24_filename)
u_24 = dest_wind24.GetRasterBand(1).ReadAsArray()
u_24[np.isnan(lst)] = np.nan
dest_windinst = gdal.Open(Wind_inst_filename)
u_i = dest_windinst.GetRasterBand(1).ReadAsArray()
u_i[np.isnan(lst)] = np.nan
dest_watcol = gdal.Open(WatCol_inst_filename)
wv_i = dest_watcol.GetRasterBand(1).ReadAsArray()
wv_i[np.isnan(lst)] = np.nan
dest_trans = gdal.Open(Trans_24_filename)
trans_24 = dest_trans.GetRasterBand(1).ReadAsArray()
trans_24[np.isnan(lst)] = np.nan
    # take the georeference (geotransform and projection) from the albedo raster
geo_ex = dest_albedo.GetGeoTransform()
proj_ex = dest_albedo.GetProjection()
########################## Open input constants ###########################
doy = int(Date.strftime("%j"))
    aod550_i = 0.01  # https://ladsweb.modaps.eosdis.nasa.gov/archive/allData/61/MOD04_L2 (haven't really found a standard product for this)
se_top = 0.5
porosity = 0.4
    '''
    http://lawr.ucdavis.edu/classes/SSC100/probsets/pset01.html
    6. Calculate the porosity of a soil sample that has a bulk density of 1.35 g/cm3. Assume the particle density is 2.65 g/cm3.
    Porosity = (1 - (rho_b / rho_d)) x 100 = (1 - (1.35 / 2.65)) x 100 = 49%
    '''
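    # Note: a generic porosity of 0.4 is used above rather than the 49% of the
    # worked example, which is specific to that soil sample.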
# Create QC array
QC = np.ones(lst.shape)
QC[np.isnan(lst)] = np.nan
# page 31 flow diagram
# **effective_leaf_area_index**************************************************
# constants or predefined:
nd_min = 0.125
nd_max = 0.8
vc_pow = 0.7
vc_min = 0
vc_max = 0.9677324224821418
lai_pow = -0.45
# **atmospheric canopy resistance***********************************************
# constants or predefined:
diffusion_slope = -1.33
diffusion_intercept = 1.15
t_opt = 25 # optimal temperature for plant growth
t_min = 0 # minimal temperature for plant growth
t_max = 50 # maximal temperature for plant growth
vpd_slope = -0.3
rs_min = 70
rcan_max = 1000000
# **net radiation canopy******************************************************
# constants or predefined:
vp_slope = 0.14
vp_offset = 0.34
lw_slope = 1.35
lw_offset = 0.35
int_max = 0.2
# **canopy resistance***********************************************************
# constants or predefined:
z_obs = 2
z_b = 100
z0m_bare = 0.001
r0_bare = 0.38
r0_full = 0.18
tenacity = 1.5
disp_bare = 0.0
disp_full = 0.667
fraction_h_bare = 0.65
fraction_h_full = 0.95
z0m_full = 0.1
# **initial canopy aerodynamic resistance***********************************************************
# constants or predefined:
ndvi_obs_min = 0.25
ndvi_obs_max = 0.75
obs_fr = 0.25
dem_resolution = 250
# **ETLook.unstable.initial_friction_velocity_daily***********************************************************
# constants or predefined:
c1 = 1
# **ETLook.unstable.transpiration***********************************************************
# constants or predefined:
iter_h = 3
# **ETLook.resistance.soil_resistance***********************************************************
# constants or predefined:
r_soil_pow = -2.1
r_soil_min = 800
# **ETLook.unstable.initial_sensible_heat_flux_soil_daily***********************************************************
# constants or predefined:
#porosity = 0.4 #Note: soil dependent
#se_top = 1.0 #Note should be input !
rn_slope = 0.92
rn_offset = -61.0
# **ETLook.unstable.evaporation***********************************************************
# constants or predefined:
r0_grass = 0.23
######################## MODEL ETLOOK #########################################
# **effective_leaf_area_index**************************************************
vc = ETLook.leaf.vegetation_cover(ndvi, nd_min, nd_max, vc_pow)
lai = ETLook.leaf.leaf_area_index(vc, vc_min, vc_max, lai_pow)
lai_eff = ETLook.leaf.effective_leaf_area_index(lai)
vc[np.isnan(QC)] = np.nan
lai[np.isnan(QC)] = np.nan
lai_eff[np.isnan(QC)] = np.nan
if out.vc == 1:
PF.Save_as_tiff(vc_filename, vc, geo_ex, proj_ex)
if out.lai == 1:
PF.Save_as_tiff(lai_filename, lai, geo_ex, proj_ex)
if out.lai_eff == 1:
PF.Save_as_tiff(lai_eff_filename, lai_eff, geo_ex, proj_ex)
#*******TRANSPIRATION COMPONENT****************************************************************
# **soil fraction**************************************************************
sf_soil = ETLook.radiation.soil_fraction(lai)
sf_soil[np.isnan(QC)] = np.nan
if out.sf_soil == 1:
PF.Save_as_tiff(sf_soil_filename, sf_soil, geo_ex, proj_ex)
# **atmospheric canopy resistance***********************************************
iesd = ETLook.solar_radiation.inverse_earth_sun_distance(doy)
sc = ETLook.solar_radiation.seasonal_correction(doy)
day_angle = ETLook.clear_sky_radiation.day_angle(doy)
decl = ETLook.solar_radiation.declination(doy)
lat = ETLook.solar_radiation.latitude_rad(lat_deg)
slope = ETLook.solar_radiation.slope_rad(slope_deg)
aspect = ETLook.solar_radiation.aspect_rad(aspect_deg)
ra_24_toa = ETLook.solar_radiation.daily_solar_radiation_toa(sc, decl, iesd, lat, slope, aspect)
ws = ETLook.solar_radiation.sunset_hour_angle(lat, decl)
ra_24_toa_flat = ETLook.solar_radiation.daily_solar_radiation_toa_flat(decl, iesd, lat, ws)
diffusion_index = ETLook.solar_radiation.diffusion_index(trans_24, diffusion_slope, diffusion_intercept)
# choose one of the two options below
#ra_24 = ETLook.solar_radiation.daily_solar_radiation_flat(ra_24_toa_flat, trans_24)
ra_24 = ETLook.solar_radiation.daily_total_solar_radiation(ra_24_toa, ra_24_toa_flat, diffusion_index, trans_24)
stress_rad = ETLook.stress.stress_radiation(ra_24)
p_air_24 = ETLook.meteo.air_pressure_daily(z, p_air_0_24)
vp_24 = ETLook.meteo.vapour_pressure_from_specific_humidity_daily(qv_24, p_air_24)
svp_24 = ETLook.meteo.saturated_vapour_pressure_daily(t_air_24)
vpd_24 = ETLook.meteo.vapour_pressure_deficit_daily(svp_24, vp_24)
stress_vpd = ETLook.stress.stress_vpd(vpd_24, vpd_slope)
stress_temp = ETLook.stress.stress_temperature(t_air_24, t_opt, t_min, t_max)
r_canopy_0 = ETLook.resistance.atmospheric_canopy_resistance(lai_eff, stress_rad, stress_vpd, stress_temp, rs_min, rcan_max)
# Save as tiff files
lat[np.isnan(QC)] = np.nan
slope[np.isnan(QC)] = np.nan
aspect[np.isnan(QC)] = np.nan
ra_24_toa[np.isnan(QC)] = np.nan
ws[np.isnan(QC)] = np.nan
ra_24_toa_flat[np.isnan(QC)] = np.nan
diffusion_index[np.isnan(QC)] = np.nan
ra_24[np.isnan(QC)] = np.nan
stress_rad[np.isnan(QC)] = np.nan
p_air_24[np.isnan(QC)] = np.nan
vp_24[np.isnan(QC)] = np.nan
svp_24[np.isnan(QC)] = np.nan
vpd_24[np.isnan(QC)] = np.nan
stress_vpd[np.isnan(QC)] = np.nan
stress_temp[np.isnan(QC)] = np.nan
r_canopy_0[np.isnan(QC)] = np.nan
if out.lat == 1:
PF.Save_as_tiff(lat_filename, lat, geo_ex, proj_ex)
if out.slope == 1:
PF.Save_as_tiff(slope_filename, slope, geo_ex, proj_ex)
if out.aspect == 1:
PF.Save_as_tiff(aspect_filename, aspect, geo_ex, proj_ex)
if out.ws == 1:
PF.Save_as_tiff(ws_filename, ws, geo_ex, proj_ex)
if out.ra_24_toa == 1:
PF.Save_as_tiff(ra_24_toa_filename, ra_24_toa, geo_ex, proj_ex)
if out.diffusion_index == 1:
PF.Save_as_tiff(diffusion_index_filename, diffusion_index, geo_ex, proj_ex)
if out.ra_24 == 1:
PF.Save_as_tiff(ra_24_filename, ra_24, geo_ex, proj_ex)
if out.stress_rad == 1:
PF.Save_as_tiff(stress_rad_filename, stress_rad, geo_ex, proj_ex)
if out.p_air_24 == 1:
PF.Save_as_tiff(p_air_24_filename, p_air_24, geo_ex, proj_ex)
if out.vp_24 == 1:
PF.Save_as_tiff(vp_24_filename, vp_24, geo_ex, proj_ex)
if out.svp_24 == 1:
PF.Save_as_tiff(svp_24_filename, svp_24, geo_ex, proj_ex)
if out.vpd_24 == 1:
PF.Save_as_tiff(vpd_24_filename, vpd_24, geo_ex, proj_ex)
if out.stress_vpd == 1:
PF.Save_as_tiff(stress_vpd_filename, stress_vpd, geo_ex, proj_ex)
if out.stress_temp == 1:
PF.Save_as_tiff(stress_temp_filename, stress_temp, geo_ex, proj_ex)
if out.r_canopy_0 == 1:
PF.Save_as_tiff(r_canopy_0_filename, r_canopy_0, geo_ex, proj_ex)
# **net radiation canopy******************************************************
t_air_k_24 = ETLook.meteo.air_temperature_kelvin_daily(t_air_24)
# select one of the below two
#l_net = ETLook.radiation.longwave_radiation_fao_etref(t_air_k_24, vp_24, trans_24)
l_net = ETLook.radiation.longwave_radiation_fao(t_air_k_24, vp_24, trans_24, vp_slope, vp_offset, lw_slope, lw_offset)
int_mm = ETLook.evapotranspiration.interception_mm(P_24, vc, lai, int_max)
lh_24 = ETLook.meteo.latent_heat_daily(t_air_24)
int_wm2 = ETLook.radiation.interception_wm2(int_mm, lh_24)
rn_24 = ETLook.radiation.net_radiation(r0, ra_24, l_net, int_wm2)
rn_24_canopy = ETLook.radiation.net_radiation_canopy(rn_24, sf_soil)
# Save as tiff files
t_air_k_24[np.isnan(QC)] = np.nan
l_net[np.isnan(QC)] = np.nan
int_mm[np.isnan(QC)] = np.nan
lh_24[np.isnan(QC)] = np.nan
int_wm2[np.isnan(QC)] = np.nan
rn_24[np.isnan(QC)] = np.nan
rn_24_canopy[np.isnan(QC)] = np.nan
if out.t_air_k_24 == 1:
PF.Save_as_tiff(t_air_k_24_filename, t_air_k_24, geo_ex, proj_ex)
if out.l_net == 1:
PF.Save_as_tiff(l_net_filename, l_net, geo_ex, proj_ex)
if out.int_mm == 1:
PF.Save_as_tiff(int_mm_filename, int_mm, geo_ex, proj_ex)
if out.lh_24 == 1:
PF.Save_as_tiff(lh_24_filename, lh_24, geo_ex, proj_ex)
if out.int_wm2 == 1:
PF.Save_as_tiff(int_wm2_filename, int_wm2, geo_ex, proj_ex)
if out.rn_24 == 1:
PF.Save_as_tiff(rn_24_filename, rn_24, geo_ex, proj_ex)
if out.rn_24_canopy == 1:
PF.Save_as_tiff(rn_24_canopy_filename, rn_24_canopy, geo_ex, proj_ex)
# **canopy resistance***********************************************************
t_air_k_i = ETLook.meteo.air_temperature_kelvin_inst(t_air_i)
vp_i = ETLook.meteo.vapour_pressure_from_specific_humidity_inst(qv_i, p_air_i)
ad_moist_i = ETLook.meteo.moist_air_density_inst(vp_i, t_air_k_i)
ad_dry_i = ETLook.meteo.dry_air_density_inst(p_air_i, vp_i, t_air_k_i)
ad_i = ETLook.meteo.air_density_inst(ad_dry_i, ad_moist_i)
u_b_i_bare = ETLook.soil_moisture.wind_speed_blending_height_bare(u_i, z0m_bare, z_obs, z_b)
lon = ETLook.solar_radiation.longitude_rad(lon_deg)
ha = ETLook.solar_radiation.hour_angle(sc, dtime, lon)
I0 = ETLook.clear_sky_radiation.solar_constant()
ied = ETLook.clear_sky_radiation.inverse_earth_sun_distance(day_angle)
h0 = ETLook.clear_sky_radiation.solar_elevation_angle(lat, decl, ha)
h0ref = ETLook.clear_sky_radiation.solar_elevation_angle_refracted(h0)
m = ETLook.clear_sky_radiation.relative_optical_airmass(p_air_i, p_air_0_i, h0ref)
rotm = ETLook.clear_sky_radiation.rayleigh_optical_thickness(m)
Tl2 = ETLook.clear_sky_radiation.linke_turbidity(wv_i, aod550_i, p_air_i, p_air_0_i)
G0 = ETLook.clear_sky_radiation.extraterrestrial_irradiance_normal(I0, ied)
B0c = ETLook.clear_sky_radiation.beam_irradiance_normal_clear(G0, Tl2, m, rotm, h0)
Bhc = ETLook.clear_sky_radiation.beam_irradiance_horizontal_clear(B0c, h0)
Dhc = ETLook.clear_sky_radiation.diffuse_irradiance_horizontal_clear(G0, Tl2, h0)
ra_hor_clear_i = ETLook.clear_sky_radiation.ra_clear_horizontal(Bhc, Dhc)
emiss_atm_i = ETLook.soil_moisture.atmospheric_emissivity_inst(vp_i, t_air_k_i)
rn_bare = ETLook.soil_moisture.net_radiation_bare(ra_hor_clear_i, emiss_atm_i, t_air_k_i, lst, r0_bare)
rn_full = ETLook.soil_moisture.net_radiation_full(ra_hor_clear_i, emiss_atm_i, t_air_k_i, lst, r0_full)
h_bare = ETLook.soil_moisture.sensible_heat_flux_bare(rn_bare, fraction_h_bare)
h_full = ETLook.soil_moisture.sensible_heat_flux_full(rn_full, fraction_h_full)
u_b_i_full = ETLook.soil_moisture.wind_speed_blending_height_full_inst(u_i, z0m_full, z_obs, z_b)
u_star_i_bare = ETLook.soil_moisture.friction_velocity_bare_inst(u_b_i_bare, z0m_bare, disp_bare, z_b)
u_star_i_full = ETLook.soil_moisture.friction_velocity_full_inst(u_b_i_full, z0m_full, disp_full, z_b)
L_bare = ETLook.soil_moisture.monin_obukhov_length_bare(h_bare, ad_i, u_star_i_bare, t_air_k_i)
L_full = ETLook.soil_moisture.monin_obukhov_length_full(h_full, ad_i, u_star_i_full, t_air_k_i)
u_i_soil = ETLook.soil_moisture.wind_speed_soil_inst(u_i, L_bare, z_obs)
ras = ETLook.soil_moisture.aerodynamical_resistance_soil(u_i_soil)
raa = ETLook.soil_moisture.aerodynamical_resistance_bare(u_i, L_bare, z0m_bare, disp_bare, z_obs)
rac = ETLook.soil_moisture.aerodynamical_resistance_full(u_i, L_full, z0m_full, disp_full, z_obs)
t_max_bare = ETLook.soil_moisture.maximum_temperature_bare(ra_hor_clear_i, emiss_atm_i, t_air_k_i, ad_i, raa, ras, r0_bare)
t_max_full = ETLook.soil_moisture.maximum_temperature_full(ra_hor_clear_i, emiss_atm_i, t_air_k_i, ad_i, rac, r0_full)
w_i = ETLook.soil_moisture.dew_point_temperature_inst(vp_i)
t_dew_i = ETLook.soil_moisture.dew_point_temperature_inst(vp_i)
t_wet_i = ETLook.soil_moisture.wet_bulb_temperature_inst(t_air_i, t_dew_i)
t_wet_k_i = ETLook.meteo.wet_bulb_temperature_kelvin_inst(t_wet_i)
lst_max = ETLook.soil_moisture.maximum_temperature(t_max_bare, t_max_full, vc)
se_root = ETLook.soil_moisture.soil_moisture_from_maximum_temperature(lst_max, lst, t_wet_k_i)
stress_moist = ETLook.stress.stress_moisture(se_root, tenacity)
r_canopy_0 = ETLook.resistance.atmospheric_canopy_resistance(lai_eff, stress_rad, stress_vpd, stress_temp, rs_min, rcan_max)
r_canopy = ETLook.resistance.canopy_resistance(r_canopy_0, stress_moist, rcan_max)
# Save as tiff files
t_air_k_i[np.isnan(QC)] = np.nan
vp_i[np.isnan(QC)] = np.nan
ad_moist_i[np.isnan(QC)] = np.nan
ad_dry_i[np.isnan(QC)] = np.nan
ad_i[np.isnan(QC)] = np.nan
u_b_i_bare[np.isnan(QC)] = np.nan
lon[np.isnan(QC)] = np.nan
ha[np.isnan(QC)] = np.nan
h0[np.isnan(QC)] = np.nan
h0ref[np.isnan(QC)] = np.nan
m[np.isnan(QC)] = np.nan
rotm[np.isnan(QC)] = np.nan
Tl2[np.isnan(QC)] = np.nan
B0c[np.isnan(QC)] = np.nan
Bhc[np.isnan(QC)] = np.nan
Dhc[np.isnan(QC)] = np.nan
ra_hor_clear_i[np.isnan(QC)] = np.nan
emiss_atm_i[np.isnan(QC)] = np.nan
rn_bare[np.isnan(QC)] = np.nan
rn_full[np.isnan(QC)] = np.nan
u_b_i_full[np.isnan(QC)] = np.nan
u_star_i_bare[np.isnan(QC)] = np.nan
u_star_i_full[np.isnan(QC)] = np.nan
u_i_soil[np.isnan(QC)] = np.nan
ras[np.isnan(QC)] = np.nan
raa[np.isnan(QC)] = np.nan
rac[np.isnan(QC)] = np.nan
t_max_bare[np.isnan(QC)] = np.nan
t_max_full[np.isnan(QC)] = np.nan
w_i[np.isnan(QC)] = np.nan
t_dew_i[np.isnan(QC)] = np.nan
t_wet_i[np.isnan(QC)] = np.nan
t_wet_k_i[np.isnan(QC)] = np.nan
lst_max[np.isnan(QC)] = np.nan
se_root[np.isnan(QC)] = np.nan
stress_moist[np.isnan(QC)] = np.nan
r_canopy_0[np.isnan(QC)] = np.nan
r_canopy[np.isnan(QC)] = np.nan
if out.t_air_k_i == 1:
PF.Save_as_tiff(t_air_k_i_filename, t_air_k_i, geo_ex, proj_ex)
if out.vp_i == 1:
PF.Save_as_tiff(vp_i_filename, vp_i, geo_ex, proj_ex)
if out.ad_moist_i == 1:
PF.Save_as_tiff(ad_moist_i_filename, ad_moist_i, geo_ex, proj_ex)
if out.ad_dry_i == 1:
PF.Save_as_tiff(ad_dry_i_filename, ad_dry_i, geo_ex, proj_ex)
if out.ad_i == 1:
PF.Save_as_tiff(ad_i_filename, ad_i, geo_ex, proj_ex)
if out.u_b_i_bare == 1:
PF.Save_as_tiff(u_b_i_bare_filename, u_b_i_bare, geo_ex, proj_ex)
if out.lon == 1:
PF.Save_as_tiff(lon_filename, lon, geo_ex, proj_ex)
if out.ha == 1:
PF.Save_as_tiff(ha_filename, ha, geo_ex, proj_ex)
if out.ied == 1:
PF.Save_as_tiff(ied_filename, ied, geo_ex, proj_ex)
if out.h0 == 1:
PF.Save_as_tiff(h0_filename, h0, geo_ex, proj_ex)
if out.h0ref == 1:
PF.Save_as_tiff(h0ref_filename, h0ref, geo_ex, proj_ex)
if out.m == 1:
PF.Save_as_tiff(m_filename, m, geo_ex, proj_ex)
if out.rotm == 1:
PF.Save_as_tiff(rotm_filename, rotm, geo_ex, proj_ex)
if out.Tl2 == 1:
PF.Save_as_tiff(Tl2_filename, Tl2, geo_ex, proj_ex)
if out.B0c == 1:
PF.Save_as_tiff(B0c_filename, B0c, geo_ex, proj_ex)
if out.Bhc == 1:
PF.Save_as_tiff(Bhc_filename, Bhc, geo_ex, proj_ex)
if out.Dhc == 1:
PF.Save_as_tiff(Dhc_filename, Dhc, geo_ex, proj_ex)
if out.ra_hor_clear_i == 1:
PF.Save_as_tiff(ra_hor_clear_i_filename, ra_hor_clear_i, geo_ex, proj_ex)
if out.emiss_atm_i == 1:
PF.Save_as_tiff(emiss_atm_i_filename, emiss_atm_i, geo_ex, proj_ex)
if out.rn_bare == 1:
PF.Save_as_tiff(rn_bare_filename, rn_bare, geo_ex, proj_ex)
if out.rn_full == 1:
PF.Save_as_tiff(rn_full_filename, rn_full, geo_ex, proj_ex)
if out.u_b_i_full == 1:
PF.Save_as_tiff(u_b_i_full_filename, u_b_i_full, geo_ex, proj_ex)
if out.u_star_i_bare == 1:
PF.Save_as_tiff(u_star_i_bare_filename, u_star_i_bare, geo_ex, proj_ex)
if out.u_star_i_full == 1:
PF.Save_as_tiff(u_star_i_full_filename, u_star_i_full, geo_ex, proj_ex)
if out.u_i_soil == 1:
PF.Save_as_tiff(u_i_soil_filename, u_i_soil, geo_ex, proj_ex)
if out.ras == 1:
PF.Save_as_tiff(ras_filename, ras, geo_ex, proj_ex)
if out.raa == 1:
PF.Save_as_tiff(raa_filename, raa, geo_ex, proj_ex)
if out.rac == 1:
PF.Save_as_tiff(rac_filename, rac, geo_ex, proj_ex)
if out.t_max_bare == 1:
PF.Save_as_tiff(t_max_bare_filename, t_max_bare, geo_ex, proj_ex)
if out.t_max_full == 1:
PF.Save_as_tiff(t_max_full_filename, t_max_full, geo_ex, proj_ex)
if out.w_i == 1:
PF.Save_as_tiff(w_i_filename, w_i, geo_ex, proj_ex)
if out.t_dew_i == 1:
PF.Save_as_tiff(t_dew_i_filename, t_dew_i, geo_ex, proj_ex)
if out.t_wet_i == 1:
PF.Save_as_tiff(t_wet_i_filename, t_wet_i, geo_ex, proj_ex)
if out.t_wet_k_i == 1:
PF.Save_as_tiff(t_wet_k_i_filename, t_wet_k_i, geo_ex, proj_ex)
if out.lst_max == 1:
PF.Save_as_tiff(lst_max_filename, lst_max, geo_ex, proj_ex)
if out.se_root == 1:
PF.Save_as_tiff(se_root_filename, se_root, geo_ex, proj_ex)
if out.stress_moist == 1:
PF.Save_as_tiff(stress_moist_filename, stress_moist, geo_ex, proj_ex)
if out.r_canopy_0 == 1:
PF.Save_as_tiff(r_canopy_0_filename, r_canopy_0, geo_ex, proj_ex)
if out.r_canopy == 1:
PF.Save_as_tiff(r_canopy_filename, r_canopy, geo_ex, proj_ex)
# **initial canopy aerodynamic resistance***********************************************************
z_obst = ETLook.roughness.obstacle_height(ndvi, z_obst_max, ndvi_obs_min, ndvi_obs_max, obs_fr)
z_oro = ETLook.roughness.orographic_roughness(slope, dem_resolution) #careful - standard res is set to 250 # !!!
z0m = ETLook.roughness.roughness_length(lai, z_oro, z_obst, z_obst_max, land_mask)
ra_canopy_init = ETLook.neutral.initial_canopy_aerodynamic_resistance(u_24, z0m, z_obs)
# Save as tiff files
z_obst[np.isnan(QC)] = np.nan
z_oro[np.isnan(QC)] = np.nan
z0m[np.isnan(QC)] = np.nan
ra_canopy_init[np.isnan(QC)] = np.nan
if out.z_obst == 1:
PF.Save_as_tiff(z_obst_filename, z_obst, geo_ex, proj_ex)
if out.z_oro == 1:
PF.Save_as_tiff(z_oro_filename, z_oro, geo_ex, proj_ex)
if out.z0m == 1:
PF.Save_as_tiff(z0m_filename, z0m, geo_ex, proj_ex)
if out.ra_canopy_init == 1:
PF.Save_as_tiff(ra_canopy_init_filename, ra_canopy_init, geo_ex, proj_ex)
# **windspeed blending height daily***********************************************************
u_b_24 = ETLook.meteo.wind_speed_blending_height_daily(u_24, z_obs, z_b)
# Save as tiff files
u_b_24[np.isnan(QC)] = np.nan
if out.u_b_24 == 1:
PF.Save_as_tiff(u_b_24_filename, u_b_24, geo_ex, proj_ex)
# **ETLook.unstable.initial_friction_velocity_daily***********************************************************
disp = ETLook.roughness.displacement_height(lai, z_obst, land_mask, c1)
u_star_24_init = ETLook.unstable.initial_friction_velocity_daily(u_b_24, z0m, disp, z_b)
# Save as tiff files
disp[
| completion: np.isnan(QC) | api: numpy.isnan |
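The ETLook row above repeats one masking idiom dozens of times: every derived raster is blanked wherever the quality-control raster QC is NaN, so bad pixels never reach the optional GeoTIFF writes gated by the `out.<name>` flags. A minimal NumPy sketch of that idiom (the arrays are illustrative only):

import numpy as np

QC = np.array([[1.0, np.nan], [2.0, 3.0]])
derived = np.array([[10.0, 20.0], [30.0, 40.0]])
derived[np.isnan(QC)] = np.nan   # -> [[10., nan], [30., 40.]]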
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import torch
from torch import nn
dtype = torch.FloatTensor
from torch.autograd import Variable
import torch.nn.functional as F
import numpy as np
import warnings
from datetime import datetime
with warnings.catch_warnings():
warnings.simplefilter('ignore')
import h5py
from scipy import constants
speedoflight = constants.c / 1000.0
from scipy.interpolate import UnivariateSpline
import Payne
from ..utils.smoothing import smoothspec
class Net(nn.Module):
def __init__(self, D_in, H, D_out):
super(Net, self).__init__()
self.lin1 = nn.Linear(D_in, H)
self.lin2 = nn.Linear(H,H)
self.lin3 = nn.Linear(H,H)
self.lin4 = nn.Linear(H, D_out)
# self.lin3 = nn.Linear(H,D_out)
"""
def forward(self, x):
x_i = self.encode(x)
out1 = F.sigmoid(self.lin1(x_i))
out2 = F.sigmoid(self.lin2(out1))
# out3 = F.sigmoid(self.lin3(out2))
# y_i = self.lin4(out3)
y_i = self.lin3(out2)
return y_i
"""
def forward(self, x):
x_i = self.encode(x)
out1 = torch.sigmoid(self.lin1(x_i))
out2 = torch.sigmoid(self.lin2(out1))
out3 = torch.sigmoid(self.lin3(out2))
y_i = self.lin4(out3)
# y_i = self.lin3(out2)
return y_i
def encode(self,x):
# convert x into numpy to do math
x_np = x.data.cpu().numpy()
try:
self.xmin
self.xmax
except (NameError,AttributeError):
self.xmin = np.amin(x_np,axis=0)
self.xmax = np.amax(x_np,axis=0)
x = (x_np-self.xmin)/(self.xmax-self.xmin)
return Variable(torch.from_numpy(x).type(dtype))
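# Note on encode() above: it applies per-feature min-max scaling,
#   x' = (x - xmin) / (xmax - xmin),
# caching xmin/xmax from the first batch it sees. For example, a feature
# spanning [4000, 8000] maps 5770 to (5770 - 4000) / 4000 = 0.4425.
# Because the extrema are cached, later batches are scaled against the
# first batch's range rather than their own.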
class readNN(object):
"""docstring for nnBC"""
def __init__(self, nnh5=None,xmin=None,xmax=None):
super(readNN, self).__init__()
D_in = nnh5['model/lin1.weight'].shape[1]
H = nnh5['model/lin1.weight'].shape[0]
D_out = nnh5['model/lin4.weight'].shape[0]
self.model = Net(D_in,H,D_out)
self.model.xmin = xmin  # torch.from_numpy(xmin).type(dtype)
self.model.xmax = xmax  # torch.from_numpy(xmax).type(dtype)
newmoddict = {}
for kk in nnh5['model'].keys():
nparr = np.array(nnh5['model'][kk])
torarr = torch.from_numpy(nparr).type(dtype)
newmoddict[kk] = torarr
self.model.load_state_dict(newmoddict)
self.model.eval()
def eval(self,x):
if isinstance(x, list):
x = np.array(x)
if len(x.shape) == 1:
inputD = 1
else:
inputD = x.shape[0]
inputVar = Variable(
torch.from_numpy(x).type(dtype),
requires_grad=False).reshape(inputD,4)
outpars = self.model(inputVar)
return outpars.data.numpy().squeeze()
class fastreadNN(object):
def __init__(self, nnlist, wavearr, xmin=None, xmax=None):
self.wavearr = wavearr
self.w1 = np.array([nn.model.lin1.weight.data.numpy() for nn in nnlist])
self.b1 = np.expand_dims(np.array([nn.model.lin1.bias.data.numpy() for nn in nnlist]), -1)
self.w2 = np.array([nn.model.lin2.weight.data.numpy() for nn in nnlist])
self.b2 = np.expand_dims(np.array([nn.model.lin2.bias.data.numpy() for nn in nnlist]), -1)
self.w3 = np.array([nn.model.lin3.weight.data.numpy() for nn in nnlist])
self.b3 = np.expand_dims(np.array([nn.model.lin3.bias.data.numpy() for nn in nnlist]), -1)
self.w4 = np.array([nn.model.lin4.weight.data.numpy() for nn in nnlist])
self.b4 = np.expand_dims(np.array([nn.model.lin4.bias.data.numpy() for nn in nnlist]), -1)
self.set_minmax(nnlist[0])
def set_minmax(self, nn):
try:
self.xmin = nn.model.xmin
self.xmax = nn.model.xmax
except (NameError,AttributeError):
self.xmin = np.amin(nn.model.x.data.numpy(),axis=0)
self.xmax = np.amax(nn.model.x.data.numpy(),axis=0)
self.range = (self.xmax - self.xmin)
def encode(self, x):
xp = (np.atleast_2d(x) - self.xmin) / self.range
return xp.T
def sigmoid(self, a):
with np.errstate(over='ignore'):
return 1. / (1 + np.exp(-a))
def eval(self, x):
"""With some annying shape changes
"""
a1 = self.sigmoid(np.matmul(self.w1, self.encode(x)) + self.b1)
a2 = self.sigmoid(np.matmul(self.w2, a1) + self.b2)
a3 = self.sigmoid(np.matmul(self.w3, a2) + self.b3)
y = np.matmul(self.w4, a3) + self.b4
return np.squeeze(y).ravel()
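# A rough equivalence check for the batched forward pass above (a sketch,
# assuming `nnlist` from readNN and a 4-element label vector `x`): stacking
# each chunk's weights into 3-D arrays lets three batched np.matmul calls
# replace a Python loop over per-chunk evaluations, e.g.
#   fast = fastreadNN(nnlist, wavearr)
#   slow = np.concatenate([np.atleast_1d(nn.eval(x)) for nn in nnlist])
#   assert np.allclose(fast.eval(x), slow, atol=1e-5)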
class PayneSpecPredict(object):
"""
Class for taking a Payne-learned NN and predicting spectrum.
"""
def __init__(self, nnpath, **kwargs):
self.NN = {}
if nnpath is not None:
self.nnpath = nnpath
else:
# define aliases for the MIST isochrones and C3K/CKC files
self.nnpath = Payne.__abspath__+'data/specANN/C3KANN_RVS31.h5'
self.carbon_bool = kwargs.get('carbon_bool',False)
# name of file that contains the neural-net output
self.NN['filename'] = self.nnpath
# restore hdf5 file with the NN
self.NN['file'] = h5py.File(self.NN['filename'],'r')
# wavelength N-D array for predicted spectrum
self.NN['wavelength_arr'] = self.NN['file']['wavelength']
# array of ANN indices, sorted so that wavelength chunks and models match up
self.NN['ANN_ind'] = [int(x) for x in self.NN['wavelength_arr']]
self.NN['ANN_ind'].sort()
self.NN['ANN_ind'] = ['{}'.format(x) for x in self.NN['ANN_ind']]
# flattened wavelength array
self.NN['wavelength'] = np.concatenate(
[self.NN['wavelength_arr'][ii].__array__() for ii in self.NN['ANN_ind']]
)
# resolution that network was trained at
self.NN['resolution'] = np.array(self.NN['file']['resolution'])[0]
# min and max for trained ANN
self.NN['x_min'] = np.array(self.NN['file']['xmin'])
self.NN['x_max'] = np.array(self.NN['file']['xmax'])
# # OLD SLOW PYTORCH CODE
# # dictionary of trained NN models for predictions
# self.NN['model'] = {}
# for ii in self.NN['ANN_ind']:
# self.NN['model'][ii] = readNN(
# nnh5=self.NN['file']['model/{0}'.format(ii)],
# xmin=self.NN['x_min'],
# xmax=self.NN['x_max'],
# )
# NEW SMART MATRIX APPROACH
nnlist = (
[readNN(
nnh5=self.NN['file']['model/{0}'.format(ii)],
xmin=self.NN['x_min'],
xmax=self.NN['x_max'])
for ii in self.NN['ANN_ind']
]
)
self.anns = fastreadNN(nnlist, self.NN['wavelength'])
# Turn on C2 absorption feature in RV31
if self.carbon_bool:
from ..utils.carbonmod import carbonmod
self.carbonfun = carbonmod(
inres=500000.0,
outres=self.NN['resolution'],
outwave=self.NN['wavelength'],
)
# close the HDF5 file
self.NN['file'].close()
def predictspec(self,labels):
'''
predict spectra using set of labels and trained NN output
:params labels:
list of label values for the labels used to train the NN
ex. [Teff,log(g),[Fe/H],[alpha/Fe]]
:returns predict_flux:
predicted flux from the NN
'''
# # OLD SLOW PYTORCH CODE
# self.predict_flux = np.array([])
# for ii in self.NN['ANN_ind']:
# predval = self.NN['model'][ii].eval(labels)
# if predval.shape == np.array(1).shape:
# predval = np.array([predval])
# self.predict_flux = np.concatenate([self.predict_flux,predval])
# if self.predict_flux.ndim == 1:
# self.predict_flux = self.predict_flux.reshape(1,len(self.NN['ANN_ind']))
# self.predict_flux = np.concatenate(self.predict_flux)
# NEW SMART MATRIX APPROACH
self.predict_flux = self.anns.eval(labels)
return self.predict_flux
def getspec(self,**kwargs):
'''
function to take a set of kwarg based on labels and
return the predicted spectrum
default returns solar spectrum, rotating at 2 km/s, and
at R=32K
: returns modwave:
Wavelength array from the NN
:returns modspec:
Predicted spectrum from the NN
'''
self.inputdict = {}
if 'Teff' in kwargs:
self.inputdict['logt'] = np.log10(kwargs['Teff'])
elif 'logt' in kwargs:
self.inputdict['logt'] = kwargs['logt']
else:
self.inputdict['logt'] =
| completion: np.log10(5770.0) | api: numpy.log10 |
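The completion above fills in the solar default: when getspec() receives neither `Teff` nor `logt`, the label falls back to the Sun's effective temperature of 5770 K, stored in log form:

import numpy as np
np.log10(5770.0)   # ~= 3.7612, the default 'logt' label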
import open3d as o3d
import numpy as np
import scipy.linalg as la
from skimage import measure
import pycuda.autoinit
import pycuda.driver as cuda
from pycuda import gpuarray
from .cuda_kernels import source_module
class TSDFVolume:
def __init__(self, voxel_size, vol_bnds=None, vol_origin=None, vol_dim=None, trunc_margin=0.015):
"""
Args:
vol_bnds (ndarray): An ndarray of shape (3, 2). Specifies the xyz bounds (min/max) in meters.
voxel_size (float): The volume discretization in meters.
"""
if vol_dim is not None and vol_origin is not None:
self._vol_dim = vol_dim
self._vol_origin = vol_origin
elif vol_bnds is not None:
self._vol_bnds = np.ascontiguousarray(vol_bnds, dtype=np.float32)
self._vol_dim = np.round((self._vol_bnds[:, 1] - self._vol_bnds[:, 0]) / voxel_size).astype(np.int32)
self._vol_origin = self._vol_bnds[:, 0].copy()
else:
raise ValueError("must either provide 'vol_dim' or (both 'vol_bnds' and 'vol_origin')")
self._voxel_size = voxel_size
self._trunc_margin = trunc_margin
self._color_const = np.float32(256 * 256)
print(f"TSDF volume dim: {self._vol_dim}, # points: {np.prod(self._vol_dim)}")
# Copy voxel volumes to GPU
self.block_x, self.block_y, self.block_z = 8, 8, 16 # block_x * block_y * block_z must equal 1024
x_dim, y_dim, z_dim = int(self._vol_dim[0]), int(self._vol_dim[1]), int(self._vol_dim[2])
self.grid_x = int(np.ceil(x_dim / self.block_x))
self.grid_y = int(np.ceil(y_dim / self.block_y))
self.grid_z = int(np.ceil(z_dim / self.block_z))
# initialize tsdf values to be -1
xyz = x_dim * y_dim * z_dim
self._tsdf_vol_gpu = gpuarray.zeros(shape=(xyz), dtype=np.float32) - 1
self._weight_vol_gpu = gpuarray.zeros(shape=(xyz), dtype=np.float32)
self._color_vol_gpu = gpuarray.zeros(shape=(xyz), dtype=np.float32)
# integrate function using PyCuda
self._cuda_integrate = source_module.get_function("integrate")
self._cuda_ray_casting = source_module.get_function("rayCasting")
self._cuda_batch_ray_casting = source_module.get_function("batchRayCasting")
def integrate(self, color_im, depth_im, cam_intr, cam_pose, weight=1.0):
""" Integrate an RGB-D frame into the TSDF volume.
Args:
color_im (np.ndarray): input RGB image of shape (H, W, 3)
depth_im (np.ndarray): input depth image of shape (H, W)
cam_intr (np.ndarray): Camera intrinsics matrix of shape (3, 3)
cam_pose (np.ndarray): Camera pose of shape (4, 4)
weight (float, optional): weight to be assigned for the current observation. Defaults to 1.0.
"""
im_h, im_w = depth_im.shape
# color image is always from host
color_im = color_im.astype(np.float32)
color_im = np.floor(color_im[...,2]*self._color_const + color_im[...,1]*256 + color_im[...,0])
if isinstance(depth_im, np.ndarray):
depth_im_gpu = gpuarray.to_gpu(depth_im.astype(np.float32))
self._cuda_integrate(
self._tsdf_vol_gpu,
self._weight_vol_gpu,
self._color_vol_gpu,
cuda.InOut(self._vol_dim.astype(np.int32)),
cuda.InOut(self._vol_origin.astype(np.float32)),
np.float32(self._voxel_size),
cuda.InOut(cam_intr.reshape(-1).astype(np.float32)),
cuda.InOut(cam_pose.reshape(-1).astype(np.float32)),
np.int32(im_h),
np.int32(im_w),
cuda.InOut(color_im.astype(np.float32)),
depth_im_gpu,
np.float32(self._trunc_margin),
np.float32(weight),
block=(self.block_x, self.block_y, self.block_z),
grid=(self.grid_x, self.grid_y, self.grid_z)
)
def batch_ray_casting(self, im_w, im_h, cam_intr, cam_poses, inv_cam_poses, start_row=0, start_col=0,
batch_size=1, to_host=True):
batch_color_im_gpu = gpuarray.zeros(shape=(batch_size, 3, im_h, im_w), dtype=np.uint8)
batch_depth_im_gpu = gpuarray.zeros(shape=(batch_size, im_h, im_w), dtype=np.float32)
self._cuda_batch_ray_casting(
self._tsdf_vol_gpu,
self._color_vol_gpu,
self._weight_vol_gpu,
cuda.InOut(self._vol_dim.astype(np.int32)),
cuda.InOut(self._vol_origin.astype(np.float32)),
| completion: np.float32(self._voxel_size) | api: numpy.float32 |
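A minimal usage sketch for the TSDFVolume row above, assuming a CUDA device is available; the intrinsics, pose, and synthetic frame are illustrative only:

import numpy as np

vol_bnds = np.array([[0, 1], [0, 1], [0, 1]], dtype=np.float32)  # 1 m cube
tsdf = TSDFVolume(voxel_size=0.01, vol_bnds=vol_bnds)            # 1 cm voxels
color = np.zeros((480, 640, 3), dtype=np.uint8)
depth = np.full((480, 640), 0.5, dtype=np.float32)               # 0.5 m plane
K = np.array([[525, 0, 320], [0, 525, 240], [0, 0, 1]], dtype=np.float32)
pose = np.eye(4, dtype=np.float32)                               # camera at origin
tsdf.integrate(color, depth, K, pose, weight=1.0)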
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.special import lpmv, factorial, eval_legendre
from scipy.integrate import simps, odeint
from scipy.optimize import brentq
from eos import Polytrope
from rotation_laws import JConstantRotation
class Solver(metaclass=ABCMeta):
def __init__(self, star):
self.star = star
self.initialized = False
@abstractmethod
def initialize_solver(self, parameters):
pass
@abstractmethod
def solve(self, max_steps=100, delta=1e-3):
pass
@abstractmethod
def calc_mass(self):
pass
@abstractmethod
def calc_gravitational_energy(self):
pass
class Roxburgh(Solver):
def initialize_solver(self, parameters):
self.theta_m = 0
self.Ro = 0.9
self.Rs = self.star.r_coords[-1]
# inverted coordinate system is driving me mad so I am .T'ing them all.
self.star.rho = self.star.rho.T
self.star.Phi = self.star.Phi.T
self.star.Omega2 = parameters['Omega']**2
self.rhom = self.star.rho[:, 0]
self.gm = np.zeros_like(self.star.rho)
def step(self):
# solve Poisson for Phi given rho
Phi = self.Phi_given_rho(self.star.rho)
# solve for rho given Phi subject to rho(r,0) = rhom(r)
rho = self.rho_given_Phi(Phi)
# test convergence
rho_err = np.max(np.abs(rho - self.star.rho)) / np.max(rho)
print(f"Error: rho_err = {rho_err}")
# print(f"rho = {rho}")
# print(f"Phi = {Phi}")
self.star.rho = rho
self.star.Phi = Phi
return rho_err
def Phi_given_rho(self, rho):
r = self.star.r_coords
th = self.star.theta_coords
nth = len(th)
nr = len(r)
nk = min(20, nth)
Phi = np.zeros_like(self.star.Phi)
W = np.array([[eval_legendre(2 * k, np.cos(th[n]))
for k in range(nk)] for n in range(nk)])
ck = np.zeros((nr, nk))
fk = np.zeros((nr, nk))
for i in range(nr):
ck[i, :] = np.linalg.inv(W) @ rho[i, :nk]
# for k in range(nth):
# solve equation 12 using shooting
# def dfdr(x, _r, dfdr0):
# if _r == 0:
# return np.array([0, dfdr0])
# else:
# _ck = np.interp(_r, r, ck[:,k])
# print(_ck)
# return np.array([4 * np.pi * self.star.G * _ck - 2 /
# _r * x[0] + 2 * k * (k + 1) * x[1] / _r**2,
# x[0]])
#
# def shoot_me(dfdr0):
# _fk = odeint(dfdr, [dfdr0, 0], r, args=(dfdr0,))
#
# print(f"_fk = {_fk[-1,:]}")
#
# # calculate boundary condition at r=Rs
# return (2 * k + 1) * _fk[-1, 1] + r[-1] * _fk[-1, 0]
#
# s_min = -1
# s_max = 1
#
# if shoot_me(s_min) * shoot_me(s_max) > 0:
# s_min *= 10
#
# # if shoot_me(s_min) * shoot_me(s_max) > 0:
# # s_max *= 10
#
# print(f"shoot_me({s_min}) = {shoot_me(s_min)}, shoot_me({s_max}) = {shoot_me(s_max)}")
# dfdr0 = brentq(shoot_me, s_min, s_max)
#
# fk[:, k] = odeint(dfdr, [dfdr0, 0], r)[:, 1]
for i in range(1,nr):
for k in range(nk):
I = ck[:, k] * r**(2*k+2)
lk = 4 * np.pi * self.star.G / ((4 * k + 1) * self.Rs**(4*k+1)) * simps(I, r)
outer_I = np.zeros_like(r[i:])
for j in range(i, nr):
inner_I = np.zeros_like(r[:j+1])
for n in range(j+1):
inner_I[n] = ck[n, k] * r[n]**(2*k+2)
outer_I[j-i] = 4 * np.pi * self.star.G / r[j]**(4*k+2) * simps(inner_I, r[:j+1])
fk[i, k] = -r[i]**(2*k) * simps(outer_I, r[i:]) - lk * r[i]**(2*k)
# now given fk we can find Phi
for i in range(nr):
for j in range(nth):
Phi[i, j] = np.sum(
[fk[i, k] * eval_legendre(2 * k, np.cos(th[j])) for k in range(nk)])
return Phi
def rho_given_Phi(self, Phi):
r = self.star.r_coords
th = self.star.theta_coords
dr = r[1] - r[0]
dth = th[1] - th[0]
nth = len(th)
nr = len(r)
rho = np.zeros_like(Phi)
dPhidr = np.zeros_like(Phi)
dPhidth = np.zeros_like(Phi)
dPhidr[1:-1, :] = (Phi[2:, :] - Phi[:-2, :]) / (2 * dr)
dPhidth[:, 1:-1] = (Phi[:, 2:] - Phi[:, :-2]) / (2 * dth)
# do boundaries by copying. At r=0, we want the derivative of the potential to be 0, so we'll leave that.
dPhidr[-1, :] = dPhidr[-2, :]
dPhidth[:, 0] = dPhidth[:, 1]
dPhidth[:, -1] = dPhidth[:, -2]
gm = np.zeros_like(Phi)
for i in range(1,nr):
for j in range(1,nth):
I = -1 / r[i] * (dPhidth[i, :j] - self.star.Omega2 * r[i]**2 * np.sin(th[:j]) * np.cos(
th[:j])) / (dPhidr[i, :j] - self.star.Omega2 * r[i] * np.sin(th[:j])**2)
# print(f"I = {I}")
log_gm = simps(I, th[:j])
gm[i, j] = np.exp(log_gm)
if self.star.Omega2 == 0:
rho[i, :] = self.rhom[i]
else:
for j in range(1,nth):
dgdth = -1 / r[i] * (dPhidth[i, :j] - self.star.Omega2 * r[i]**2 * np.sin(
th[:j]) * np.cos(th[:j])) / (dPhidr[i, :j] - self.star.Omega2 * r[i] * np.sin(th[:j])**2)
I = np.zeros(j)
for k in range(j):
if not dgdth[k] == 0:
I[k] = -r[i] * self.star.Omega2 * np.cos(th[k]) / (
dPhidr[i, k] - self.star.Omega2 * r[i] * np.sin(th[k])**2) * 1 / dgdth[k]
# print(f"I = {I}")
log_rho = simps(I, gm[i, :j]) #+ np.log(self.rhom[i])
# print(f"log_rho = {log_rho}")
rho[i, j] = np.exp(log_rho) * self.rhom[i]
# save characteristics for later
self.gm = gm
return rho
def P_given_rho_Phi(self, rho, Phi):
r = self.star.r_coords
th = self.star.theta_coords
dr = r[1] - r[0]
dth = th[1] - th[0]
nth = len(th)
nr = len(r)
dPhidr = np.zeros_like(Phi)
dPhidr[1:-1, :] = (Phi[2:, :] - Phi[:-2, :]) / (2 * dr)
dPhidr[-1, :] = dPhidr[-2, :]
P = np.zeros((nr, nth))
# NOTE: used the fact that theta_m = 0
Ro_index = self.star.eos.A[1]
for i in range(Ro_index):
P[i, 0] = simps(rho[i:Ro_index, 0] *
dPhidr[i:Ro_index, 0], r[i:Ro_index])
# now we have to do something clever and interpolate these along the characteristics. eurgh.
# given the value of gm(theta) and r at some theta, we can find the corresponding point
# rm along theta = thetam. We can then look up what the pressure is at this point
for j in range(1, nth):
rm = self.gm[1:, j] / r[1:]
P[1:, j] = np.interp(rm, r[1:], P[1:, 0])
return P
def solve(self, max_steps=100, delta=1e-3):
for i in range(max_steps):
print(f"Step {i}")
rho_err = self.step()
if rho_err <= delta:
print("Solution found!")
break
self.star.p = self.P_given_rho_Phi(self.star.rho, self.star.Phi)
# untranspose
self.star.rho = self.star.rho.T
self.star.Phi = self.star.Phi.T
self.star.p = self.star.p.T
def calc_mass(self):
pass
def calc_gravitational_energy(self):
pass
class SCF(Solver):
def initialize_solver(self, parameters):
self.star.r_coords = np.array(
range(1, self.star.mesh_size[1] + 1)) / (self.star.mesh_size[1] - 1)
self.star.mu_coords = np.array(
range(self.star.mesh_size[0])) / (self.star.mesh_size[0] - 1)
self.star.Psi = np.zeros(self.star.mesh_size)
self.star.Psi[:, :] = -0.5 * self.star.r_coords[np.newaxis, :]**2 * \
(1 - self.star.mu_coords[:, np.newaxis]**2)
self.star.omegabar = np.sqrt(-2 * self.star.Psi)
def step(self):
""" Implements 2d self-consistent field algorithm """
lmax = 32
star = self.star
K, N = star.mesh_size
mu = star.mu_coords
r = star.r_coords
def D1(k, n):
return 1 / 6 * np.sum((mu[2::2] - mu[:-2:2]) *
(eval_legendre(2 * n, mu[:-2:2]) * star.rho[:-2:2, k] +
4 * eval_legendre(2 * n, mu[1:-1:2]) * star.rho[1:-1:2, k] +
eval_legendre(2 * n, mu[2::2]) * star.rho[2::2, k]))
def D2(n, j):
total = 0
def fl(r_dash, r, l=2 * n):
if r_dash < r:
return r_dash**(l + 2) / r**(l + 1)
else:
return r**l / r_dash**(l - 1)
for k in range(0, N - 2, 2):
total += (r[k + 2] - r[k]) * (fl(r[k], r[j]) * D1(k, n) +
4 * fl(r[k + 1], r[j]) * D1(k + 1, n) +
fl(r[k + 2], r[j]) * D1(k + 2, n))
return total / 6
def calc_Phi(i, j):
Phi = 0
for n in range(lmax + 1):
Phi -= 4 * np.pi * D2(n, j) * eval_legendre(2 * n, mu[i])
return Phi
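# The three helpers above implement the even-multipole expansion of the
# potential (G = 1 units):
#   Phi(r_j, mu_i) = -4*pi * sum_{n=0..lmax} D2(n, j) * P_{2n}(mu_i)
# D1(k, n) is a composite Simpson quadrature of rho * P_{2n} over mu at
# radius r_k, and D2(n, j) folds D1 against the radial kernel
#   f_l(r', r) = r'**(l+2) / r**(l+1) if r' < r else r**l / r'**(l-1)
# with another Simpson pass over r.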
# calculate Phi across grid
for n in range(N):
for k in range(K):
star.Phi[k, n] = calc_Phi(k, n)
# print(f'Phi = {star.Phi[0,:]}')
# update the enthalpy
Omega2 = star.eos.Omega2(star.Phi, star.Psi)
C = star.eos.C(star.Phi, star.Psi)
H = C - star.Phi - Omega2 * star.Psi
# use new enthalpy and Phi to calculate the density
star.rho = star.eos.rho_from_h(H)
star.rho /= np.max(star.rho)
# print(f"rho = {np.average(star.rho, axis=0)}")
# calculate the errors
H_err = np.max(np.abs(H - star.H)) / np.max(np.abs(H))
if
| completion: np.max(Omega2) | api: numpy.max |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as pl
import george
from george.kernels import ExpSquaredKernel
np.random.seed(1234)
# Generate some fake noisy data.
x = 10 * np.sort(np.random.rand(10))
yerr = 0.2 * np.ones_like(x)
y =
| completion: np.sin(x) | api: numpy.sin |
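A plausible continuation of the george row above (a sketch based on george's documented API, not part of the dataset row): condition the GP on the noisy data and predict on a dense grid.

kernel = ExpSquaredKernel(1.0)
gp = george.GP(kernel)
gp.compute(x, yerr)             # factorize K(x, x) + diag(yerr**2)
t = np.linspace(0, 10, 500)
mu, cov = gp.predict(y, t)      # posterior mean and covariance
std = np.sqrt(np.diag(cov))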
import argparse
import json
from rlkit.envs.wrappers import NormalizedBoxEnv
import numpy as np
from envs.cloth import ClothEnvPickled as ClothEnv
import cv2
import os
import pandas as pd
import time
import shutil
def rollout(variant, output_folder, deltas):
try:
os.makedirs(output_folder)
cnn_path = f"{output_folder}/sim_eval_images/cnn"
corners_path = f"{output_folder}/sim_eval_images/corners"
eval_path = f"{output_folder}/sim_eval_images/eval"
os.makedirs(cnn_path)
os.makedirs(corners_path)
os.makedirs(eval_path)
except FileExistsError:
print("folders exist already")
env = ClothEnv(**variant['env_kwargs'], has_viewer=True, save_folder=output_folder, initial_xml_dump=True)
env = NormalizedBoxEnv(env)
smallest_reached_goal = np.inf
got_done = False
env_timestep = env.timestep
steps_per_second = 1 / env.timestep
new_action_every_ctrl_step = steps_per_second / variant['env_kwargs']['control_frequency']
o = env.reset()
trajectory_log = []
trajectory_log.append(np.concatenate([env.desired_pos_step_W, env.desired_pos_ctrl_W, env.get_ee_position_I(), env.get_ee_position_W(), np.zeros(9)]))
for path_length, delta in enumerate(deltas):
train_image, eval_image = env.capture_image(None)
cv2.imwrite(f'{output_folder}/sim_eval_images/corners/{str(path_length).zfill(3)}.png', train_image)
cv2.imwrite(f'{output_folder}/sim_eval_images/eval/{str(path_length).zfill(3)}.png', eval_image)
if "image" in o.keys():
data = o['image'].copy().reshape((100, 100, 1))
cv2.imwrite(f'{output_folder}/sim_eval_images/cnn/{str(path_length).zfill(3)}.png', data*255)
o, r, d, env_info = env.step(delta[:3])
if d:
got_done = True
reached_goal = np.linalg.norm(o['achieved_goal']-o['desired_goal'])
print(path_length, "Reached goal", reached_goal, "Done", got_done)
if reached_goal < smallest_reached_goal:
smallest_reached_goal = reached_goal
delta = env.get_ee_position_W() - trajectory_log[-1][9:12]
velocity = delta / (env_timestep*new_action_every_ctrl_step)
acceleration = (velocity - trajectory_log[-1][15:18]) / (env_timestep*new_action_every_ctrl_step)
trajectory_log.append(np.concatenate([env.desired_pos_step_W, env.desired_pos_ctrl_W, env.get_ee_position_I(), env.get_ee_position_W(), delta, velocity, acceleration]))
np.savetxt(f"{output_folder}/sim_trajectory.csv",
trajectory_log, delimiter=",", fmt='%f')
return got_done, smallest_reached_goal, reached_goal
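# Trajectory bookkeeping in rollout() uses one-sided finite differences per
# control step: with dt = env_timestep * new_action_every_ctrl_step,
#   velocity_t     = (pos_t - pos_{t-1}) / dt
#   acceleration_t = (velocity_t - velocity_{t-1}) / dt
# where pos_{t-1} and velocity_{t-1} are read back out of the previous
# logged row (slices 9:12 and 15:18 of the concatenated log vector).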
def main(variant, input_folder, base_output_folder):
deltas = np.genfromtxt(f"{input_folder}/executable_deltas.csv", delimiter=',')[20:]
variant['env_kwargs']['output_max'] = 1
del variant['env_kwargs']['timestep']
joint_solimp_low_range = np.linspace(0.98, 0.9999, 10)
joint_solimp_high_range = np.linspace(0.99, 0.9999, 10)
joint_solimp_width_range = np.linspace(0.001, 0.03, 10)
joint_solref_timeconst_range = np.linspace(0.005, 0.05, 10)
joint_solref_dampratio_range = np.linspace(0.98, 1.02, 10)
tendon_shear_solimp_low_range = np.linspace(0.98, 0.9999, 10)
tendon_shear_solimp_high_range = np.linspace(0.99, 0.9999, 10)
tendon_shear_solimp_width_range = np.linspace(0.001, 0.03, 10)
tendon_shear_solref_timeconst_range = np.linspace(0.005, 0.05, 10)
tendon_shear_solref_dampratio_range = np.linspace(0.98, 1.02, 10)
tendon_main_solimp_low_range = np.linspace(0.98, 0.9999, 10)
tendon_main_solimp_high_range = np.linspace(0.99, 0.9999, 10)
tendon_main_solimp_width_range = np.linspace(0.001, 0.03, 10)
tendon_main_solref_timeconst_range = np.linspace(0.005, 0.05, 10)
tendon_main_solref_dampratio_range = np.linspace(0.98, 1.02, 10)
geom_solimp_low_range = np.linspace(0.98, 0.9999, 10)
geom_solimp_high_range = np.linspace(0.99, 0.9999, 10)
geom_solimp_width_range = np.linspace(0.001, 0.03, 10)
geom_solref_timeconst_range = np.linspace(0.005, 0.05, 10)
geom_solref_dampratio_range = np.linspace(0.98, 1.02, 10)
grasp_solimp_low_range = np.linspace(0.98, 0.9999, 10)
grasp_solimp_high_range = np.linspace(0.99, 0.9999, 10)
grasp_solimp_width_range = np.linspace(0.001, 0.03, 10)
grasp_solref_timeconst_range = np.linspace(0.005, 0.05, 10)
grasp_solref_dampratio_range = np.linspace(0.98, 1.02, 10)
geom_size_range = np.linspace(0.008, 0.011, 10)
friction_range = np.linspace(0.05, 1, 10)
cone_type_range = ["pyramidal", "elliptic"]
stats_df = pd.DataFrame()
tests = 0
while True:
model_kwargs = dict(
joint_solimp_low = np.random.choice(joint_solimp_low_range),
joint_solimp_high = np.random.choice(joint_solimp_high_range),
joint_solimp_width = np.random.choice(joint_solimp_width_range),
joint_solref_timeconst = np.random.choice(joint_solref_timeconst_range),
joint_solref_dampratio = np.random.choice(joint_solref_dampratio_range),
tendon_shear_solimp_low = np.random.choice(tendon_shear_solimp_low_range),
tendon_shear_solimp_high = np.random.choice(tendon_shear_solimp_high_range),
tendon_shear_solimp_width = np.random.choice(tendon_shear_solimp_width_range),
tendon_shear_solref_timeconst = np.random.choice(tendon_shear_solref_timeconst_range),
tendon_shear_solref_dampratio =
| completion: np.random.choice(tendon_shear_solref_dampratio_range) | api: numpy.random.choice |
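The cloth-sim row above draws every MuJoCo solver parameter independently from a coarse grid on each trial. A compact sketch of the same sampling pattern (grid names and bounds abbreviated from the row):

import numpy as np

param_grids = {
    "joint_solimp_low": np.linspace(0.98, 0.9999, 10),
    "joint_solref_timeconst": np.linspace(0.005, 0.05, 10),
    "friction": np.linspace(0.05, 1, 10),
}
model_kwargs = {name: np.random.choice(grid) for name, grid in param_grids.items()}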
from __future__ import print_function
###################################################################################################
# Libraries
###################################################################################################
import os
import numpy as np
from pysam import Samfile, Fastafile
from Bio import motifs
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from scipy.signal import savgol_filter
from scipy.stats import scoreatpercentile
from argparse import SUPPRESS
import pyx
# Internal
from rgt.Util import GenomeData, AuxiliaryFunctions
from rgt.HINT.signalProcessing import GenomicSignal
from rgt.GenomicRegionSet import GenomicRegionSet
from rgt.HINT.biasTable import BiasTable
def plotting_args(parser):
# Parameters Options
parser.add_argument("--organism", type=str, metavar="STRING", default="hg19",
help=("Organism considered on the analysis. Check our full documentation for all available "
"options. All default files such as genomes will be based on the chosen organism "
"and the data.config file."))
parser.add_argument("--reads-file", type=str, metavar="FILE", default=None)
parser.add_argument("--region-file", type=str, metavar="FILE", default=None)
parser.add_argument("--reads-file1", type=str, metavar="FILE", default=None)
parser.add_argument("--reads-file2", type=str, metavar="FILE", default=None)
parser.add_argument("--motif-file", type=str, metavar="FILE", default=None)
parser.add_argument("--bias-table", type=str, metavar="FILE1_F,FILE1_R", default=None)
parser.add_argument("--bias-table1", type=str, metavar="FILE1_F,FILE1_R", default=None)
parser.add_argument("--bias-table2", type=str, metavar="FILE1_F,FILE1_R", default=None)
parser.add_argument("--window-size", type=int, metavar="INT", default=400)
# Hidden Options
parser.add_argument("--initial-clip", type=int, metavar="INT", default=50, help=SUPPRESS)
parser.add_argument("--downstream-ext", type=int, metavar="INT", default=1, help=SUPPRESS)
parser.add_argument("--upstream-ext", type=int, metavar="INT", default=0, help=SUPPRESS)
parser.add_argument("--forward-shift", type=int, metavar="INT", default=5, help=SUPPRESS)
parser.add_argument("--reverse-shift", type=int, metavar="INT", default=-5, help=SUPPRESS)
parser.add_argument("--k-nb", type=int, metavar="INT", default=6, help=SUPPRESS)
parser.add_argument("--y-lim", type=float, metavar="FLOAT", default=0.3, help=SUPPRESS)
# Output Options
parser.add_argument("--output-location", type=str, metavar="PATH", default=os.getcwd(),
help="Path where the output bias table files will be written.")
parser.add_argument("--output-prefix", type=str, metavar="STRING", default=None,
help="The prefix for results files.")
# plot type
parser.add_argument("--seq-logo", default=False, action='store_true')
parser.add_argument("--bias-raw-bc-line", default=False, action='store_true')
parser.add_argument("--raw-bc-line", default=False, action='store_true')
parser.add_argument("--strand-line", default=False, action='store_true')
parser.add_argument("--unstrand-line", default=False, action='store_true')
parser.add_argument("--bias-line", default=False, action='store_true')
parser.add_argument("--atac-dnase-line", default=False, action='store_true')
parser.add_argument("--bias-raw-bc-strand-line2", default=False, action='store_true')
parser.add_argument("--fragment-raw-size-line", default=False, action='store_true')
parser.add_argument("--fragment-bc-size-line", default=False, action='store_true')
def plotting_run(args):
if args.seq_logo:
seq_logo(args)
if args.bias_raw_bc_line:
bias_raw_bc_strand_line(args)
if args.strand_line:
strand_line(args)
if args.unstrand_line:
unstrand_line(args)
if args.raw_bc_line:
raw_bc_line(args)
if args.bias_raw_bc_strand_line2:
bias_raw_bc_strand_line2(args)
if args.fragment_raw_size_line:
fragment_size_raw_line(args)
if args.fragment_bc_size_line:
fragment_size_bc_line(args)
def seq_logo(args):
logo_fname = os.path.join(args.output_location, "{}.logo.eps".format(args.output_prefix))
pwm_file = os.path.join(args.output_location, "{}.pwm".format(args.output_prefix))
pwm_dict = dict(
[("A", [0.0] * args.window_size), ("C", [0.0] * args.window_size), ("G", [0.0] * args.window_size),
("T", [0.0] * args.window_size), ("N", [0.0] * args.window_size)])
genome_data = GenomeData(args.organism)
fasta_file = Fastafile(genome_data.get_genome())
bam = Samfile(args.reads_file, "rb")
regions = GenomicRegionSet("Peaks")
regions.read(args.region_file)
for region in regions:
for r in bam.fetch(region.chrom, region.initial, region.final):
if not r.is_reverse:
cut_site = r.pos - 1
p1 = cut_site - int(args.window_size / 2)
else:
cut_site = r.aend + 1
p1 = cut_site - int(args.window_size / 2)
p2 = p1 + args.window_size
# Fetching k-mer
currStr = str(fasta_file.fetch(region.chrom, p1, p2)).upper()
if r.is_reverse: continue
for i in range(0, len(currStr)):
pwm_dict[currStr[i]][i] += 1
with open(pwm_file, "w") as f:
for e in ["A", "C", "G", "T"]:
f.write(" ".join([str(int(c)) for c in pwm_dict[e]]) + "\n")
pwm = motifs.read(open(pwm_file), "pfm")
pwm.weblogo(logo_fname, format="eps", stack_width="large", stacks_per_line=str(args.window_size),
color_scheme="color_classic", unit_name="", show_errorbars=False, logo_title="",
show_xaxis=False, xaxis_label="", show_yaxis=False, yaxis_label="",
show_fineprint=False, show_ends=False, yaxis_scale=args.y_lim)
start = -(args.window_size / 2)
end = (args.window_size / 2) - 1
x = np.linspace(start, end, num=args.window_size).tolist()
fig = plt.figure(figsize=(8, 2))
ax = fig.add_subplot(111)
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_position(('outward', 15))
ax.tick_params(direction='out')
ax.xaxis.set_ticks(map(int, x))
x1 = map(int, x)
ax.set_xticklabels(map(str, x1), rotation=90)
ax.set_xlabel("Coordinates from Read Start", fontweight='bold')
ax.set_ylim([0, args.y_lim])
ax.yaxis.set_ticks([0, args.y_lim])
ax.set_yticklabels([str(0), str(args.y_lim)], rotation=90)
ax.set_ylabel("bits", rotation=90)
figure_name = os.path.join(args.output_location, "{}.line.eps".format(args.output_prefix))
fig.tight_layout()
fig.savefig(figure_name, format="eps", dpi=300)
# Creating canvas and printing eps / pdf with merged results
output_fname = os.path.join(args.output_location, "{}.eps".format(args.output_prefix))
c = pyx.canvas.canvas()
c.insert(pyx.epsfile.epsfile(0, 0, figure_name, scale=1.0))
c.insert(pyx.epsfile.epsfile(1.5, 1.5, logo_fname, width=18.8, height=3.5))
c.writeEPSfile(output_fname)
os.system("epstopdf " + output_fname)
os.remove(os.path.join(args.output_location, "{}.line.eps".format(args.output_prefix)))
os.remove(os.path.join(args.output_location, "{}.logo.eps".format(args.output_prefix)))
os.remove(os.path.join(args.output_location, "{}.eps".format(args.output_prefix)))
def bias_raw_bc_line(args):
signal = GenomicSignal(args.reads_file)
signal.load_sg_coefs(slope_window_size=9)
bias_table = BiasTable()
bias_table_list = args.bias_table.split(",")
table = bias_table.load_table(table_file_name_F=bias_table_list[0],
table_file_name_R=bias_table_list[1])
genome_data = GenomeData(args.organism)
fasta = Fastafile(genome_data.get_genome())
pwm_dict = dict([("A", [0.0] * args.window_size), ("C", [0.0] * args.window_size),
("G", [0.0] * args.window_size), ("T", [0.0] * args.window_size),
("N", [0.0] * args.window_size)])
num_sites = 0
mpbs_regions = GenomicRegionSet("Motif Predicted Binding Sites")
mpbs_regions.read(args.motif_file)
bam = Samfile(args.reads_file, "rb")
mean_signal_bias_f = np.zeros(args.window_size)
mean_signal_bias_r = np.zeros(args.window_size)
mean_signal_raw = np.zeros(args.window_size)
mean_signal_raw_f = np.zeros(args.window_size)
mean_signal_raw_r = np.zeros(args.window_size)
mean_signal_bc = np.zeros(args.window_size)
mean_signal_bc_f = np.zeros(args.window_size)
mean_signal_bc_r = np.zeros(args.window_size)
motif_len = 0
for region in mpbs_regions:
if str(region.name).split(":")[-1] == "Y":
# Extend by window_size
mid = (region.initial + region.final) / 2
p1 = mid - (args.window_size / 2)
p2 = mid + (args.window_size / 2)
motif_len = region.final - region.initial
signal_bias_f, signal_bias_r, raw, raw_f, raw_r, bc, bc_f, bc_r = \
signal.get_bias_raw_bc_signal(ref=region.chrom, start=p1, end=p2, bam=bam,
fasta=fasta, bias_table=table,
forward_shift=args.forward_shift,
reverse_shift=args.reverse_shift,
strand=True)
num_sites += 1
mean_signal_bias_f = np.add(mean_signal_bias_f, np.array(signal_bias_f))
mean_signal_bias_r = np.add(mean_signal_bias_r, np.array(signal_bias_r))
mean_signal_raw = np.add(mean_signal_raw, np.array(raw))
mean_signal_raw_f = np.add(mean_signal_raw_f, np.array(raw_f))
mean_signal_raw_r = np.add(mean_signal_raw_r, np.array(raw_r))
mean_signal_bc = np.add(mean_signal_bc, np.array(bc))
mean_signal_bc_f = np.add(mean_signal_bc_f, np.array(bc_f))
mean_signal_bc_r = np.add(mean_signal_bc_r, np.array(bc_r))
# Update pwm
aux_plus = 1
dna_seq = str(fasta.fetch(region.chrom, p1, p2)).upper()
if (region.final - region.initial) % 2 == 0:
aux_plus = 0
if region.orientation == "+":
for i in range(len(dna_seq)):
pwm_dict[dna_seq[i]][i] += 1
mean_signal_bias_f = mean_signal_bias_f / num_sites
mean_signal_bias_r = mean_signal_bias_r / num_sites
mean_signal_raw = mean_signal_raw / num_sites
mean_signal_bc = mean_signal_bc / num_sites
mean_signal_bc_f = mean_signal_bc_f / num_sites
mean_signal_bc_r = mean_signal_bc_r / num_sites
# Output the norm and slope signal
output_fname = os.path.join(args.output_location, "{}.txt".format(args.output_prefix))
f = open(output_fname, "w")
f.write("\t".join((map(str, mean_signal_bias_f))) + "\n")
f.write("\t".join((map(str, mean_signal_bias_r))) + "\n")
f.write("\t".join((map(str, mean_signal_raw))) + "\n")
f.write("\t".join((map(str, mean_signal_bc))) + "\n")
f.close()
# Output PWM and create logo
pwm_fname = os.path.join(args.output_location, "{}.pwm".format(args.output_prefix))
pwm_file = open(pwm_fname, "w")
for e in ["A", "C", "G", "T"]:
pwm_file.write(" ".join([str(int(f)) for f in pwm_dict[e]]) + "\n")
pwm_file.close()
logo_fname = os.path.join(args.output_location, "{}.logo.eps".format(args.output_prefix))
pwm = motifs.read(open(pwm_fname), "pfm")
pwm.weblogo(logo_fname, format="eps", stack_width="large", stacks_per_line=str(args.window_size),
color_scheme="color_classic", unit_name="", show_errorbars=False, logo_title="",
show_xaxis=False, xaxis_label="", show_yaxis=False, yaxis_label="",
show_fineprint=False, show_ends=False)
fig, (ax1, ax2, ax3) = plt.subplots(3, figsize=(8, 6))
start = -(args.window_size / 2)
end = (args.window_size / 2) - 1
x = np.linspace(start, end, num=args.window_size)
if motif_len % 2 == 0:
x1 = int(- (motif_len / 2))
x2 = int(motif_len / 2)
else:
x1 = int(-(motif_len / 2) - 1)
x2 = int((motif_len / 2) + 1)
############################################################
# bias signal per strand
fp_score = sum(mean_signal_raw[args.window_size / 2 + x1: args.window_size / 2 + x2])
shoulder_l = sum(mean_signal_raw[args.window_size / 2 + x1 - motif_len:args.window_size / 2 + x1])
shoulder_r = sum(mean_signal_raw[args.window_size / 2 + x2:args.window_size / 2 + x2 + motif_len])
sfr = (shoulder_l + shoulder_r) / (2 * fp_score)
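# SFR here is presumably a shoulder-to-footprint ratio: the summed signal in
# the two motif-length flanks divided by twice the summed signal inside the
# footprint, so deeper footprints (relative to their flanks) score higher.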
min_ax1 = min(mean_signal_raw)
max_ax1 = max(mean_signal_raw)
ax1.plot(x, mean_signal_raw, color='blue', label='Uncorrected')
ax1.text(0.15, 0.9, 'n = {}'.format(num_sites), verticalalignment='bottom',
horizontalalignment='right', transform=ax1.transAxes, fontweight='bold')
ax1.text(0.35, 0.15, 'SFR = {}'.format(round(sfr, 2)), verticalalignment='bottom',
horizontalalignment='right', transform=ax1.transAxes, fontweight='bold')
ax1.xaxis.set_ticks_position('bottom')
ax1.yaxis.set_ticks_position('left')
ax1.spines['top'].set_visible(False)
ax1.spines['right'].set_visible(False)
ax1.spines['left'].set_position(('outward', 15))
ax1.spines['bottom'].set_position(('outward', 5))
ax1.tick_params(direction='out')
ax1.set_xticks([start, 0, end])
ax1.set_xticklabels([str(start), 0, str(end)])
ax1.set_yticks([min_ax1, max_ax1])
ax1.set_yticklabels([str(round(min_ax1, 2)), str(round(max_ax1, 2))], rotation=90)
ax1.set_title(args.output_prefix, fontweight='bold')
ax1.set_xlim(start, end)
ax1.set_ylim([min_ax1, max_ax1])
ax1.legend(loc="lower right", frameon=False)
####################################################################
#####################################################################
# Bias corrected, non-bias corrected (not strand specific)
fp_score = sum(mean_signal_bc[args.window_size / 2 + x1: args.window_size / 2 + x2])
shoulder_l = sum(mean_signal_bc[args.window_size / 2 + x1 - motif_len:args.window_size / 2 + x1])
shoulder_r = sum(mean_signal_bc[args.window_size / 2 + x2:args.window_size / 2 + x2 + motif_len])
sfr = (shoulder_l + shoulder_r) / (2 * fp_score)
min_ax2 = min(mean_signal_bc)
max_ax2 = max(mean_signal_bc)
ax2.plot(x, mean_signal_bc, color='red', label='Corrected')
ax2.text(0.35, 0.15, 'SFR = {}'.format(round(sfr, 2)), verticalalignment='bottom',
horizontalalignment='right', transform=ax2.transAxes, fontweight='bold')
ax2.xaxis.set_ticks_position('bottom')
ax2.yaxis.set_ticks_position('left')
ax2.spines['top'].set_visible(False)
ax2.spines['right'].set_visible(False)
ax2.spines['left'].set_position(('outward', 15))
ax2.tick_params(direction='out')
ax2.set_xticks([start, 0, end])
ax2.set_xticklabels([str(start), 0, str(end)])
ax2.set_yticks([min_ax2, max_ax2])
ax2.set_yticklabels([str(round(min_ax2, 2)), str(round(max_ax2, 2))], rotation=90)
ax2.set_xlim(start, end)
ax2.set_ylim([min_ax2, max_ax2])
ax2.legend(loc="lower right", frameon=False)
fp_score_f = sum(mean_signal_bc_f[args.window_size / 2 + x1: args.window_size / 2 + x2])
shoulder_l_f = sum(mean_signal_bc_f[args.window_size / 2 + x1 - motif_len:args.window_size / 2 + x1])
shoulder_r_f = sum(mean_signal_bc_f[args.window_size / 2 + x2:args.window_size / 2 + x2 + motif_len])
sfr_f = (shoulder_l_f + shoulder_r_f) / (2 * fp_score_f)
fp_score_r = sum(mean_signal_bc_r[args.window_size / 2 + x1: args.window_size / 2 + x2])
shoulder_l_r = sum(mean_signal_bc_r[args.window_size / 2 + x1 - motif_len:args.window_size / 2 + x1])
shoulder_r_r = sum(mean_signal_bc_r[args.window_size / 2 + x2:args.window_size / 2 + x2 + motif_len])
sfr_r = (shoulder_l_r + shoulder_r_r) / (2 * fp_score_r)
min_ax3 = min(min(mean_signal_bc_f), min(mean_signal_bc_r))
max_ax3 = max(max(mean_signal_bc_f), max(mean_signal_bc_r))
ax3.plot(x, mean_signal_bc_f, color='purple', label='Forward')
ax3.plot(x, mean_signal_bc_r, color='green', label='Reverse')
ax3.text(0.35, 0.15, 'SFR_f = {}'.format(round(sfr_f, 2)), verticalalignment='bottom',
horizontalalignment='right', transform=ax3.transAxes, fontweight='bold')
ax3.text(0.35, 0.05, 'SFR_r = {}'.format(round(sfr_r, 2)), verticalalignment='bottom',
horizontalalignment='right', transform=ax3.transAxes, fontweight='bold')
ax3.xaxis.set_ticks_position('bottom')
ax3.yaxis.set_ticks_position('left')
ax3.spines['top'].set_visible(False)
ax3.spines['right'].set_visible(False)
ax3.spines['left'].set_position(('outward', 15))
ax3.tick_params(direction='out')
ax3.set_xticks([start, 0, end])
ax3.set_xticklabels([str(start), 0, str(end)])
ax3.set_yticks([min_ax3, max_ax3])
ax3.set_yticklabels([str(round(min_ax3, 2)), str(round(max_ax3, 2))], rotation=90)
ax3.set_xlim(start, end)
ax3.set_ylim([min_ax3, max_ax3])
ax3.legend(loc="lower right", frameon=False)
ax3.spines['bottom'].set_position(('outward', 40))
ax1.axvline(x=x1, ymin=-0.3, ymax=1, c="black", lw=0.5, ls='dashed', zorder=0, clip_on=False)
ax1.axvline(x=x2, ymin=-0.3, ymax=1, c="black", lw=0.5, ls='dashed', zorder=0, clip_on=False)
ax2.axvline(x=x1, ymin=-0.5, ymax=1.2, c="black", lw=0.5, ls='dashed', zorder=0, clip_on=False)
ax2.axvline(x=x2, ymin=-0.5, ymax=1.2, c="black", lw=0.5, ls='dashed', zorder=0, clip_on=False)
###############################################################################
# merge the above figures
figure_name = os.path.join(args.output_location, "{}.line.eps".format(args.output_prefix))
fig.subplots_adjust(bottom=.2, hspace=.5)
fig.tight_layout()
fig.savefig(figure_name, format="eps", dpi=300)
# Creating canvas and printing eps / pdf with merged results
output_fname = os.path.join(args.output_location, "{}.eps".format(args.output_prefix))
c = pyx.canvas.canvas()
c.insert(pyx.epsfile.epsfile(0, 0, figure_name, scale=1.0))
c.insert(pyx.epsfile.epsfile(1.45, 0.89, logo_fname, width=18.3, height=1.75))
c.writeEPSfile(output_fname)
os.system("epstopdf " + figure_name)
os.system("epstopdf " + logo_fname)
os.system("epstopdf " + output_fname)
os.remove(pwm_fname)
os.remove(os.path.join(args.output_location, "{}.line.eps".format(args.output_prefix)))
os.remove(os.path.join(args.output_location, "{}.logo.eps".format(args.output_prefix)))
os.remove(os.path.join(args.output_location, "{}.line.pdf".format(args.output_prefix)))
os.remove(os.path.join(args.output_location, "{}.logo.pdf".format(args.output_prefix)))
os.remove(os.path.join(args.output_location, "{}.eps".format(args.output_prefix)))
def raw_bc_line(args):
signal = GenomicSignal(args.reads_file)
signal.load_sg_coefs(slope_window_size=9)
bias_table = BiasTable()
bias_table_list = args.bias_table.split(",")
table = bias_table.load_table(table_file_name_F=bias_table_list[0],
table_file_name_R=bias_table_list[1])
genome_data = GenomeData(args.organism)
fasta = Fastafile(genome_data.get_genome())
pwm_dict = dict([("A", [0.0] * args.window_size), ("C", [0.0] * args.window_size),
("G", [0.0] * args.window_size), ("T", [0.0] * args.window_size),
("N", [0.0] * args.window_size)])
num_sites = 0
mpbs_regions = GenomicRegionSet("Motif Predicted Binding Sites")
mpbs_regions.read(args.motif_file)
bam = Samfile(args.reads_file, "rb")
mean_signal_raw = np.zeros(args.window_size)
mean_signal_bc = np.zeros(args.window_size)
for region in mpbs_regions:
if str(region.name).split(":")[-1] == "Y":
# Extend by window_size
mid = (region.initial + region.final) / 2
p1 = mid - (args.window_size / 2)
p2 = mid + (args.window_size / 2)
signal_bias_f, signal_bias_r, raw, raw_f, raw_r, bc, bc_f, bc_r = \
signal.get_bias_raw_bc_signal(ref=region.chrom, start=p1, end=p2, bam=bam,
fasta=fasta, bias_table=table,
forward_shift=args.forward_shift,
reverse_shift=args.reverse_shift,
strand=True)
num_sites += 1
mean_signal_raw = np.add(mean_signal_raw, np.array(raw))
mean_signal_bc = np.add(mean_signal_bc, np.array(bc))
# Update pwm
aux_plus = 1
dna_seq = str(fasta.fetch(region.chrom, p1, p2)).upper()
if (region.final - region.initial) % 2 == 0:
aux_plus = 0
if region.orientation == "+":
for i in range(len(dna_seq)):
pwm_dict[dna_seq[i]][i] += 1
mean_signal_raw = mean_signal_raw / num_sites
mean_signal_bc = mean_signal_bc / num_sites
# Output the norm and slope signal
output_fname = os.path.join(args.output_location, "{}.txt".format(args.output_prefix))
f = open(output_fname, "w")
f.write("\t".join((map(str, mean_signal_raw))) + "\n")
f.write("\t".join((map(str, mean_signal_bc))) + "\n")
f.close()
# Output PWM and create logo
pwm_fname = os.path.join(args.output_location, "{}.pwm".format(args.output_prefix))
pwm_file = open(pwm_fname, "w")
for e in ["A", "C", "G", "T"]:
pwm_file.write(" ".join([str(int(f)) for f in pwm_dict[e]]) + "\n")
pwm_file.close()
logo_fname = os.path.join(args.output_location, "{}.logo.eps".format(args.output_prefix))
pwm = motifs.read(open(pwm_fname), "pfm")
pwm.weblogo(logo_fname, format="eps", stack_width="large", stacks_per_line=str(args.window_size),
color_scheme="color_classic", unit_name="", show_errorbars=False, logo_title="",
show_xaxis=False, xaxis_label="", show_yaxis=False, yaxis_label="",
show_fineprint=False, show_ends=False)
fig = plt.figure(figsize=(8, 4))
ax = fig.add_subplot(111)
start = -(args.window_size / 2)
end = (args.window_size / 2) - 1
x = np.linspace(start, end, num=args.window_size)
############################################################
min_ = min(min(mean_signal_raw), min(mean_signal_bc))
max_ = max(max(mean_signal_raw), max(mean_signal_bc))
ax.plot(x, mean_signal_raw, color='red', label='Uncorrected')
ax.plot(x, mean_signal_bc, color='blue', label='Corrected')
ax.text(0.15, 0.9, 'n = {}'.format(num_sites), verticalalignment='bottom',
horizontalalignment='right', transform=ax.transAxes, fontweight='bold')
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_position(('outward', 15))
ax.spines['bottom'].set_position(('outward', 5))
ax.tick_params(direction='out')
ax.set_xticks([start, 0, end])
ax.set_xticklabels([str(start), 0, str(end)])
ax.set_yticks([min_, max_])
ax.set_yticklabels([str(round(min_, 2)), str(round(max_, 2))], rotation=90)
ax.set_title(args.output_prefix, fontweight='bold')
ax.set_xlim(start, end)
ax.set_ylim(min_, max_)
ax.legend(loc="lower right", frameon=False)
ax.spines['bottom'].set_position(('outward', 40))
###############################################################################
# merge the above figures
figure_name = os.path.join(args.output_location, "{}.line.eps".format(args.output_prefix))
fig.subplots_adjust(bottom=.2, hspace=.5)
fig.tight_layout()
fig.savefig(figure_name, format="eps", dpi=300)
# Creating canvas and printing eps / pdf with merged results
output_fname = os.path.join(args.output_location, "{}.eps".format(args.output_prefix))
c = pyx.canvas.canvas()
c.insert(pyx.epsfile.epsfile(0, 0, figure_name, scale=1.0))
c.insert(pyx.epsfile.epsfile(1.45, 0.89, logo_fname, width=18.3, height=1.75))
c.writeEPSfile(output_fname)
os.system("epstopdf " + figure_name)
os.system("epstopdf " + logo_fname)
os.system("epstopdf " + output_fname)
os.remove(pwm_fname)
os.remove(os.path.join(args.output_location, "{}.line.eps".format(args.output_prefix)))
os.remove(os.path.join(args.output_location, "{}.logo.eps".format(args.output_prefix)))
os.remove(os.path.join(args.output_location, "{}.line.pdf".format(args.output_prefix)))
os.remove(os.path.join(args.output_location, "{}.logo.pdf".format(args.output_prefix)))
os.remove(os.path.join(args.output_location, "{}.eps".format(args.output_prefix)))
def bias_raw_bc_strand_line(args):
signal = GenomicSignal(args.reads_file)
signal.load_sg_coefs(slope_window_size=9)
bias_table = BiasTable()
bias_table_list = args.bias_table.split(",")
table = bias_table.load_table(table_file_name_F=bias_table_list[0],
table_file_name_R=bias_table_list[1])
genome_data = GenomeData(args.organism)
fasta = Fastafile(genome_data.get_genome())
pwm_dict = dict([("A", [0.0] * args.window_size), ("C", [0.0] * args.window_size),
("G", [0.0] * args.window_size), ("T", [0.0] * args.window_size),
("N", [0.0] * args.window_size)])
num_sites = 0
mpbs_regions = GenomicRegionSet("Motif Predicted Binding Sites")
mpbs_regions.read(args.motif_file)
bam = Samfile(args.reads_file, "rb")
mean_signal_bias_f = np.zeros(args.window_size)
mean_signal_bias_r = np.zeros(args.window_size)
mean_signal_raw = np.zeros(args.window_size)
mean_signal_bc = np.zeros(args.window_size)
for region in mpbs_regions:
if str(region.name).split(":")[-1] == "Y":
mid = (region.initial + region.final) / 2
p1 = mid - (args.window_size / 2)
p2 = mid + (args.window_size / 2)
signal_bias_f, signal_bias_r, signal_raw, signal_bc = \
signal.get_bias_raw_bc_signal(ref=region.chrom, start=p1, end=p2, bam=bam,
fasta=fasta, bias_table=table,
forward_shift=args.forward_shift, reverse_shift=args.reverse_shift)
num_sites += 1
mean_signal_bias_f = np.add(mean_signal_bias_f, np.array(signal_bias_f))
mean_signal_bias_r = np.add(mean_signal_bias_r, np.array(signal_bias_r))
mean_signal_raw = np.add(mean_signal_raw, np.array(signal_raw))
mean_signal_bc = np.add(mean_signal_bc, np.array(signal_bc))
# Update pwm
aux_plus = 1
dna_seq = str(fasta.fetch(region.chrom, p1, p2)).upper()
if (region.final - region.initial) % 2 == 0:
aux_plus = 0
dna_seq_rev = AuxiliaryFunctions.revcomp(str(fasta.fetch(region.chrom,
p1 + aux_plus, p2 + aux_plus)).upper())
if region.orientation == "+":
for i in range(0, len(dna_seq)):
pwm_dict[dna_seq[i]][i] += 1
mean_signal_bias_f = mean_signal_bias_f / num_sites
mean_signal_bias_r = mean_signal_bias_r / num_sites
mean_signal_raw = mean_signal_raw / num_sites
mean_signal_bc = mean_signal_bc / num_sites
# Output the norm and slope signal
output_fname = os.path.join(args.output_location, "{}.txt".format(args.output_prefix))
f = open(output_fname, "w")
f.write("\t".join((map(str, mean_signal_bias_f))) + "\n")
f.write("\t".join((map(str, mean_signal_bias_r))) + "\n")
f.write("\t".join((map(str, mean_signal_raw))) + "\n")
f.write("\t".join((map(str, mean_signal_bc))) + "\n")
f.close()
# Output PWM and create logo
pwm_fname = os.path.join(args.output_location, "{}.pwm".format(args.output_prefix))
pwm_file = open(pwm_fname, "w")
for e in ["A", "C", "G", "T"]:
pwm_file.write(" ".join([str(int(f)) for f in pwm_dict[e]]) + "\n")
pwm_file.close()
logo_fname = os.path.join(args.output_location, "{}.logo.eps".format(args.output_prefix))
pwm = motifs.read(open(pwm_fname), "pfm")
pwm.weblogo(logo_fname, format="eps", stack_width="large", stacks_per_line=str(args.window_size),
color_scheme="color_classic", unit_name="", show_errorbars=False, logo_title="",
show_xaxis=False, xaxis_label="", show_yaxis=False, yaxis_label="",
show_fineprint=False, show_ends=False)
fig, (ax1, ax2) = plt.subplots(2, figsize=(8, 4))
start = -(args.window_size / 2)
end = (args.window_size / 2) - 1
x = np.linspace(start, end, num=args.window_size)
############################################################
# bias signal per strand
min_ = min(min(mean_signal_bias_f), min(mean_signal_bias_r))
max_ = max(max(mean_signal_bias_f), max(mean_signal_bias_r))
ax1.plot(x, mean_signal_bias_f, color='purple', label='Forward')
ax1.plot(x, mean_signal_bias_r, color='green', label='Reverse')
ax1.text(0.15, 0.9, 'n = {}'.format(num_sites), verticalalignment='bottom',
horizontalalignment='right', transform=ax1.transAxes, fontweight='bold')
ax1.xaxis.set_ticks_position('bottom')
ax1.yaxis.set_ticks_position('left')
ax1.spines['top'].set_visible(False)
ax1.spines['right'].set_visible(False)
ax1.spines['left'].set_position(('outward', 15))
ax1.spines['bottom'].set_position(('outward', 5))
ax1.tick_params(direction='out')
ax1.set_xticks([start, 0, end])
ax1.set_xticklabels([str(start), 0, str(end)])
ax1.set_yticks([min_, max_])
ax1.set_yticklabels([str(round(min_, 2)), str(round(max_, 2))], rotation=90)
ax1.set_title(args.output_prefix, fontweight='bold')
ax1.set_xlim(start, end)
ax1.set_ylim([min_, max_])
ax1.legend(loc="upper right", frameon=False)
####################################################################
#####################################################################
# Bias corrected, non-bias corrected (not strand specific)
min_ = min(min(mean_signal_raw), min(mean_signal_bc))
max_ = max(max(mean_signal_raw), max(mean_signal_bc))
ax2.plot(x, mean_signal_raw, color='blue', label='Uncorrected')
ax2.plot(x, mean_signal_bc, color='red', label='Corrected')
ax2.xaxis.set_ticks_position('bottom')
ax2.yaxis.set_ticks_position('left')
ax2.spines['top'].set_visible(False)
ax2.spines['right'].set_visible(False)
ax2.spines['left'].set_position(('outward', 15))
ax2.tick_params(direction='out')
ax2.set_xticks([start, 0, end])
ax2.set_xticklabels([str(start), 0, str(end)])
ax2.set_yticks([min_, max_])
ax2.set_yticklabels([str(round(min_, 2)), str(round(max_, 2))], rotation=90)
ax2.set_xlim(start, end)
ax2.set_ylim([min_, max_])
ax2.legend(loc="upper right", frameon=False)
ax2.spines['bottom'].set_position(('outward', 40))
###############################################################################
# merge the above figures
figure_name = os.path.join(args.output_location, "{}.line.eps".format(args.output_prefix))
fig.subplots_adjust(bottom=.2, hspace=.5)
fig.tight_layout()
fig.savefig(figure_name, format="eps", dpi=300)
# Creating canvas and printing eps / pdf with merged results
output_fname = os.path.join(args.output_location, "{}.eps".format(args.output_prefix))
c = pyx.canvas.canvas()
c.insert(pyx.epsfile.epsfile(0, 0, figure_name, scale=1.0))
c.insert(pyx.epsfile.epsfile(1.51, 0.89, logo_fname, width=18.3, height=1.75))
c.writeEPSfile(output_fname)
os.system("epstopdf " + figure_name)
os.system("epstopdf " + logo_fname)
os.system("epstopdf " + output_fname)
os.remove(pwm_fname)
os.remove(os.path.join(args.output_location, "{}.line.eps".format(args.output_prefix)))
os.remove(os.path.join(args.output_location, "{}.logo.eps".format(args.output_prefix)))
os.remove(os.path.join(args.output_location, "{}.line.pdf".format(args.output_prefix)))
os.remove(os.path.join(args.output_location, "{}.logo.pdf".format(args.output_prefix)))
os.remove(os.path.join(args.output_location, "{}.eps".format(args.output_prefix)))
def bias_raw_bc_strand_line2(args):
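    """Plot sequence-bias and cut-site profiles around motif-predicted binding
    sites (regions whose name ends in ':Y'): the mean forward/reverse bias
    signal plus the mean raw and bias-corrected signal, overall and per strand.
    Writes the profiles to <output_prefix>.txt and a logo-annotated line plot
    to <output_prefix>.pdf."""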
signal = GenomicSignal(args.reads_file)
signal.load_sg_coefs(slope_window_size=9)
bias_table = BiasTable()
bias_table_list = args.bias_table.split(",")
table = bias_table.load_table(table_file_name_F=bias_table_list[0],
table_file_name_R=bias_table_list[1])
genome_data = GenomeData(args.organism)
fasta = Fastafile(genome_data.get_genome())
pwm_dict = dict([("A", [0.0] * args.window_size), ("C", [0.0] * args.window_size),
("G", [0.0] * args.window_size), ("T", [0.0] * args.window_size),
("N", [0.0] * args.window_size)])
num_sites = 0
mpbs_regions = GenomicRegionSet("Motif Predicted Binding Sites")
mpbs_regions.read(args.motif_file)
bam = Samfile(args.reads_file, "rb")
mean_signal_bias_f = np.zeros(args.window_size)
mean_signal_bias_r = np.zeros(args.window_size)
mean_signal_raw = np.zeros(args.window_size)
mean_signal_raw_f = np.zeros(args.window_size)
mean_signal_raw_r = np.zeros(args.window_size)
mean_signal_bc = np.zeros(args.window_size)
mean_signal_bc_f = np.zeros(args.window_size)
mean_signal_bc_r = np.zeros(args.window_size)
for region in mpbs_regions:
if str(region.name).split(":")[-1] == "Y":
            mid = (region.initial + region.final) // 2
            p1 = mid - (args.window_size // 2)
            p2 = mid + (args.window_size // 2)
signal_bias_f, signal_bias_r, signal_raw, signal_raw_f, signal_raw_r, signal_bc, signal_bc_f, signal_bc_r = \
signal.get_bias_raw_bc_signal(ref=region.chrom, start=p1, end=p2, bam=bam,
fasta=fasta, bias_table=table,
forward_shift=args.forward_shift,
reverse_shift=args.reverse_shift,
strand=True)
num_sites += 1
mean_signal_bias_f = np.add(mean_signal_bias_f, np.array(signal_bias_f))
mean_signal_bias_r = np.add(mean_signal_bias_r, np.array(signal_bias_r))
mean_signal_raw = np.add(mean_signal_raw, np.array(signal_raw))
mean_signal_raw_f = np.add(mean_signal_raw_f, np.array(signal_raw_f))
mean_signal_raw_r = np.add(mean_signal_raw_r, np.array(signal_raw_r))
mean_signal_bc = np.add(mean_signal_bc, np.array(signal_bc))
mean_signal_bc_f = np.add(mean_signal_bc_f, np.array(signal_bc_f))
mean_signal_bc_r = np.add(mean_signal_bc_r, np.array(signal_bc_r))
# Update pwm
aux_plus = 1
dna_seq = str(fasta.fetch(region.chrom, p1, p2)).upper()
if (region.final - region.initial) % 2 == 0:
aux_plus = 0
dna_seq_rev = AuxiliaryFunctions.revcomp(str(fasta.fetch(region.chrom,
p1 + aux_plus, p2 + aux_plus)).upper())
if region.orientation == "+":
for i in range(0, len(dna_seq)):
pwm_dict[dna_seq[i]][i] += 1
mean_signal_bias_f = mean_signal_bias_f / num_sites
mean_signal_bias_r = mean_signal_bias_r / num_sites
mean_signal_raw = mean_signal_raw / num_sites
mean_signal_raw_f = mean_signal_raw_f / num_sites
mean_signal_raw_r = mean_signal_raw_r / num_sites
mean_signal_bc = mean_signal_bc / num_sites
mean_signal_bc_f = mean_signal_bc_f / num_sites
mean_signal_bc_r = mean_signal_bc_r / num_sites
# mean_signal_raw = rescaling(mean_signal_raw)
# mean_signal_bc = rescaling(mean_signal_bc)
    # Output the mean bias, raw and bias-corrected signals
output_fname = os.path.join(args.output_location, "{}.txt".format(args.output_prefix))
f = open(output_fname, "w")
f.write("\t".join((map(str, mean_signal_bias_f))) + "\n")
f.write("\t".join((map(str, mean_signal_bias_r))) + "\n")
f.write("\t".join((map(str, mean_signal_raw))) + "\n")
f.write("\t".join((map(str, mean_signal_raw_f))) + "\n")
f.write("\t".join((map(str, mean_signal_raw_r))) + "\n")
f.write("\t".join((map(str, mean_signal_bc))) + "\n")
f.write("\t".join((map(str, mean_signal_bc_f))) + "\n")
f.write("\t".join((map(str, mean_signal_bc_r))) + "\n")
f.close()
# Output PWM and create logo
pwm_fname = os.path.join(args.output_location, "{}.pwm".format(args.output_prefix))
pwm_file = open(pwm_fname, "w")
for e in ["A", "C", "G", "T"]:
pwm_file.write(" ".join([str(int(f)) for f in pwm_dict[e]]) + "\n")
pwm_file.close()
logo_fname = os.path.join(args.output_location, "{}.logo.eps".format(args.output_prefix))
pwm = motifs.read(open(pwm_fname), "pfm")
pwm.weblogo(logo_fname, format="eps", stack_width="large", stacks_per_line=str(args.window_size),
color_scheme="color_classic", unit_name="", show_errorbars=False, logo_title="",
show_xaxis=False, xaxis_label="", show_yaxis=False, yaxis_label="",
show_fineprint=False, show_ends=False)
# fig, (ax1, ax2, ax3, ax4) = plt.subplots(4, figsize=(8, 8))
fig, (ax1, ax4) = plt.subplots(2, figsize=(8, 4))
    start = -(args.window_size // 2)
    end = (args.window_size // 2) - 1
x = np.linspace(start, end, num=args.window_size)
############################################################
# bias signal per strand
min_ = min(min(mean_signal_bias_f), min(mean_signal_bias_r))
max_ = max(max(mean_signal_bias_f), max(mean_signal_bias_r))
ax1.plot(x, mean_signal_bias_f, color='purple', label='Forward')
ax1.plot(x, mean_signal_bias_r, color='green', label='Reverse')
ax1.text(0.15, 0.9, 'n = {}'.format(num_sites), verticalalignment='bottom',
horizontalalignment='right', transform=ax1.transAxes, fontweight='bold')
ax1.xaxis.set_ticks_position('bottom')
ax1.yaxis.set_ticks_position('left')
ax1.spines['top'].set_visible(False)
ax1.spines['right'].set_visible(False)
ax1.spines['left'].set_position(('outward', 15))
ax1.spines['bottom'].set_position(('outward', 5))
ax1.tick_params(direction='out')
ax1.set_xticks([start, 0, end])
ax1.set_xticklabels([str(start), 0, str(end)])
ax1.set_yticks([min_, max_])
ax1.set_yticklabels([str(round(min_, 2)), str(round(max_, 2))], rotation=90)
ax1.set_title(args.output_prefix, fontweight='bold')
ax1.set_xlim(start, end)
ax1.set_ylim([min_, max_])
ax1.legend(loc="upper right", frameon=False)
####################################################################
#####################################################################
    # Strand-specific raw (ax2) and bias-corrected (ax3) panels are disabled;
    # only the bias (ax1) and raw-vs-corrected (ax4) panels are drawn.
#####################################################################
# Bias corrected, non-bias corrected (not strand specific)
min_ = min(min(mean_signal_raw), min(mean_signal_bc))
max_ = max(max(mean_signal_raw), max(mean_signal_bc))
ax4.plot(x, mean_signal_raw, color='blue', label='Uncorrected')
ax4.plot(x, mean_signal_bc, color='red', label='Corrected')
ax4.xaxis.set_ticks_position('bottom')
ax4.yaxis.set_ticks_position('left')
ax4.spines['top'].set_visible(False)
ax4.spines['right'].set_visible(False)
ax4.spines['left'].set_position(('outward', 15))
ax4.tick_params(direction='out')
ax4.set_xticks([start, 0, end])
ax4.set_xticklabels([str(start), 0, str(end)])
ax4.set_yticks([min_, max_])
ax4.set_yticklabels([str(round(min_, 2)), str(round(max_, 2))], rotation=90)
ax4.set_xlim(start, end)
ax4.set_ylim([min_, max_])
ax4.legend(loc="upper right", frameon=False)
ax4.spines['bottom'].set_position(('outward', 40))
###############################################################################
# merge the above figures
figure_name = os.path.join(args.output_location, "{}.line.eps".format(args.output_prefix))
fig.subplots_adjust(bottom=.2, hspace=.5)
fig.tight_layout()
fig.savefig(figure_name, format="eps", dpi=300)
# Creating canvas and printing eps / pdf with merged results
output_fname = os.path.join(args.output_location, "{}.eps".format(args.output_prefix))
c = pyx.canvas.canvas()
c.insert(pyx.epsfile.epsfile(0, 0, figure_name, scale=1.0))
c.insert(pyx.epsfile.epsfile(1.45, 0.89, logo_fname, width=18.3, height=1.75))
c.writeEPSfile(output_fname)
os.system("epstopdf " + figure_name)
os.system("epstopdf " + logo_fname)
os.system("epstopdf " + output_fname)
os.remove(pwm_fname)
os.remove(os.path.join(args.output_location, "{}.line.eps".format(args.output_prefix)))
os.remove(os.path.join(args.output_location, "{}.logo.eps".format(args.output_prefix)))
os.remove(os.path.join(args.output_location, "{}.line.pdf".format(args.output_prefix)))
os.remove(os.path.join(args.output_location, "{}.logo.pdf".format(args.output_prefix)))
os.remove(os.path.join(args.output_location, "{}.eps".format(args.output_prefix)))
def strand_line(args):
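    """Plot the mean forward/reverse cut-site signal around motif-predicted
    binding sites, bias-corrected when a bias table is supplied and raw
    otherwise. Writes the normalized and slope signals to <output_prefix>.txt
    and a logo-annotated line plot to <output_prefix>.pdf."""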
genomic_signal = GenomicSignal(args.reads_file)
genomic_signal.load_sg_coefs(slope_window_size=9)
table = None
if args.bias_table is not None:
bias_table = BiasTable()
bias_table_list = args.bias_table.split(",")
table = bias_table.load_table(table_file_name_F=bias_table_list[0],
table_file_name_R=bias_table_list[1])
genome_data = GenomeData(args.organism)
fasta = Fastafile(genome_data.get_genome())
num_sites = 0
mpbs_regions = GenomicRegionSet("Motif Predicted Binding Sites")
mpbs_regions.read(args.motif_file)
bam = Samfile(args.reads_file, "rb")
mean_signal_f = np.zeros(args.window_size)
mean_signal_r = np.zeros(args.window_size)
pwm_dict = None
for region in mpbs_regions:
if str(region.name).split(":")[-1] == "Y":
            # Window of size window_size centered on the motif midpoint
            mid = (region.initial + region.final) // 2
            p1 = mid - (args.window_size // 2)
            p2 = mid + (args.window_size // 2)
if args.bias_table is not None:
signal_f, signal_r = genomic_signal.get_bc_signal_by_fragment_length(ref=region.chrom, start=p1,
end=p2, bam=bam, fasta=fasta,
bias_table=table,
forward_shift=args.forward_shift,
reverse_shift=args.reverse_shift)
else:
signal_f, signal_r = genomic_signal.get_raw_signal_by_fragment_length(ref=region.chrom, start=p1,
end=p2,
bam=bam,
forward_shift=args.forward_shift,
reverse_shift=args.reverse_shift)
num_sites += 1
mean_signal_f = np.add(mean_signal_f, signal_f)
mean_signal_r = np.add(mean_signal_r, signal_r)
# Update pwm
if pwm_dict is None:
pwm_dict = dict([("A", [0.0] * (p2 - p1)), ("C", [0.0] * (p2 - p1)),
("G", [0.0] * (p2 - p1)), ("T", [0.0] * (p2 - p1)),
("N", [0.0] * (p2 - p1))])
aux_plus = 1
dna_seq = str(fasta.fetch(region.chrom, p1, p2)).upper()
if (region.final - region.initial) % 2 == 0:
aux_plus = 0
dna_seq_rev = AuxiliaryFunctions.revcomp(str(fasta.fetch(region.chrom,
p1 + aux_plus, p2 + aux_plus)).upper())
if region.orientation == "+":
for i in range(0, len(dna_seq)):
pwm_dict[dna_seq[i]][i] += 1
elif region.orientation == "-":
for i in range(0, len(dna_seq_rev)):
pwm_dict[dna_seq_rev[i]][i] += 1
mean_norm_signal_f = genomic_signal.boyle_norm(mean_signal_f)
perc = scoreatpercentile(mean_norm_signal_f, 98)
std = np.std(mean_norm_signal_f)
mean_norm_signal_f = genomic_signal.hon_norm_atac(mean_norm_signal_f, perc, std)
mean_norm_signal_r = genomic_signal.boyle_norm(mean_signal_r)
perc = scoreatpercentile(mean_norm_signal_r, 98)
std = np.std(mean_norm_signal_r)
mean_norm_signal_r = genomic_signal.hon_norm_atac(mean_norm_signal_r, perc, std)
mean_slope_signal_f = genomic_signal.slope(mean_norm_signal_f, genomic_signal.sg_coefs)
mean_slope_signal_r = genomic_signal.slope(mean_norm_signal_r, genomic_signal.sg_coefs)
# Output the norm and slope signal
output_fname = os.path.join(args.output_location, "{}.txt".format(args.output_prefix))
f = open(output_fname, "w")
f.write("\t".join((map(str, mean_norm_signal_f))) + "\n")
f.write("\t".join((map(str, mean_slope_signal_f))) + "\n")
f.write("\t".join((map(str, mean_norm_signal_r))) + "\n")
f.write("\t".join((map(str, mean_slope_signal_r))) + "\n")
f.close()
# Output PWM and create logo
pwm_fname = os.path.join(args.output_location, "{}.pwm".format(args.output_prefix))
pwm_file = open(pwm_fname, "w")
for e in ["A", "C", "G", "T"]:
pwm_file.write(" ".join([str(int(f)) for f in pwm_dict[e]]) + "\n")
pwm_file.close()
logo_fname = os.path.join(args.output_location, "{}.logo.eps".format(args.output_prefix))
pwm = motifs.read(open(pwm_fname), "pfm")
pwm.weblogo(logo_fname, format="eps", stack_width="large", stacks_per_line=str(args.window_size),
color_scheme="color_classic", unit_name="", show_errorbars=False, logo_title="",
show_xaxis=False, xaxis_label="", show_yaxis=False, yaxis_label="",
show_fineprint=False, show_ends=False)
    start = -(args.window_size // 2)
    end = (args.window_size // 2) - 1
x = np.linspace(start, end, num=args.window_size)
fig = plt.figure(figsize=(8, 4))
ax = fig.add_subplot(111)
min_signal = min(min(mean_signal_f), min(mean_signal_r))
max_signal = max(max(mean_signal_f), max(mean_signal_r))
ax.plot(x, mean_signal_f, color='red', label='Forward')
ax.plot(x, mean_signal_r, color='green', label='Reverse')
ax.set_title(args.output_prefix, fontweight='bold')
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_position(('outward', 15))
ax.tick_params(direction='out')
ax.set_xticks([start, 0, end])
ax.set_xticklabels([str(start), 0, str(end)])
ax.set_yticks([min_signal, max_signal])
ax.set_yticklabels([str(round(min_signal, 2)), str(round(max_signal, 2))], rotation=90)
ax.set_xlim(start, end)
ax.set_ylim([min_signal, max_signal])
ax.legend(loc="upper right", frameon=False)
ax.spines['bottom'].set_position(('outward', 40))
figure_name = os.path.join(args.output_location, "{}.line.eps".format(args.output_prefix))
fig.subplots_adjust(bottom=.2, hspace=.5)
fig.tight_layout()
fig.savefig(figure_name, format="eps", dpi=300)
# Creating canvas and printing eps / pdf with merged results
output_fname = os.path.join(args.output_location, "{}.eps".format(args.output_prefix))
c = pyx.canvas.canvas()
c.insert(pyx.epsfile.epsfile(0, 0, figure_name, scale=1.0))
c.insert(pyx.epsfile.epsfile(1.37, 0.89, logo_fname, width=18.5, height=1.75))
c.writeEPSfile(output_fname)
os.system("epstopdf " + figure_name)
os.system("epstopdf " + logo_fname)
os.system("epstopdf " + output_fname)
# os.remove(pwm_fname)
os.remove(os.path.join(args.output_location, "{}.line.eps".format(args.output_prefix)))
os.remove(os.path.join(args.output_location, "{}.logo.eps".format(args.output_prefix)))
os.remove(os.path.join(args.output_location, "{}.line.pdf".format(args.output_prefix)))
os.remove(os.path.join(args.output_location, "{}.logo.pdf".format(args.output_prefix)))
os.remove(os.path.join(args.output_location, "{}.eps".format(args.output_prefix)))
def unstrand_line(args):
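    """Write the unstranded cut-site signal of every motif-predicted binding
    site (one row per region, flipped for '-' orientation) to
    <output_prefix>.txt and plot the mean profile with its sequence logo."""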
genomic_signal = GenomicSignal(args.reads_file)
genomic_signal.load_sg_coefs(slope_window_size=9)
table = None
if args.bias_table is not None:
bias_table = BiasTable()
bias_table_list = args.bias_table.split(",")
table = bias_table.load_table(table_file_name_F=bias_table_list[0], table_file_name_R=bias_table_list[1])
genome_data = GenomeData(args.organism)
fasta = Fastafile(genome_data.get_genome())
num_sites = 0
mpbs_regions = GenomicRegionSet("Motif Predicted Binding Sites")
mpbs_regions.read(args.motif_file)
bam = Samfile(args.reads_file, "rb")
mean_signal = np.zeros(args.window_size)
pwm_dict = None
output_fname = os.path.join(args.output_location, "{}.txt".format(args.output_prefix))
with open(output_fname, "w") as output_f:
for region in mpbs_regions:
            mid = (region.initial + region.final) // 2
            p1 = mid - (args.window_size // 2)
            p2 = mid + (args.window_size // 2)
if args.bias_table is not None:
signal = genomic_signal.get_bc_signal_by_fragment_length(ref=region.chrom, start=p1, end=p2,
bam=bam, fasta=fasta,
bias_table=table,
forward_shift=args.forward_shift,
reverse_shift=args.reverse_shift,
strand=False)
else:
signal = genomic_signal.get_raw_signal_by_fragment_length(ref=region.chrom, start=p1, end=p2,
bam=bam,
forward_shift=args.forward_shift,
reverse_shift=args.reverse_shift,
strand=False)
if region.orientation == "-":
signal = np.flip(signal)
name = "{}_{}_{}".format(region.chrom, str(region.initial), str(region.final))
output_f.write(name + "\t" + "\t".join(map(str, map(int, signal))) + "\n")
num_sites += 1
mean_signal = np.add(mean_signal, signal)
# Update pwm
if pwm_dict is None:
pwm_dict = dict([("A", [0.0] * (p2 - p1)), ("C", [0.0] * (p2 - p1)),
("G", [0.0] * (p2 - p1)), ("T", [0.0] * (p2 - p1)),
("N", [0.0] * (p2 - p1))])
aux_plus = 1
dna_seq = str(fasta.fetch(region.chrom, p1, p2)).upper()
if (region.final - region.initial) % 2 == 0:
aux_plus = 0
dna_seq_rev = AuxiliaryFunctions.revcomp(
str(fasta.fetch(region.chrom, p1 + aux_plus, p2 + aux_plus)).upper())
if region.orientation == "+":
for i in range(0, len(dna_seq)):
pwm_dict[dna_seq[i]][i] += 1
elif region.orientation == "-":
for i in range(0, len(dna_seq_rev)):
pwm_dict[dna_seq_rev[i]][i] += 1
mean_signal = mean_signal / num_sites
# Output PWM and create logo
pwm_fname = os.path.join(args.output_location, "{}.pwm".format(args.output_prefix))
pwm_file = open(pwm_fname, "w")
for e in ["A", "C", "G", "T"]:
pwm_file.write(" ".join([str(int(f)) for f in pwm_dict[e]]) + "\n")
pwm_file.close()
logo_fname = os.path.join(args.output_location, "{}.logo.eps".format(args.output_prefix))
pwm = motifs.read(open(pwm_fname), "pfm")
pwm.weblogo(logo_fname, format="eps", stack_width="large", stacks_per_line=str(args.window_size),
color_scheme="color_classic", unit_name="", show_errorbars=False, logo_title="",
show_xaxis=False, xaxis_label="", show_yaxis=False, yaxis_label="",
show_fineprint=False, show_ends=False)
    start = -(args.window_size // 2)
    end = (args.window_size // 2) - 1
x = np.linspace(start, end, num=args.window_size)
fig = plt.figure(figsize=(8, 4))
ax = fig.add_subplot(111)
min_signal = min(mean_signal)
max_signal = max(mean_signal)
ax.plot(x, mean_signal, color='red')
ax.set_title(args.output_prefix, fontweight='bold')
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_position(('outward', 15))
ax.tick_params(direction='out')
ax.set_xticks([start, 0, end])
ax.set_xticklabels([str(start), 0, str(end)])
ax.set_yticks([min_signal, max_signal])
ax.set_yticklabels([str(round(min_signal, 2)), str(round(max_signal, 2))], rotation=90)
ax.set_xlim(start, end)
ax.set_ylim([min_signal, max_signal])
ax.spines['bottom'].set_position(('outward', 40))
figure_name = os.path.join(args.output_location, "{}.line.eps".format(args.output_prefix))
fig.subplots_adjust(bottom=.2, hspace=.5)
fig.tight_layout()
fig.savefig(figure_name, format="eps", dpi=300)
# Creating canvas and printing eps / pdf with merged results
output_fname = os.path.join(args.output_location, "{}.eps".format(args.output_prefix))
c = pyx.canvas.canvas()
c.insert(pyx.epsfile.epsfile(0, 0, figure_name, scale=1.0))
c.insert(pyx.epsfile.epsfile(1.31, 0.89, logo_fname, width=18.5, height=1.75))
c.writeEPSfile(output_fname)
os.system("epstopdf " + figure_name)
os.system("epstopdf " + logo_fname)
os.system("epstopdf " + output_fname)
os.remove(pwm_fname)
os.remove(os.path.join(args.output_location, "{}.line.eps".format(args.output_prefix)))
os.remove(os.path.join(args.output_location, "{}.logo.eps".format(args.output_prefix)))
os.remove(os.path.join(args.output_location, "{}.line.pdf".format(args.output_prefix)))
# os.remove(os.path.join(args.output_location, "{}.logo.pdf".format(args.output_prefix)))
os.remove(os.path.join(args.output_location, "{}.eps".format(args.output_prefix)))
def fragment_size_raw_line(args):
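    """Plot raw cut-site profiles around motif-predicted binding sites
    stratified by fragment size: all fragments, <= 145 bp (nucleosome-free),
    146-307 bp (mono-nucleosome) and > 307 bp (poly-nucleosome)."""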
mpbs_regions = GenomicRegionSet("Motif Predicted Binding Sites")
mpbs_regions.read(args.motif_file)
bam = Samfile(args.reads_file, "rb")
signal_f_max_145 = np.zeros(args.window_size)
signal_r_max_145 = np.zeros(args.window_size)
signal_f_146_307 = np.zeros(args.window_size)
signal_r_146_307 = np.zeros(args.window_size)
signal_f_min_307 = np.zeros(args.window_size)
signal_r_min_307 = np.zeros(args.window_size)
signal_f = np.zeros(args.window_size)
signal_r = np.zeros(args.window_size)
for region in mpbs_regions:
if str(region.name).split(":")[-1] == "Y":
            # Window of size window_size centered on the motif midpoint
            mid = (region.initial + region.final) // 2
            p1 = mid - (args.window_size // 2)
            p2 = mid + (args.window_size // 2)
# Fetch raw signal
for read in bam.fetch(region.chrom, p1, p2):
# All reads
if not read.is_reverse:
cut_site = read.pos + args.forward_shift
if p1 <= cut_site < p2:
signal_f[cut_site - p1] += 1.0
else:
cut_site = read.aend + args.reverse_shift - 1
if p1 <= cut_site < p2:
signal_r[cut_site - p1] += 1.0
# length <= 145
if abs(read.template_length) <= 145:
if not read.is_reverse:
cut_site = read.pos + args.forward_shift
if p1 <= cut_site < p2:
signal_f_max_145[cut_site - p1] += 1.0
else:
cut_site = read.aend + args.reverse_shift - 1
if p1 <= cut_site < p2:
signal_r_max_145[cut_site - p1] += 1.0
# length > 145 and <= 307
if 145 < abs(read.template_length) <= 307:
if not read.is_reverse:
cut_site = read.pos + args.forward_shift
if p1 <= cut_site < p2:
signal_f_146_307[cut_site - p1] += 1.0
else:
cut_site = read.aend + args.reverse_shift - 1
if p1 <= cut_site < p2:
signal_r_146_307[cut_site - p1] += 1.0
# length > 307
if abs(read.template_length) > 307:
if not read.is_reverse:
cut_site = read.pos + args.forward_shift
if p1 <= cut_site < p2:
signal_f_min_307[cut_site - p1] += 1.0
else:
cut_site = read.aend + args.reverse_shift - 1
if p1 <= cut_site < p2:
signal_r_min_307[cut_site - p1] += 1.0
    # Output the fragment-size-stratified raw signals
output_fname = os.path.join(args.output_location, "{}.txt".format(args.output_prefix))
f = open(output_fname, "w")
f.write("\t".join((map(str, signal_f))) + "\n")
f.write("\t".join((map(str, signal_r))) + "\n")
f.write("\t".join((map(str, signal_f_max_145))) + "\n")
f.write("\t".join((map(str, signal_r_max_145))) + "\n")
f.write("\t".join((map(str, signal_f_146_307))) + "\n")
f.write("\t".join((map(str, signal_r_146_307))) + "\n")
f.write("\t".join((map(str, signal_f_min_307))) + "\n")
f.write("\t".join((map(str, signal_r_min_307))) + "\n")
f.close()
# find out the linker position
pos_f_1, pos_r_1, pos_f_2, pos_r_2 = get_linkers_position(signal_f_146_307,
signal_r_146_307,
signal_f_min_307,
signal_r_min_307)
    p1 = (pos_f_1 - pos_f_2) // 2 + pos_f_2
p2 = p1 + 180
p3 = args.window_size - p2
p4 = args.window_size - p1
fig, (ax1, ax2, ax3, ax4) = plt.subplots(4, 1, figsize=(8, 8))
    start = -(args.window_size // 2)
    end = (args.window_size // 2) - 1
x = np.linspace(start, end, num=args.window_size)
    x_ticks = [start, p1 - 500, p2 - 500, 0, p3 - 500, p4 - 500, end]  # assumes window_size == 1000
update_axes_for_fragment_size_line(ax1, x, x_ticks, start, end, signal_f, signal_r, p1, p2,
p3, p4)
update_axes_for_fragment_size_line(ax2, x, x_ticks, start, end, signal_f_max_145, signal_r_max_145, p1, p2,
p3, p4)
update_axes_for_fragment_size_line(ax3, x, x_ticks, start, end, signal_f_146_307, signal_r_146_307, p1, p2,
p3, p4)
update_axes_for_fragment_size_line(ax4, x, x_ticks, start, end, signal_f_min_307, signal_r_min_307, p1, p2,
p3, p4)
figure_name = os.path.join(args.output_location, "{}.pdf".format(args.output_prefix))
fig.subplots_adjust(bottom=.2, hspace=.5)
fig.tight_layout()
fig.savefig(figure_name, format="pdf", dpi=300)
def fragment_size_bc_line(args):
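    """Same fragment-size stratification as fragment_size_raw_line, but using
    the bias-corrected signal from GenomicSignal.get_bc_signal_by_fragment_length."""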
mpbs_regions = GenomicRegionSet("Motif Predicted Binding Sites")
mpbs_regions.read(args.motif_file)
genomic_signal = GenomicSignal(args.reads_file)
genomic_signal.load_sg_coefs(11)
bam = Samfile(args.reads_file, "rb")
genome_data = GenomeData(args.organism)
fasta = Fastafile(genome_data.get_genome())
bias_table = BiasTable()
bias_table_list = args.bias_table.split(",")
table = bias_table.load_table(table_file_name_F=bias_table_list[0],
table_file_name_R=bias_table_list[1])
signal_f_max_145 = np.zeros(args.window_size)
signal_r_max_145 = np.zeros(args.window_size)
signal_f_146_307 = np.zeros(args.window_size)
signal_r_146_307 = np.zeros(args.window_size)
signal_f_min_307 = np.zeros(args.window_size)
signal_r_min_307 = np.zeros(args.window_size)
signal_f = np.zeros(args.window_size)
signal_r = np.zeros(args.window_size)
for region in mpbs_regions:
if str(region.name).split(":")[-1] == "Y":
            mid = (region.initial + region.final) // 2
            p1 = mid - (args.window_size // 2)
            p2 = mid + (args.window_size // 2)
# All reads
signal_bc_f, signal_bc_r = \
genomic_signal.get_bc_signal_by_fragment_length(ref=region.chrom, start=p1, end=p2,
bam=bam, fasta=fasta,
bias_table=table,
forward_shift=args.forward_shift,
reverse_shift=args.reverse_shift,
min_length=None, max_length=None,
strand=True)
# length <= 145
signal_bc_max_145_f, signal_bc_max_145_r = \
genomic_signal.get_bc_signal_by_fragment_length(ref=region.chrom, start=p1, end=p2,
bam=bam, fasta=fasta,
bias_table=table,
forward_shift=args.forward_shift,
reverse_shift=args.reverse_shift,
min_length=None, max_length=145,
strand=True)
# length > 145 and <= 307
signal_bc_146_307_f, signal_bc_146_307_r = \
genomic_signal.get_bc_signal_by_fragment_length(ref=region.chrom, start=p1, end=p2,
bam=bam, fasta=fasta,
bias_table=table,
forward_shift=args.forward_shift,
reverse_shift=args.reverse_shift,
min_length=145, max_length=307,
strand=True)
# length > 307
signal_bc_min_307_f, signal_bc_min_307_r = \
genomic_signal.get_bc_signal_by_fragment_length(ref=region.chrom, start=p1, end=p2,
bam=bam, fasta=fasta,
bias_table=table,
forward_shift=args.forward_shift,
reverse_shift=args.reverse_shift,
min_length=307, max_length=None,
strand=True)
signal_f = np.add(signal_f, np.array(signal_bc_f))
signal_r = np.add(signal_r, np.array(signal_bc_r))
signal_f_max_145 = np.add(signal_f_max_145, np.array(signal_bc_max_145_f))
signal_r_max_145 = np.add(signal_r_max_145, np.array(signal_bc_max_145_r))
signal_f_146_307 = np.add(signal_f_146_307, np.array(signal_bc_146_307_f))
signal_r_146_307 = np.add(signal_r_146_307, np.array(signal_bc_146_307_r))
signal_f_min_307 = np.add(signal_f_min_307, np.array(signal_bc_min_307_f))
signal_r_min_307 = np.add(signal_r_min_307, np.array(signal_bc_min_307_r))
    # Output the fragment-size-stratified bias-corrected signals
output_fname = os.path.join(args.output_location, "{}.txt".format(args.output_prefix))
f = open(output_fname, "w")
f.write("\t".join((map(str, signal_f))) + "\n")
f.write("\t".join((map(str, signal_r))) + "\n")
f.write("\t".join((map(str, signal_f_max_145))) + "\n")
f.write("\t".join((map(str, signal_r_max_145))) + "\n")
f.write("\t".join((map(str, signal_f_146_307))) + "\n")
f.write("\t".join((map(str, signal_r_146_307))) + "\n")
f.write("\t".join((map(str, signal_f_min_307))) + "\n")
f.write("\t".join((map(str, signal_r_min_307))) + "\n")
f.close()
# find out the linker position
pos_f_1, pos_r_1, pos_f_2, pos_r_2 = get_linkers_position(signal_f_146_307,
signal_r_146_307,
signal_f_min_307,
signal_r_min_307)
    p1 = (pos_f_1 - pos_f_2) // 2 + pos_f_2
p2 = p1 + 180
p3 = args.window_size - p2
p4 = args.window_size - p1
fig, (ax1, ax2, ax3, ax4) = plt.subplots(4, 1, figsize=(8, 8))
    start = -(args.window_size // 2)
    end = (args.window_size // 2) - 1
x = np.linspace(start, end, num=args.window_size)
    x_ticks = [start, p1 - 500, p2 - 500, 0, p3 - 500, p4 - 500, end]  # assumes window_size == 1000
update_axes_for_fragment_size_line(ax1, x, x_ticks, start, end, signal_f, signal_r, p1, p2, p3, p4)
update_axes_for_fragment_size_line(ax2, x, x_ticks, start, end, signal_f_max_145, signal_r_max_145, p1, p2,
p3, p4)
update_axes_for_fragment_size_line(ax3, x, x_ticks, start, end, signal_f_146_307, signal_r_146_307, p1, p2,
p3, p4)
update_axes_for_fragment_size_line(ax4, x, x_ticks, start, end, signal_f_min_307, signal_r_min_307, p1, p2,
p3, p4)
figure_name = os.path.join(args.output_location, "{}.pdf".format(args.output_prefix))
fig.subplots_adjust(bottom=.2, hspace=.5)
fig.tight_layout()
fig.savefig(figure_name, format="pdf", dpi=300)
def update_axes_for_fragment_size_line(ax, x, x_ticks, start, end, signal_f, signal_r, p1, p2, p3, p4):
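    """Style one fragment-size subplot and annotate the fraction of forward
    (red) and reverse (green) cut sites falling into each of the six segments
    delimited by p1, p2, p3 and p4. The hard-coded 500/1000 coordinates assume
    a 1000-bp window."""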
max_signal = max(max(signal_f), max(signal_r))
min_signal = min(min(signal_f), min(signal_r))
ax.plot(x, signal_f, color='red', label='Forward')
ax.plot(x, signal_r, color='green', label='Reverse')
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_position(('outward', 15))
ax.tick_params(direction='out')
ax.set_xticks(x_ticks)
ax.set_xticklabels(map(str, x_ticks))
ax.set_xlim(start, end)
ax.set_yticks([min_signal, max_signal])
ax.set_yticklabels([str(int(min_signal)), str(int(max_signal))], rotation=90)
ax.set_ylim([min_signal, max_signal])
ax.legend().set_visible(False)
    # Fraction of cut sites per segment, each strand normalized by its own total
    total_f, total_r = sum(signal_f), sum(signal_r)
    f_1, r_1 = sum(signal_f[:p1]) / total_f, sum(signal_r[:p1]) / total_r
    f_2, r_2 = sum(signal_f[p1:p2]) / total_f, sum(signal_r[p1:p2]) / total_r
    f_3, r_3 = sum(signal_f[p2:500]) / total_f, sum(signal_r[p2:500]) / total_r
    f_4, r_4 = sum(signal_f[500:p3]) / total_f, sum(signal_r[500:p3]) / total_r
    f_5, r_5 = sum(signal_f[p3:p4]) / total_f, sum(signal_r[p3:p4]) / total_r
    f_6, r_6 = sum(signal_f[p4:]) / total_f, sum(signal_r[p4:]) / total_r
text_x_1 = ((p1 - 0) / 2.0 + 0) / 1000
text_x_2 = ((p2 - p1) / 2.0 + p1) / 1000
text_x_3 = ((500 - p2) / 2.0 + p2) / 1000
text_x_4 = ((p3 - 500) / 2.0 + 500) / 1000
text_x_5 = ((p4 - p3) / 2.0 + p3) / 1000
text_x_6 = ((1000 - p4) / 2.0 + p4) / 1000
ax.text(text_x_1, 1.0, str(round(f_1, 2)), verticalalignment='center', color='red',
horizontalalignment='center', transform=ax.transAxes, fontsize=12)
ax.text(text_x_1, 0.9, str(round(r_1, 2)), verticalalignment='center', color='green',
horizontalalignment='center', transform=ax.transAxes, fontsize=12)
ax.text(text_x_2, 1.0, str(round(f_2, 2)), verticalalignment='center', color='red',
horizontalalignment='center', transform=ax.transAxes, fontsize=12)
ax.text(text_x_2, 0.9, str(round(r_2, 2)), verticalalignment='center', color='green',
horizontalalignment='center', transform=ax.transAxes, fontsize=12)
ax.text(text_x_3, 1.0, str(round(f_3, 2)), verticalalignment='center', color='red',
horizontalalignment='center', transform=ax.transAxes, fontsize=12)
ax.text(text_x_3, 0.9, str(round(r_3, 2)), verticalalignment='center', color='green',
horizontalalignment='center', transform=ax.transAxes, fontsize=12)
ax.text(text_x_4, 1.0, str(round(f_4, 2)), verticalalignment='center', color='red',
horizontalalignment='center', transform=ax.transAxes, fontsize=12)
ax.text(text_x_4, 0.9, str(round(r_4, 2)), verticalalignment='center', color='green',
horizontalalignment='center', transform=ax.transAxes, fontsize=12)
ax.text(text_x_5, 1.0, str(round(f_5, 2)), verticalalignment='center', color='red',
horizontalalignment='center', transform=ax.transAxes, fontsize=12)
ax.text(text_x_5, 0.9, str(round(r_5, 2)), verticalalignment='center', color='green',
horizontalalignment='center', transform=ax.transAxes, fontsize=12)
ax.text(text_x_6, 1.0, str(round(f_6, 2)), verticalalignment='center', color='red',
horizontalalignment='center', transform=ax.transAxes, fontsize=12)
ax.text(text_x_6, 0.9, str(round(r_6, 2)), verticalalignment='center', color='green',
horizontalalignment='center', transform=ax.transAxes, fontsize=12)
def get_linkers_position(signal_f_146_307, signal_r_146_307, signal_f_min_307, signal_r_min_307):
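    """Estimate the linker positions flanking the motif from the
    Savitzky-Golay-smoothed mono-nucleosome (146-307 bp) and poly-nucleosome
    (> 307 bp) profiles; returns the forward and reverse peak positions."""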
smooth_signal_f_146_307 = savgol_filter(signal_f_146_307, window_length=51, polyorder=2)
smooth_signal_r_146_307 = savgol_filter(signal_r_146_307, window_length=51, polyorder=2)
smooth_signal_f_min_307 = savgol_filter(signal_f_min_307, window_length=51, polyorder=2)
smooth_signal_r_min_307 = savgol_filter(signal_r_min_307, window_length=51, polyorder=2)
position_f_1 = np.argmax(smooth_signal_f_146_307[:400])
position_f_2 = np.argmax(smooth_signal_f_min_307[:position_f_1])
position_r_1 = np.argmax(smooth_signal_r_146_307[600:]) + 600
position_r_2 = np.argmax(smooth_signal_r_min_307[position_r_1:]) + position_r_1
return position_f_1, position_r_1, position_f_2, position_r_2
def rescaling(vector):
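    """Min-max scale a vector to the interval [0, 1]."""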
maxN = max(vector)
minN = min(vector)
return [(e - minN) / (maxN - minN) for e in vector]
class Plot:
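    """Bundles the parameters (organism, reads, motif file, window size,
    shifts, bias table, ...) shared by the plotting methods below."""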
def __init__(self, organism, reads_file, motif_file, window_size,
downstream_ext, upstream_ext, forward_shift, reverse_shift,
initial_clip, bias_table, k_nb, output_loc, output_prefix):
self.organism = organism
self.reads_file = reads_file
self.motif_file = motif_file
self.window_size = window_size
self.downstream_ext = downstream_ext
self.upstream_ext = upstream_ext
self.forward_shift = forward_shift
self.reverse_shift = reverse_shift
self.initial_clip = initial_clip
self.bias_table = bias_table
self.k_nb = k_nb
self.output_loc = output_loc
self.output_prefix = output_prefix
def line3(self, bias_table1, bias_table2):
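        """Compare the mean raw signal against the bias-corrected signals
        obtained with two different bias tables (legends 'VOM_8_NonNaked' and
        'KMER_6_NonNaked'), all rescaled to [0, 1], and plot them with the
        motif logo."""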
signal = GenomicSignal(self.reads_file)
signal.load_sg_coefs(slope_window_size=9)
bias_table = BiasTable()
bias_table_list = bias_table1.split(",")
table1 = bias_table.load_table(table_file_name_F=bias_table_list[0],
table_file_name_R=bias_table_list[1])
bias_table = BiasTable()
bias_table_list = bias_table2.split(",")
table2 = bias_table.load_table(table_file_name_F=bias_table_list[0],
table_file_name_R=bias_table_list[1])
genome_data = GenomeData(self.organism)
fasta = Fastafile(genome_data.get_genome())
pwm_dict = dict([("A", [0.0] * self.window_size), ("C", [0.0] * self.window_size),
("G", [0.0] * self.window_size), ("T", [0.0] * self.window_size),
("N", [0.0] * self.window_size)])
num_sites = 0
mpbs_regions = GenomicRegionSet("Motif Predicted Binding Sites")
mpbs_regions.read(self.motif_file)
bam = Samfile(self.reads_file, "rb")
mean_signal_raw = np.zeros(self.window_size)
mean_signal_bc1 = np.zeros(self.window_size)
mean_signal_bc2 = np.zeros(self.window_size)
for region in mpbs_regions:
if str(region.name).split(":")[-1] == "Y":
                # Window of size window_size centered on the motif midpoint
                mid = (region.initial + region.final) // 2
                p1 = mid - (self.window_size // 2)
                p2 = mid + (self.window_size // 2)
signal_raw, signal_bc1 = \
self.get_signal3(ref=region.chrom, start=p1, end=p2, bam=bam, fasta=fasta, bias_table=table1)
signal_raw, signal_bc2 = \
self.get_signal3(ref=region.chrom, start=p1, end=p2, bam=bam, fasta=fasta, bias_table=table2)
num_sites += 1
mean_signal_raw = np.add(mean_signal_raw, np.array(signal_raw))
mean_signal_bc1 = np.add(mean_signal_bc1, np.array(signal_bc1))
mean_signal_bc2 = np.add(mean_signal_bc2, np.array(signal_bc2))
# Update pwm
aux_plus = 1
dna_seq = str(fasta.fetch(region.chrom, p1, p2)).upper()
if (region.final - region.initial) % 2 == 0:
aux_plus = 0
dna_seq_rev = AuxiliaryFunctions.revcomp(
str(fasta.fetch(region.chrom, p1 + aux_plus, p2 + aux_plus)).upper())
if region.orientation == "+":
for i in range(0, len(dna_seq)):
pwm_dict[dna_seq[i]][i] += 1
elif region.orientation == "-":
for i in range(0, len(dna_seq_rev)):
pwm_dict[dna_seq_rev[i]][i] += 1
mean_signal_raw = mean_signal_raw / num_sites
mean_signal_bc1 = mean_signal_bc1 / num_sites
mean_signal_bc2 = mean_signal_bc2 / num_sites
mean_signal_raw = self.rescaling(mean_signal_raw)
mean_signal_bc1 = self.rescaling(mean_signal_bc1)
mean_signal_bc2 = self.rescaling(mean_signal_bc2)
        # Output the mean raw and bias-corrected signals
output_fname = os.path.join(self.output_loc, "{}.txt".format(self.output_prefix))
f = open(output_fname, "w")
f.write("\t".join((map(str, mean_signal_raw))) + "\n")
f.write("\t".join((map(str, mean_signal_bc1))) + "\n")
f.write("\t".join((map(str, mean_signal_bc2))) + "\n")
f.close()
# Output PWM and create logo
pwm_fname = os.path.join(self.output_loc, "{}.pwm".format(self.output_prefix))
pwm_file = open(pwm_fname, "w")
for e in ["A", "C", "G", "T"]:
pwm_file.write(" ".join([str(int(f)) for f in pwm_dict[e]]) + "\n")
pwm_file.close()
logo_fname = os.path.join(self.output_loc, "{}.logo.eps".format(self.output_prefix))
pwm = motifs.read(open(pwm_fname), "pfm")
pwm.weblogo(logo_fname, format="eps", stack_width="large", stacks_per_line=str(self.window_size),
color_scheme="color_classic", unit_name="", show_errorbars=False, logo_title="",
show_xaxis=False, xaxis_label="", show_yaxis=False, yaxis_label="",
show_fineprint=False, show_ends=False)
fig, (ax1, ax2) = plt.subplots(2)
        start = -(self.window_size // 2)
        end = (self.window_size // 2) - 1
x = np.linspace(start, end, num=self.window_size)
############################################################
# bias signal per strand
min_ = min(mean_signal_raw)
max_ = max(mean_signal_raw)
ax1.plot(x, mean_signal_raw, color='red')
ax1.xaxis.set_ticks_position('bottom')
ax1.yaxis.set_ticks_position('left')
ax1.spines['top'].set_visible(False)
ax1.spines['right'].set_visible(False)
ax1.spines['left'].set_position(('outward', 15))
ax1.spines['bottom'].set_position(('outward', 5))
ax1.tick_params(direction='out')
ax1.set_xticks([start, 0, end])
ax1.set_xticklabels([str(start), 0, str(end)])
ax1.set_yticks([min_, max_])
ax1.set_yticklabels([str(round(min_, 2)), str(round(max_, 2))], rotation=90)
ax1.set_title(self.output_prefix, fontweight='bold')
ax1.set_xlim(start, end)
ax1.set_ylim([min_, max_])
ax1.set_ylabel("Raw Signal", rotation=90, fontweight='bold')
####################################################################
#####################################################################
# Bias corrected, non-bias corrected (not strand specific)
min_ = min(min(mean_signal_bc1), min(mean_signal_bc2))
max_ = max(max(mean_signal_bc1), max(mean_signal_bc2))
ax2.plot(x, mean_signal_bc1, color='red', label='VOM_8_NonNaked')
ax2.plot(x, mean_signal_bc2, color='green', label='KMER_6_NonNaked')
ax2.xaxis.set_ticks_position('bottom')
ax2.yaxis.set_ticks_position('left')
ax2.spines['top'].set_visible(False)
ax2.spines['right'].set_visible(False)
ax2.spines['left'].set_position(('outward', 15))
ax2.tick_params(direction='out')
ax2.set_xticks([start, 0, end])
ax2.set_xticklabels([str(start), 0, str(end)])
ax2.set_yticks([min_, max_])
ax2.set_yticklabels([str(round(min_, 2)), str(round(max_, 2))], rotation=90)
ax2.set_xlim(start, end)
ax2.set_ylim([min_, max_])
ax2.legend(loc="lower right", frameon=False)
ax2.spines['bottom'].set_position(('outward', 40))
ax2.set_xlabel("Coordinates from Motif Center", fontweight='bold')
ax2.set_ylabel("Bias Corrected Signal", rotation=90, fontweight='bold')
###################################################################################
###############################################################################
# merge the above figures
figure_name = os.path.join(self.output_loc, "{}.line.eps".format(self.output_prefix))
fig.subplots_adjust(bottom=.2, hspace=.5)
fig.tight_layout()
fig.savefig(figure_name, format="eps", dpi=300)
# Creating canvas and printing eps / pdf with merged results
output_fname = os.path.join(self.output_loc, "{}.eps".format(self.output_prefix))
c = pyx.canvas.canvas()
c.insert(pyx.epsfile.epsfile(0, 0, figure_name, scale=1.0))
c.insert(pyx.epsfile.epsfile(2.0, 1.39, logo_fname, width=13.8, height=1.75))
c.writeEPSfile(output_fname)
os.system("epstopdf " + figure_name)
os.system("epstopdf " + logo_fname)
os.system("epstopdf " + output_fname)
os.remove(pwm_fname)
os.remove(os.path.join(self.output_loc, "{}.line.eps".format(self.output_prefix)))
os.remove(os.path.join(self.output_loc, "{}.logo.eps".format(self.output_prefix)))
os.remove(os.path.join(self.output_loc, "{}.line.pdf".format(self.output_prefix)))
os.remove(os.path.join(self.output_loc, "{}.logo.pdf".format(self.output_prefix)))
os.remove(os.path.join(self.output_loc, "{}.eps".format(self.output_prefix)))
def line4(self):
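        """Plot the mean raw forward/reverse cut counts around the motif
        center, without any bias correction or rescaling."""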
genome_data = GenomeData(self.organism)
fasta = Fastafile(genome_data.get_genome())
mpbs_regions = GenomicRegionSet("Motif Predicted Binding Sites")
mpbs_regions.read(self.motif_file)
bam = Samfile(self.reads_file, "rb")
pwm_dict = None
signal_raw_f = None
signal_raw_r = None
num_sites = 0
for region in mpbs_regions:
if str(region.name).split(":")[-1] == "Y":
                # Window of size window_size centered on the motif midpoint
                mid = (region.initial + region.final) // 2
                p1 = mid - (self.window_size // 2)
                p2 = mid + (self.window_size // 2)
# p1 = region.initial - (self.window_size / 2)
# p2 = region.final + (self.window_size / 2)
size = p2 - p1
if pwm_dict is None:
pwm_dict = dict([("A", [0.0] * size), ("C", [0.0] * size),
("G", [0.0] * size), ("T", [0.0] * size),
("N", [0.0] * size)])
if signal_raw_f is None:
signal_raw_f = np.zeros(size)
if signal_raw_r is None:
signal_raw_r = np.zeros(size)
# Fetch raw signal
for read in bam.fetch(region.chrom, p1, p2):
if not read.is_reverse:
cut_site = read.pos + self.forward_shift
if p1 <= cut_site < p2:
signal_raw_f[cut_site - p1] += 1.0
else:
cut_site = read.aend + self.reverse_shift - 1
if p1 <= cut_site < p2:
signal_raw_r[cut_site - p1] += 1.0
num_sites += 1
# Update pwm
aux_plus = 1
dna_seq = str(fasta.fetch(region.chrom, p1, p2)).upper()
if (region.final - region.initial) % 2 == 0:
aux_plus = 0
dna_seq_rev = AuxiliaryFunctions.revcomp(str(fasta.fetch(region.chrom,
p1 + aux_plus, p2 + aux_plus)).upper())
if region.orientation == "+":
for i in range(0, len(dna_seq)):
pwm_dict[dna_seq[i]][i] += 1
elif region.orientation == "-":
for i in range(0, len(dna_seq_rev)):
pwm_dict[dna_seq_rev[i]][i] += 1
# mean_signal_raw_f = self.rescaling(signal_raw_f)
# mean_signal_raw_r = self.rescaling(signal_raw_r)
mean_signal_raw_f = signal_raw_f
mean_signal_raw_r = signal_raw_r
        # Output the mean raw strand signals
output_fname = os.path.join(self.output_loc, "{}.txt".format(self.output_prefix))
f = open(output_fname, "w")
f.write("\t".join((map(str, mean_signal_raw_f))) + "\n")
f.write("\t".join((map(str, mean_signal_raw_r))) + "\n")
f.close()
# Output PWM and create logo
pwm_fname = os.path.join(self.output_loc, "{}.pwm".format(self.output_prefix))
pwm_file = open(pwm_fname, "w")
for e in ["A", "C", "G", "T"]:
pwm_file.write(" ".join([str(int(f)) for f in pwm_dict[e]]) + "\n")
pwm_file.close()
logo_fname = os.path.join(self.output_loc, "{}.logo.eps".format(self.output_prefix))
pwm = motifs.read(open(pwm_fname), "pfm")
pwm.weblogo(logo_fname, format="eps", stack_width="large", stacks_per_line=str(size),
color_scheme="color_classic", unit_name="", show_errorbars=False, logo_title="",
show_xaxis=False, xaxis_label="", show_yaxis=False, yaxis_label="",
show_fineprint=False, show_ends=False)
        start = -(size // 2)
        end = (size // 2) - 1
x = np.linspace(start, end, num=size)
fig = plt.figure(figsize=(8, 4))
ax2 = fig.add_subplot(111)
min_signal = min(min(mean_signal_raw_f), min(mean_signal_raw_r))
max_signal = max(max(mean_signal_raw_f), max(mean_signal_raw_r))
ax2.plot(x, mean_signal_raw_f, color='red', label='Forward')
ax2.plot(x, mean_signal_raw_r, color='green', label='Reverse')
ax2.xaxis.set_ticks_position('bottom')
ax2.yaxis.set_ticks_position('left')
ax2.spines['top'].set_visible(False)
ax2.spines['right'].set_visible(False)
ax2.spines['left'].set_position(('outward', 15))
ax2.tick_params(direction='out')
ax2.set_title(self.output_prefix, fontweight='bold')
ax2.set_xticks([start, 0, end])
ax2.set_xticklabels([str(start), 0, str(end)])
ax2.set_yticks([min_signal, max_signal])
ax2.set_yticklabels([str(round(min_signal, 2)), str(round(max_signal, 2))], rotation=90)
ax2.set_xlim(start, end)
ax2.set_ylim([min_signal, max_signal])
ax2.legend(loc="upper right", frameon=False)
ax2.spines['bottom'].set_position(('outward', 40))
# ax2.set_xlabel("Coordinates from Motif Center", fontweight='bold')
# ax2.set_ylabel("Average Signal", rotation=90, fontweight='bold')
figure_name = os.path.join(self.output_loc, "{}.line.eps".format(self.output_prefix))
fig.subplots_adjust(bottom=.2, hspace=.5)
fig.tight_layout()
fig.savefig(figure_name, format="eps", dpi=300)
# Creating canvas and printing eps / pdf with merged results
output_fname = os.path.join(self.output_loc, "{}.eps".format(self.output_prefix))
c = pyx.canvas.canvas()
c.insert(pyx.epsfile.epsfile(0, 0, figure_name, scale=1.0))
c.insert(pyx.epsfile.epsfile(1.48, 0.92, logo_fname, width=18.5, height=1.75))
c.writeEPSfile(output_fname)
os.system("epstopdf " + figure_name)
os.system("epstopdf " + logo_fname)
os.system("epstopdf " + output_fname)
os.remove(pwm_fname)
os.remove(os.path.join(self.output_loc, "{}.line.eps".format(self.output_prefix)))
os.remove(os.path.join(self.output_loc, "{}.logo.eps".format(self.output_prefix)))
os.remove(os.path.join(self.output_loc, "{}.line.pdf".format(self.output_prefix)))
os.remove(os.path.join(self.output_loc, "{}.logo.pdf".format(self.output_prefix)))
os.remove(os.path.join(self.output_loc, "{}.eps".format(self.output_prefix)))
def atac_dnase_bc_line(self, reads_file1, reads_file2, bias_table1, bias_table2):
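        """Compare the rescaled, bias-corrected ATAC-seq (shifts +5/-4) and
        DNase-seq (no shift) mean signals around motif-predicted binding
        sites."""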
genome_data = GenomeData(self.organism)
fasta = Fastafile(genome_data.get_genome())
pwm_dict = dict([("A", [0.0] * self.window_size), ("C", [0.0] * self.window_size),
("G", [0.0] * self.window_size), ("T", [0.0] * self.window_size),
("N", [0.0] * self.window_size)])
bias_table = BiasTable()
bias_table_list = bias_table1.split(",")
table1 = bias_table.load_table(table_file_name_F=bias_table_list[0],
table_file_name_R=bias_table_list[1])
bias_table_list = bias_table2.split(",")
table2 = bias_table.load_table(table_file_name_F=bias_table_list[0],
table_file_name_R=bias_table_list[1])
mpbs_regions = GenomicRegionSet("Motif Predicted Binding Sites")
mpbs_regions.read(self.motif_file)
bam_atac = Samfile(reads_file1, "rb")
bam_dnase = Samfile(reads_file2, "rb")
mean_signal_atac = np.zeros(self.window_size)
mean_signal_dnase = np.zeros(self.window_size)
num_sites = 0
for region in mpbs_regions:
if str(region.name).split(":")[-1] == "Y":
                # Window of size window_size centered on the motif midpoint
                mid = (region.initial + region.final) // 2
                p1 = mid - (self.window_size // 2)
                p2 = mid + (self.window_size // 2)
# Fetch raw signal
signal_atac = self.get_bc_signal(ref=region.chrom, start=p1, end=p2, bam=bam_atac,
fasta=fasta, bias_table=table1,
forward_shift=5, reverse_shift=-4)
signal_dnase = self.get_bc_signal(ref=region.chrom, start=p1, end=p2, bam=bam_dnase,
fasta=fasta, bias_table=table2,
forward_shift=0, reverse_shift=0)
num_sites += 1
mean_signal_atac = np.add(mean_signal_atac, np.array(signal_atac))
mean_signal_dnase = np.add(mean_signal_dnase, np.array(signal_dnase))
# Update pwm
aux_plus = 1
dna_seq = str(fasta.fetch(region.chrom, p1, p2)).upper()
if (region.final - region.initial) % 2 == 0:
aux_plus = 0
dna_seq_rev = AuxiliaryFunctions.revcomp(str(fasta.fetch(region.chrom,
p1 + aux_plus, p2 + aux_plus)).upper())
if region.orientation == "+":
for i in range(0, len(dna_seq)):
pwm_dict[dna_seq[i]][i] += 1
elif region.orientation == "-":
for i in range(0, len(dna_seq_rev)):
pwm_dict[dna_seq_rev[i]][i] += 1
mean_signal_atac = self.rescaling(mean_signal_atac)
mean_signal_dnase = self.rescaling(mean_signal_dnase)
        # Output the mean ATAC-seq and DNase-seq signals
output_fname = os.path.join(self.output_loc, "{}.txt".format(self.output_prefix))
f = open(output_fname, "w")
f.write("\t".join((map(str, mean_signal_atac))) + "\n")
f.write("\t".join((map(str, mean_signal_dnase))) + "\n")
f.close()
# Output PWM and create logo
pwm_fname = os.path.join(self.output_loc, "{}.pwm".format(self.output_prefix))
pwm_file = open(pwm_fname, "w")
for e in ["A", "C", "G", "T"]:
pwm_file.write(" ".join([str(int(f)) for f in pwm_dict[e]]) + "\n")
pwm_file.close()
logo_fname = os.path.join(self.output_loc, "{}.logo.eps".format(self.output_prefix))
pwm = motifs.read(open(pwm_fname), "pfm")
pwm.weblogo(logo_fname, format="eps", stack_width="large", stacks_per_line=str(self.window_size),
color_scheme="color_classic", unit_name="", show_errorbars=False, logo_title="",
show_xaxis=False, xaxis_label="", show_yaxis=False, yaxis_label="",
show_fineprint=False, show_ends=False)
        start = -(self.window_size // 2)
        end = (self.window_size // 2) - 1
x = np.linspace(start, end, num=self.window_size)
fig = plt.figure(figsize=(8, 4))
ax2 = fig.add_subplot(111)
min_signal = min(min(mean_signal_atac), min(mean_signal_dnase))
max_signal = max(max(mean_signal_atac), max(mean_signal_dnase))
ax2.plot(x, mean_signal_atac, color='red', label='ATAC-seq')
ax2.plot(x, mean_signal_dnase, color='green', label='DNase-seq')
ax2.xaxis.set_ticks_position('bottom')
ax2.yaxis.set_ticks_position('left')
ax2.spines['top'].set_visible(False)
ax2.spines['right'].set_visible(False)
ax2.spines['left'].set_position(('outward', 15))
ax2.tick_params(direction='out')
ax2.set_title(self.output_prefix, fontweight='bold')
ax2.set_xticks([start, 0, end])
ax2.set_xticklabels([str(start), 0, str(end)])
ax2.set_yticks([min_signal, max_signal])
ax2.set_yticklabels([str(round(min_signal, 2)), str(round(max_signal, 2))], rotation=90)
ax2.set_xlim(start, end)
ax2.set_ylim([min_signal, max_signal])
ax2.legend(loc="upper right", frameon=False)
# ax2.legend(loc="lower right", frameon=False)
ax2.spines['bottom'].set_position(('outward', 40))
figure_name = os.path.join(self.output_loc, "{}.line.eps".format(self.output_prefix))
fig.subplots_adjust(bottom=.2, hspace=.5)
fig.tight_layout()
fig.savefig(figure_name, format="eps", dpi=300)
# Creating canvas and printing eps / pdf with merged results
output_fname = os.path.join(self.output_loc, "{}.eps".format(self.output_prefix))
c = pyx.canvas.canvas()
c.insert(pyx.epsfile.epsfile(0, 0, figure_name, scale=1.0))
c.insert(pyx.epsfile.epsfile(1.51, 0.89, logo_fname, width=18.3, height=1.75))
c.writeEPSfile(output_fname)
os.system("epstopdf " + figure_name)
os.system("epstopdf " + logo_fname)
os.system("epstopdf " + output_fname)
os.remove(pwm_fname)
os.remove(os.path.join(self.output_loc, "{}.line.eps".format(self.output_prefix)))
os.remove(os.path.join(self.output_loc, "{}.logo.eps".format(self.output_prefix)))
os.remove(os.path.join(self.output_loc, "{}.line.pdf".format(self.output_prefix)))
os.remove(os.path.join(self.output_loc, "{}.logo.pdf".format(self.output_prefix)))
os.remove(os.path.join(self.output_loc, "{}.eps".format(self.output_prefix)))
def get_bc_signal(self, ref, start, end, bam, fasta, bias_table, forward_shift, reverse_shift):
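        """Compute the bias-corrected signal in [start, end): k-mer bias values
        and raw cut counts are aggregated in a sliding 50-bp window and the
        expected cut counts of both strands are summed. Unseen k-mers fall back
        to defaultKmerValue. Returns a list of length end - start."""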
# Parameters
window = 50
defaultKmerValue = 1.0
# Initialization
fBiasDict = bias_table[0]
rBiasDict = bias_table[1]
        k_nb = len(next(iter(fBiasDict)))  # length of the bias-table k-mers
p1 = start
p2 = end
        p1_w = p1 - (window // 2)
        p2_w = p2 + (window // 2)
        p1_wk = p1_w - int(k_nb / 2.)
        p2_wk = p2_w + int(k_nb / 2.)
currStr = str(fasta.fetch(ref, p1_wk, p2_wk - 1)).upper()
currRevComp = AuxiliaryFunctions.revcomp(str(fasta.fetch(ref, p1_wk + 1, p2_wk)).upper())
# Iterating on sequence to create the bias signal
signal_bias_f = []
signal_bias_r = []
for i in range(int(k_nb / 2.), len(currStr) - int(k_nb / 2) + 1):
fseq = currStr[i - int(k_nb / 2.):i + int(k_nb / 2.)]
rseq = currRevComp[len(currStr) - int(k_nb / 2.) - i:len(currStr) + int(k_nb / 2.) - i]
try:
signal_bias_f.append(fBiasDict[fseq])
except Exception:
signal_bias_f.append(defaultKmerValue)
try:
signal_bias_r.append(rBiasDict[rseq])
except Exception:
signal_bias_r.append(defaultKmerValue)
# Raw counts
signal_raw_f = [0.0] * (p2_w - p1_w)
signal_raw_r = [0.0] * (p2_w - p1_w)
for read in bam.fetch(ref, p1_w, p2_w):
if not read.is_reverse:
                cut_site = read.pos + forward_shift
if p1_w <= cut_site < p2_w:
signal_raw_f[cut_site - p1_w] += 1.0
else:
                cut_site = read.aend + reverse_shift - 1
if p1_w <= cut_site < p2_w:
signal_raw_r[cut_site - p1_w] += 1.0
# Smoothed counts
Nf = []
Nr = []
fSum = sum(signal_raw_f[:window])
rSum = sum(signal_raw_r[:window])
fLast = signal_raw_f[0]
rLast = signal_raw_r[0]
        for i in range((window // 2), len(signal_raw_f) - (window // 2)):
            Nf.append(fSum)
            Nr.append(rSum)
            fSum -= fLast
            fSum += signal_raw_f[i + (window // 2)]
            fLast = signal_raw_f[i - (window // 2) + 1]
            rSum -= rLast
            rSum += signal_raw_r[i + (window // 2)]
            rLast = signal_raw_r[i - (window // 2) + 1]
        # Aggregate the bias in the sliding window and compute the corrected signal
fSum = sum(signal_bias_f[:window])
rSum = sum(signal_bias_r[:window])
fLast = signal_bias_f[0]
rLast = signal_bias_r[0]
signal_bc = []
        for i in range((window // 2), len(signal_bias_f) - (window // 2)):
            nhatf = Nf[i - (window // 2)] * (signal_bias_f[i] / fSum)
            nhatr = Nr[i - (window // 2)] * (signal_bias_r[i] / rSum)
            signal_bc.append(nhatf + nhatr)
            fSum -= fLast
            fSum += signal_bias_f[i + (window // 2)]
            fLast = signal_bias_f[i - (window // 2) + 1]
            rSum -= rLast
            rSum += signal_bias_r[i + (window // 2)]
            rLast = signal_bias_r[i - (window // 2) + 1]
return signal_bc
def atac_dnase_raw_line(self, reads_file1, reads_file2):
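        """Compare the rescaled raw ATAC-seq (with the configured shifts) and
        DNase-seq (unshifted) mean cut-site signals around motif-predicted
        binding sites."""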
genome_data = GenomeData(self.organism)
fasta = Fastafile(genome_data.get_genome())
pwm_dict = dict([("A", [0.0] * self.window_size), ("C", [0.0] * self.window_size),
("G", [0.0] * self.window_size), ("T", [0.0] * self.window_size),
("N", [0.0] * self.window_size)])
mpbs_regions = GenomicRegionSet("Motif Predicted Binding Sites")
mpbs_regions.read(self.motif_file)
bam_atac = Samfile(reads_file1, "rb")
bam_dnase = Samfile(reads_file2, "rb")
mean_signal_atac = np.zeros(self.window_size)
mean_signal_dnase = np.zeros(self.window_size)
num_sites = 0
for region in mpbs_regions:
if str(region.name).split(":")[-1] == "Y":
                # Window of size window_size centered on the motif midpoint
                mid = (region.initial + region.final) // 2
                p1 = mid - (self.window_size // 2)
                p2 = mid + (self.window_size // 2)
# Fetch raw signal
for read in bam_atac.fetch(region.chrom, p1, p2):
if not read.is_reverse:
cut_site = read.pos + self.forward_shift
if p1 <= cut_site < p2:
mean_signal_atac[cut_site - p1] += 1.0
else:
cut_site = read.aend + self.reverse_shift - 1
if p1 <= cut_site < p2:
mean_signal_atac[cut_site - p1] += 1.0
# Fetch raw signal
for read in bam_dnase.fetch(region.chrom, p1, p2):
if not read.is_reverse:
cut_site = read.pos
if p1 <= cut_site < p2:
mean_signal_dnase[cut_site - p1] += 1.0
else:
cut_site = read.aend - 1
if p1 <= cut_site < p2:
mean_signal_dnase[cut_site - p1] += 1.0
num_sites += 1
# Update pwm
aux_plus = 1
dna_seq = str(fasta.fetch(region.chrom, p1, p2)).upper()
if (region.final - region.initial) % 2 == 0:
aux_plus = 0
dna_seq_rev = AuxiliaryFunctions.revcomp(str(fasta.fetch(region.chrom,
p1 + aux_plus, p2 + aux_plus)).upper())
if region.orientation == "+":
for i in range(0, len(dna_seq)):
pwm_dict[dna_seq[i]][i] += 1
elif region.orientation == "-":
for i in range(0, len(dna_seq_rev)):
pwm_dict[dna_seq_rev[i]][i] += 1
mean_signal_atac = self.rescaling(mean_signal_atac)
mean_signal_dnase = self.rescaling(mean_signal_dnase)
# Output the norm and slope signal
output_fname = os.path.join(self.output_loc, "{}.txt".format(self.output_prefix))
f = open(output_fname, "w")
f.write("\t".join((map(str, mean_signal_atac))) + "\n")
f.write("\t".join((map(str, mean_signal_dnase))) + "\n")
f.close()
# Output PWM and create logo
pwm_fname = os.path.join(self.output_loc, "{}.pwm".format(self.output_prefix))
pwm_file = open(pwm_fname, "w")
for e in ["A", "C", "G", "T"]:
pwm_file.write(" ".join([str(int(f)) for f in pwm_dict[e]]) + "\n")
pwm_file.close()
logo_fname = os.path.join(self.output_loc, "{}.logo.eps".format(self.output_prefix))
pwm = motifs.read(open(pwm_fname), "pfm")
pwm.weblogo(logo_fname, format="eps", stack_width="large", stacks_per_line=str(self.window_size),
color_scheme="color_classic", unit_name="", show_errorbars=False, logo_title="",
show_xaxis=False, xaxis_label="", show_yaxis=False, yaxis_label="",
show_fineprint=False, show_ends=False)
start = -(self.window_size / 2)
end = (self.window_size / 2) - 1
x = np.linspace(start, end, num=self.window_size)
fig = plt.figure(figsize=(8, 4))
ax2 = fig.add_subplot(111)
min_signal = min(min(mean_signal_atac), min(mean_signal_dnase))
max_signal = max(max(mean_signal_atac), max(mean_signal_dnase))
ax2.plot(x, mean_signal_atac, color='red', label='ATAC-seq')
ax2.plot(x, mean_signal_dnase, color='green', label='DNase-seq')
ax2.xaxis.set_ticks_position('bottom')
ax2.yaxis.set_ticks_position('left')
ax2.spines['top'].set_visible(False)
ax2.spines['right'].set_visible(False)
ax2.spines['left'].set_position(('outward', 15))
ax2.tick_params(direction='out')
ax2.set_title(self.output_prefix, fontweight='bold')
ax2.set_xticks([start, 0, end])
ax2.set_xticklabels([str(start), 0, str(end)])
ax2.set_yticks([min_signal, max_signal])
ax2.set_yticklabels([str(round(min_signal, 2)), str(round(max_signal, 2))], rotation=90)
ax2.set_xlim(start, end)
ax2.set_ylim([min_signal, max_signal])
ax2.legend(loc="upper right", frameon=False)
ax2.spines['bottom'].set_position(('outward', 40))
figure_name = os.path.join(self.output_loc, "{}.line.eps".format(self.output_prefix))
fig.subplots_adjust(bottom=.2, hspace=.5)
fig.tight_layout()
fig.savefig(figure_name, format="eps", dpi=300)
# Creating canvas and printing eps / pdf with merged results
output_fname = os.path.join(self.output_loc, "{}.eps".format(self.output_prefix))
c = pyx.canvas.canvas()
c.insert(pyx.epsfile.epsfile(0, 0, figure_name, scale=1.0))
c.insert(pyx.epsfile.epsfile(1.51, 0.89, logo_fname, width=18.3, height=1.75))
c.writeEPSfile(output_fname)
os.system("epstopdf " + figure_name)
os.system("epstopdf " + logo_fname)
os.system("epstopdf " + output_fname)
os.remove(pwm_fname)
os.remove(os.path.join(self.output_loc, "{}.line.eps".format(self.output_prefix)))
os.remove(os.path.join(self.output_loc, "{}.logo.eps".format(self.output_prefix)))
os.remove(os.path.join(self.output_loc, "{}.line.pdf".format(self.output_prefix)))
os.remove(os.path.join(self.output_loc, "{}.logo.pdf".format(self.output_prefix)))
os.remove(os.path.join(self.output_loc, "{}.eps".format(self.output_prefix)))
def fragment_size_raw_line(self):
mpbs_regions = GenomicRegionSet("Motif Predicted Binding Sites")
mpbs_regions.read(self.motif_file)
bam = Samfile(self.reads_file, "rb")
signal_f_max_145 = np.zeros(self.window_size)
signal_r_max_145 = np.zeros(self.window_size)
signal_f_146_307 = np.zeros(self.window_size)
signal_r_146_307 = np.zeros(self.window_size)
signal_f_min_307 = np.zeros(self.window_size)
signal_r_min_307 = np.zeros(self.window_size)
signal_f = np.zeros(self.window_size)
signal_r = np.zeros(self.window_size)
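        # Fragment-length classes used below: <= 145 bp (roughly
        # sub-nucleosomal), 146-307 bp (roughly mono-nucleosomal) and
        # > 307 bp (longer, multi-nucleosomal fragments). The thresholds come
        # from the cutoffs below; the biological reading is an interpretation.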
for region in mpbs_regions:
if str(region.name).split(":")[-1] == "Y":
                # Center a window of length window_size on the motif midpoint
mid = (region.initial + region.final) / 2
p1 = mid - (self.window_size / 2)
p2 = mid + (self.window_size / 2)
# Fetch raw signal
for read in bam.fetch(region.chrom, p1, p2):
# All reads
if not read.is_reverse:
cut_site = read.pos + self.forward_shift
if p1 <= cut_site < p2:
signal_f[cut_site - p1] += 1.0
else:
cut_site = read.aend + self.reverse_shift - 1
if p1 <= cut_site < p2:
signal_r[cut_site - p1] += 1.0
# length <= 145
if abs(read.template_length) <= 145:
if not read.is_reverse:
cut_site = read.pos + self.forward_shift
if p1 <= cut_site < p2:
signal_f_max_145[cut_site - p1] += 1.0
else:
cut_site = read.aend + self.reverse_shift - 1
if p1 <= cut_site < p2:
signal_r_max_145[cut_site - p1] += 1.0
# length > 145 and <= 307
if 145 < abs(read.template_length) <= 307:
if not read.is_reverse:
cut_site = read.pos + self.forward_shift
if p1 <= cut_site < p2:
signal_f_146_307[cut_site - p1] += 1.0
else:
cut_site = read.aend + self.reverse_shift - 1
if p1 <= cut_site < p2:
signal_r_146_307[cut_site - p1] += 1.0
# length > 307
if abs(read.template_length) > 307:
if not read.is_reverse:
cut_site = read.pos + self.forward_shift
if p1 <= cut_site < p2:
signal_f_min_307[cut_site - p1] += 1.0
else:
cut_site = read.aend + self.reverse_shift - 1
if p1 <= cut_site < p2:
signal_r_min_307[cut_site - p1] += 1.0
# Output the norm and slope signal
output_fname = os.path.join(self.output_loc, "{}.txt".format(self.output_prefix))
f = open(output_fname, "w")
f.write("\t".join((map(str, signal_f))) + "\n")
f.write("\t".join((map(str, signal_r))) + "\n")
f.write("\t".join((map(str, signal_f_max_145))) + "\n")
f.write("\t".join((map(str, signal_r_max_145))) + "\n")
f.write("\t".join((map(str, signal_f_146_307))) + "\n")
f.write("\t".join((map(str, signal_r_146_307))) + "\n")
f.write("\t".join((map(str, signal_f_min_307))) + "\n")
f.write("\t".join((map(str, signal_r_min_307))) + "\n")
f.close()
# find out the linker position
pos_f_1, pos_r_1, pos_f_2, pos_r_2 = self.get_linkers_position(signal_f_146_307,
signal_r_146_307,
signal_f_min_307,
signal_r_min_307)
p1 = (pos_f_1 - pos_f_2) / 2 + pos_f_2
p2 = p1 + 180
p3 = self.window_size - p2
p4 = self.window_size - p1
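        # p1/p2 delimit a region left of the center derived from the linker
        # positions; p3/p4 are their mirror images about the window center
        # (window_size - p2, window_size - p1). The hard-coded `- 500` in
        # x_ticks below converts array indices to coordinates relative to the
        # motif center and appears to assume window_size == 1000.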
fig, (ax1, ax2, ax3, ax4) = plt.subplots(4, 1, figsize=(8, 8))
start = -(self.window_size / 2)
end = (self.window_size / 2) - 1
x = np.linspace(start, end, num=self.window_size)
x_ticks = [start, p1 - 500, p2 - 500, 0, p3 - 500, p4 - 500, end]
self.update_axes_for_fragment_size_line(ax1, x, x_ticks, start, end, signal_f, signal_r, p1, p2,
p3, p4)
self.update_axes_for_fragment_size_line(ax2, x, x_ticks, start, end, signal_f_max_145, signal_r_max_145, p1, p2,
p3, p4)
self.update_axes_for_fragment_size_line(ax3, x, x_ticks, start, end, signal_f_146_307, signal_r_146_307, p1, p2,
p3, p4)
self.update_axes_for_fragment_size_line(ax4, x, x_ticks, start, end, signal_f_min_307, signal_r_min_307, p1, p2,
p3, p4)
figure_name = os.path.join(self.output_loc, "{}.pdf".format(self.output_prefix))
fig.subplots_adjust(bottom=.2, hspace=.5)
fig.tight_layout()
fig.savefig(figure_name, format="pdf", dpi=300)
def fragment_size_bc_line(self, bias_table_files):
mpbs_regions = GenomicRegionSet("Motif Predicted Binding Sites")
mpbs_regions.read(self.motif_file)
genomic_signal = GenomicSignal(self.reads_file)
genomic_signal.load_sg_coefs(11)
bam = Samfile(self.reads_file, "rb")
genome_data = GenomeData(self.organism)
fasta = Fastafile(genome_data.get_genome())
bias_table = BiasTable()
bias_table_list = bias_table_files.split(",")
table = bias_table.load_table(table_file_name_F=bias_table_list[0],
table_file_name_R=bias_table_list[1])
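        # bias_table_files is expected to be "<forward>,<reverse>": a pair of
        # k-mer bias tables, one per strand, loaded as F and R respectively.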
signal_f_max_145 = np.zeros(self.window_size)
signal_r_max_145 = np.zeros(self.window_size)
signal_f_146_307 = np.zeros(self.window_size)
signal_r_146_307 = np.zeros(self.window_size)
signal_f_min_307 = np.zeros(self.window_size)
signal_r_min_307 = np.zeros(self.window_size)
signal_f = np.zeros(self.window_size)
signal_r = np.zeros(self.window_size)
for region in mpbs_regions:
if str(region.name).split(":")[-1] == "Y":
mid = (region.initial + region.final) / 2
p1 = mid - (self.window_size / 2)
p2 = mid + (self.window_size / 2)
# All reads
signal_bc_f, signal_bc_r = \
genomic_signal.get_bc_signal_by_fragment_length(ref=region.chrom, start=p1, end=p2,
bam=bam, fasta=fasta,
bias_table=table,
forward_shift=self.forward_shift,
reverse_shift=self.reverse_shift,
min_length=None, max_length=None,
strand=True)
# length <= 145
signal_bc_max_145_f, signal_bc_max_145_r = \
genomic_signal.get_bc_signal_by_fragment_length(ref=region.chrom, start=p1, end=p2,
bam=bam, fasta=fasta,
bias_table=table,
forward_shift=self.forward_shift,
reverse_shift=self.reverse_shift,
min_length=None, max_length=145,
strand=True)
# length > 145 and <= 307
signal_bc_146_307_f, signal_bc_146_307_r = \
genomic_signal.get_bc_signal_by_fragment_length(ref=region.chrom, start=p1, end=p2,
bam=bam, fasta=fasta,
bias_table=table,
forward_shift=self.forward_shift,
reverse_shift=self.reverse_shift,
min_length=145, max_length=307,
strand=True)
# length > 307
signal_bc_min_307_f, signal_bc_min_307_r = \
genomic_signal.get_bc_signal_by_fragment_length(ref=region.chrom, start=p1, end=p2,
bam=bam, fasta=fasta,
bias_table=table,
forward_shift=self.forward_shift,
reverse_shift=self.reverse_shift,
min_length=307, max_length=None,
strand=True)
signal_f = np.add(signal_f, np.array(signal_bc_f))
signal_r = np.add(signal_r, np.array(signal_bc_r))
signal_f_max_145 = np.add(signal_f_max_145, np.array(signal_bc_max_145_f))
signal_r_max_145 = np.add(signal_r_max_145, np.array(signal_bc_max_145_r))
signal_f_146_307 = np.add(signal_f_146_307, np.array(signal_bc_146_307_f))
signal_r_146_307 = np.add(signal_r_146_307, np.array(signal_bc_146_307_r))
signal_f_min_307 = np.add(signal_f_min_307, np.array(signal_bc_min_307_f))
signal_r_min_307 = np.add(signal_r_min_307, np.array(signal_bc_min_307_r))
# Output the norm and slope signal
output_fname = os.path.join(self.output_loc, "{}.txt".format(self.output_prefix))
f = open(output_fname, "w")
f.write("\t".join((map(str, signal_f))) + "\n")
f.write("\t".join((map(str, signal_r))) + "\n")
f.write("\t".join((map(str, signal_f_max_145))) + "\n")
f.write("\t".join((map(str, signal_r_max_145))) + "\n")
f.write("\t".join((map(str, signal_f_146_307))) + "\n")
f.write("\t".join((map(str, signal_r_146_307))) + "\n")
f.write("\t".join((map(str, signal_f_min_307))) + "\n")
f.write("\t".join((map(str, signal_r_min_307))) + "\n")
f.close()
# find out the linker position
pos_f_1, pos_r_1, pos_f_2, pos_r_2 = self.get_linkers_position(signal_f_146_307,
signal_r_146_307,
signal_f_min_307,
signal_r_min_307)
p1 = (pos_f_1 - pos_f_2) / 2 + pos_f_2
p2 = p1 + 180
p3 = self.window_size - p2
p4 = self.window_size - p1
fig, (ax1, ax2, ax3, ax4) = plt.subplots(4, 1, figsize=(8, 8))
start = -(self.window_size / 2)
end = (self.window_size / 2) - 1
x = np.linspace(start, end, num=self.window_size)
x_ticks = [start, p1 - 500, p2 - 500, 0, p3 - 500, p4 - 500, end]
self.update_axes_for_fragment_size_line(ax1, x, x_ticks, start, end, signal_f, signal_r, p1, p2,
p3, p4)
self.update_axes_for_fragment_size_line(ax2, x, x_ticks, start, end, signal_f_max_145, signal_r_max_145, p1, p2,
p3, p4)
self.update_axes_for_fragment_size_line(ax3, x, x_ticks, start, end, signal_f_146_307, signal_r_146_307, p1, p2,
p3, p4)
self.update_axes_for_fragment_size_line(ax4, x, x_ticks, start, end, signal_f_min_307, signal_r_min_307, p1, p2,
p3, p4)
figure_name = os.path.join(self.output_loc, "{}.pdf".format(self.output_prefix))
fig.subplots_adjust(bottom=.2, hspace=.5)
fig.tight_layout()
fig.savefig(figure_name, format="pdf", dpi=300)
def slope_of_reads_number(self):
genome_data = GenomeData(self.organism)
mpbs_regions = GenomicRegionSet("Motif Predicted Binding Sites")
mpbs_regions.read(self.motif_file)
bam = Samfile(self.reads_file, "rb")
signal_f_146_307 = np.zeros(self.window_size)
signal_r_146_307 = np.zeros(self.window_size)
signal_f_min_307 = np.zeros(self.window_size)
signal_r_min_307 = np.zeros(self.window_size)
for region in mpbs_regions:
if str(region.name).split(":")[-1] == "Y":
                # Center a window of length window_size on the motif midpoint
mid = (region.initial + region.final) / 2
p1 = mid - (self.window_size / 2)
p2 = mid + (self.window_size / 2)
# Fetch raw signal
for read in bam.fetch(region.chrom, p1, p2):
# length > 145 and <= 307
if 145 < abs(read.template_length) <= 307:
if not read.is_reverse:
cut_site = read.pos + self.forward_shift
if p1 <= cut_site < p2:
signal_f_146_307[cut_site - p1] += 1.0
else:
cut_site = read.aend + self.reverse_shift - 1
if p1 <= cut_site < p2:
signal_r_146_307[cut_site - p1] += 1.0
# length > 307
if abs(read.template_length) > 307:
if not read.is_reverse:
cut_site = read.pos + self.forward_shift
if p1 <= cut_site < p2:
signal_f_min_307[cut_site - p1] += 1.0
else:
cut_site = read.aend + self.reverse_shift - 1
if p1 <= cut_site < p2:
signal_r_min_307[cut_site - p1] += 1.0
        # Smooth each profile with a Savitzky-Golay filter
        # (window length 51, polynomial order 2).
smooth_signal_f_146_307 = savgol_filter(signal_f_146_307, window_length=51, polyorder=2)
smooth_signal_r_146_307 = savgol_filter(signal_r_146_307, window_length=51, polyorder=2)
smooth_signal_f_min_307 = savgol_filter(signal_f_min_307, window_length=51, polyorder=2)
smooth_signal_r_min_307 = savgol_filter(signal_r_min_307, window_length=51, polyorder=2)
# Output the norm and slope signal
output_fname = os.path.join(self.output_loc, "{}.txt".format(self.output_prefix))
f = open(output_fname, "w")
f.write("\t".join((map(str, signal_f_146_307))) + "\n")
f.write("\t".join((map(str, signal_r_146_307))) + "\n")
f.write("\t".join((map(str, smooth_signal_f_146_307))) + "\n")
f.write("\t".join((map(str, smooth_signal_r_146_307))) + "\n")
f.write("\t".join((map(str, signal_f_min_307))) + "\n")
f.write("\t".join((map(str, signal_r_min_307))) + "\n")
f.write("\t".join((map(str, smooth_signal_f_min_307))) + "\n")
f.write("\t".join((map(str, smooth_signal_r_min_307))) + "\n")
f.close()
start = -(self.window_size / 2)
end = (self.window_size / 2) - 1
x = np.linspace(start, end, num=self.window_size)
fig, (ax1, ax2, ax3, ax4) = plt.subplots(4, 1, figsize=(8, 8))
min_signal = min(min(signal_f_146_307), min(signal_r_146_307))
max_signal = max(max(signal_f_146_307), max(signal_r_146_307))
ax1.plot(x, signal_f_146_307, color='red', label='Forward')
ax1.plot(x, signal_r_146_307, color='green', label='Reverse')
ax1.xaxis.set_ticks_position('bottom')
ax1.yaxis.set_ticks_position('left')
ax1.spines['top'].set_visible(False)
ax1.spines['right'].set_visible(False)
ax1.spines['left'].set_position(('outward', 15))
ax1.tick_params(direction='out')
ax1.set_title("Raw reads number of 1N", fontweight='bold')
ax1.set_xticks([start, 0, end])
ax1.set_xticklabels([str(start), 0, str(end)])
ax1.set_yticks([min_signal, max_signal])
ax1.set_yticklabels([str(int(min_signal)), str(int(max_signal))], rotation=90)
ax1.set_xlim(start, end)
ax1.set_ylim([min_signal, max_signal])
min_signal = min(min(smooth_signal_f_146_307), min(smooth_signal_r_146_307))
max_signal = max(max(smooth_signal_f_146_307), max(smooth_signal_r_146_307))
ax2.plot(x, smooth_signal_f_146_307, color='red', label='Forward')
ax2.plot(x, smooth_signal_r_146_307, color='green', label='Reverse')
ax2.xaxis.set_ticks_position('bottom')
ax2.yaxis.set_ticks_position('left')
ax2.spines['top'].set_visible(False)
ax2.spines['right'].set_visible(False)
ax2.spines['left'].set_position(('outward', 15))
ax2.tick_params(direction='out')
ax2.set_title("Smooth reads number of 1N", fontweight='bold')
ax2.set_yticks([min_signal, max_signal])
ax2.set_yticklabels([str(int(min_signal)), str(int(max_signal))], rotation=90)
ax2.set_xlim(start, end)
ax2.set_ylim([min_signal, max_signal])
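        # Peak finding: locate the forward peak in the left half and the
        # reverse peak in the right half of the smoothed 1N profile. The
        # hard-coded 500 is half the window, so this appears to assume
        # window_size == 1000; subtracting it converts an array index into a
        # coordinate relative to the motif center.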
id_max_f_1 = np.argmax(smooth_signal_f_146_307[:500]) - 500
id_max_r_1 = np.argmax(smooth_signal_r_146_307[500:])
ax2.axvline(x=id_max_f_1)
ax2.axvline(x=id_max_r_1)
x_tick_labels = [str(start), str(id_max_f_1), 0, str(id_max_r_1), str(end)]
ax2.set_xticks(map(int, x_tick_labels))
ax2.set_xticklabels(x_tick_labels, rotation=90)
min_signal = min(min(signal_f_min_307), min(signal_r_min_307))
max_signal = max(max(signal_f_min_307), max(signal_r_min_307))
ax3.plot(x, signal_f_min_307, color='red', label='Forward')
ax3.plot(x, signal_r_min_307, color='green', label='Reverse')
ax3.xaxis.set_ticks_position('bottom')
ax3.yaxis.set_ticks_position('left')
ax3.spines['top'].set_visible(False)
ax3.spines['right'].set_visible(False)
ax3.spines['left'].set_position(('outward', 15))
ax3.tick_params(direction='out')
ax3.set_title("Raw reads number of +2N", fontweight='bold')
ax3.set_xticks([start, 0, end])
ax3.set_xticklabels([str(start), 0, str(end)])
ax3.set_yticks([min_signal, max_signal])
ax3.set_yticklabels([str(int(min_signal)), str(int(max_signal))], rotation=90)
ax3.set_xlim(start, end)
ax3.set_ylim([min_signal, max_signal])
min_signal = min(min(smooth_signal_f_min_307), min(smooth_signal_r_min_307))
max_signal = max(max(smooth_signal_f_min_307), max(smooth_signal_r_min_307))
ax4.plot(x, smooth_signal_f_min_307, color='red', label='Forward')
ax4.plot(x, smooth_signal_r_min_307, color='green', label='Reverse')
ax4.xaxis.set_ticks_position('bottom')
ax4.yaxis.set_ticks_position('left')
ax4.spines['top'].set_visible(False)
ax4.spines['right'].set_visible(False)
ax4.spines['left'].set_position(('outward', 15))
ax4.tick_params(direction='out')
ax4.set_title("Smooth reads number of +2N", fontweight='bold')
ax4.set_yticks([min_signal, max_signal])
ax4.set_yticklabels([str(int(min_signal)), str(int(max_signal))], rotation=90)
ax4.set_xlim(start, end)
ax4.set_ylim([min_signal, max_signal])
id_max_f = np.argmax(smooth_signal_f_146_307[:500])
id_max_r = np.argmax(smooth_signal_r_146_307[500:]) + 500
id_max_f_2 = np.argmax(smooth_signal_f_min_307[:id_max_f]) - 500
id_max_r_2 = np.argmax(smooth_signal_r_min_307[id_max_r:]) + id_max_r - 500
ax4.axvline(x=id_max_f_2)
ax4.axvline(x=id_max_r_2)
x_tick_labels = [str(start), str(id_max_f_2), 0, str(id_max_r_2), str(end)]
ax4.set_xticks(map(int, x_tick_labels))
ax4.set_xticklabels(x_tick_labels, rotation=90)
figure_name = os.path.join(self.output_loc, "{}.pdf".format(self.output_prefix))
fig.subplots_adjust(bottom=.2, hspace=.5)
fig.tight_layout()
fig.savefig(figure_name, format="pdf", dpi=300)
def strand_line_by_size(self, bias_tables):
genome_data = GenomeData(self.organism)
fasta = Fastafile(genome_data.get_genome())
mpbs_regions = GenomicRegionSet("Motif Predicted Binding Sites")
mpbs_regions.read(self.motif_file)
bam = Samfile(self.reads_file, "rb")
table = BiasTable()
bias_table_list = bias_tables.split(",")
bias_table = table.load_table(table_file_name_F=bias_table_list[0],
table_file_name_R=bias_table_list[1])
signal = GenomicSignal()
signal.load_sg_coefs(9)
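        # load_sg_coefs(9) precomputes Savitzky-Golay coefficients with a
        # window length of 9; they are used later by signal.slope().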
signal_bc_f_max_145 = np.zeros(self.window_size)
signal_bc_r_max_145 = np.zeros(self.window_size)
signal_bc_f_min_145 = np.zeros(self.window_size)
signal_bc_r_min_145 = np.zeros(self.window_size)
signal_bc_f_145_307 = np.zeros(self.window_size)
signal_bc_r_145_307 = np.zeros(self.window_size)
signal_bc_f_min_307 = np.zeros(self.window_size)
signal_bc_r_min_307 = np.zeros(self.window_size)
signal_bc_f_307_500 = np.zeros(self.window_size)
signal_bc_r_307_500 = np.zeros(self.window_size)
signal_bc_f_min_500 = np.zeros(self.window_size)
signal_bc_r_min_500 = np.zeros(self.window_size)
signal_bc_f = np.zeros(self.window_size)
signal_bc_r = np.zeros(self.window_size)
signal_bc = np.zeros(self.window_size)
for region in mpbs_regions:
if str(region.name).split(":")[-1] == "Y":
mid = (region.initial + region.final) / 2
p1 = mid - (self.window_size / 2)
p2 = mid + (self.window_size / 2)
bc_f_max_145, bc_r_max_145 = \
signal.get_bc_signal_by_fragment_length(ref=region.chrom, start=p1, end=p2,
bam=bam, fasta=fasta, bias_table=bias_table,
forward_shift=self.forward_shift,
reverse_shift=self.reverse_shift,
min_length=None, max_length=145, strand=True)
bc_f_min_145, bc_r_min_145 = \
signal.get_bc_signal_by_fragment_length(ref=region.chrom, start=p1, end=p2,
bam=bam, fasta=fasta, bias_table=bias_table,
forward_shift=self.forward_shift,
reverse_shift=self.reverse_shift,
min_length=145, max_length=None, strand=True)
bc_f_145_307, bc_r_145_307 = \
signal.get_bc_signal_by_fragment_length(ref=region.chrom, start=p1, end=p2,
bam=bam, fasta=fasta, bias_table=bias_table,
forward_shift=self.forward_shift,
reverse_shift=self.reverse_shift,
min_length=145, max_length=307, strand=True)
bc_f_min_307, bc_r_min_307 = \
signal.get_bc_signal_by_fragment_length(ref=region.chrom, start=p1, end=p2,
bam=bam, fasta=fasta, bias_table=bias_table,
forward_shift=self.forward_shift,
reverse_shift=self.reverse_shift,
min_length=307, max_length=None, strand=True)
bc_f_307_500, bc_r_307_500 = \
signal.get_bc_signal_by_fragment_length(ref=region.chrom, start=p1, end=p2,
bam=bam, fasta=fasta, bias_table=bias_table,
forward_shift=self.forward_shift,
reverse_shift=self.reverse_shift,
min_length=307, max_length=500, strand=True)
bc_f_min_500, bc_r_min_500 = \
signal.get_bc_signal_by_fragment_length(ref=region.chrom, start=p1, end=p2,
bam=bam, fasta=fasta, bias_table=bias_table,
forward_shift=self.forward_shift,
reverse_shift=self.reverse_shift,
min_length=500, max_length=None, strand=True)
bc_f, bc_r = \
signal.get_bc_signal_by_fragment_length(ref=region.chrom, start=p1, end=p2,
bam=bam, fasta=fasta, bias_table=bias_table,
forward_shift=self.forward_shift,
reverse_shift=self.reverse_shift,
min_length=None, max_length=None, strand=True)
bc = \
signal.get_bc_signal_by_fragment_length(ref=region.chrom, start=p1, end=p2,
bam=bam, fasta=fasta, bias_table=bias_table,
forward_shift=self.forward_shift,
reverse_shift=self.reverse_shift,
min_length=None, max_length=None, strand=False)
signal_bc_f_max_145 = np.add(signal_bc_f_max_145, bc_f_max_145)
signal_bc_r_max_145 = np.add(signal_bc_r_max_145, bc_r_max_145)
signal_bc_f_min_145 = np.add(signal_bc_f_min_145, bc_f_min_145)
signal_bc_r_min_145 = np.add(signal_bc_r_min_145, bc_r_min_145)
signal_bc_f_145_307 = np.add(signal_bc_f_145_307, bc_f_145_307)
signal_bc_r_145_307 = np.add(signal_bc_r_145_307, bc_r_145_307)
signal_bc_f_min_307 = np.add(signal_bc_f_min_307, bc_f_min_307)
signal_bc_r_min_307 = np.add(signal_bc_r_min_307, bc_r_min_307)
signal_bc_f_307_500 = np.add(signal_bc_f_307_500, bc_f_307_500)
signal_bc_r_307_500 = np.add(signal_bc_r_307_500, bc_r_307_500)
signal_bc_f_min_500 = np.add(signal_bc_f_min_500, bc_f_min_500)
signal_bc_r_min_500 = np.add(signal_bc_r_min_500, bc_r_min_500)
signal_bc_f = np.add(signal_bc_f, bc_f)
signal_bc_r = np.add(signal_bc_r, bc_r)
signal_bc = np.add(signal_bc, bc)
# Pre-process signal
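        # Normalization pipeline: boyle_norm rescales the aggregate profile,
        # hon_norm_atac standardizes it using the 98th percentile and the
        # standard deviation, and slope() differentiates it with the
        # Savitzky-Golay coefficients loaded above.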
signal_bc = signal.boyle_norm(signal_bc)
perc = scoreatpercentile(signal_bc, 98)
std = np.array(signal_bc).std()
signal_bc = signal.hon_norm_atac(signal_bc, perc, std)
signal_bc_slope = signal.slope(signal_bc, signal.sg_coefs)
signal_bc_f = signal.boyle_norm(signal_bc_f)
perc = scoreatpercentile(signal_bc_f, 98)
std = np.array(signal_bc_f).std()
signal_bc_f = signal.hon_norm_atac(signal_bc_f, perc, std)
signal_bc_f_slope = signal.slope(signal_bc_f, signal.sg_coefs)
signal_bc_r = signal.boyle_norm(signal_bc_r)
perc = scoreatpercentile(signal_bc_r, 98)
std = np.array(signal_bc_r).std()
signal_bc_r = signal.hon_norm_atac(signal_bc_r, perc, std)
signal_bc_r_slope = signal.slope(signal_bc_r, signal.sg_coefs)
# Output the norm and slope signal
output_fname = os.path.join(self.output_loc, "{}.txt".format(self.output_prefix))
with open(output_fname, "w") as f:
f.write("\t".join((map(str, signal_bc_f))) + "\n")
f.write("\t".join((map(str, signal_bc_f_slope))) + "\n")
f.write("\t".join((map(str, signal_bc_r))) + "\n")
f.write("\t".join((map(str, signal_bc_r_slope))) + "\n")
# Output signal of class a
signal_bc_f_max_145 = signal.boyle_norm(signal_bc_f_max_145)
perc = scoreatpercentile(signal_bc_f_max_145, 98)
std = np.array(signal_bc_f_max_145).std()
signal_bc_f_max_145 = signal.hon_norm_atac(signal_bc_f_max_145, perc, std)
signal_bc_f_max_145_slope = signal.slope(signal_bc_f_max_145, signal.sg_coefs)
signal_bc_r_max_145 = signal.boyle_norm(signal_bc_r_max_145)
perc = scoreatpercentile(signal_bc_r_max_145, 98)
std = np.array(signal_bc_r_max_145).std()
signal_bc_r_max_145 = signal.hon_norm_atac(signal_bc_r_max_145, perc, std)
signal_bc_r_max_145_slope = signal.slope(signal_bc_r_max_145, signal.sg_coefs)
output_fname = os.path.join(self.output_loc, "{}_a.txt".format(self.output_prefix))
with open(output_fname, "w") as f:
f.write("\t".join((map(str, signal_bc_f_max_145))) + "\n")
f.write("\t".join((map(str, signal_bc_f_max_145_slope))) + "\n")
f.write("\t".join((map(str, signal_bc_r_max_145))) + "\n")
f.write("\t".join((map(str, signal_bc_r_max_145_slope))) + "\n")
# Output signal of class b
signal_bc_f_min_145 = signal.boyle_norm(signal_bc_f_min_145)
perc = scoreatpercentile(signal_bc_f_min_145, 98)
std = np.array(signal_bc_f_min_145).std()
signal_bc_f_min_145 = signal.hon_norm_atac(signal_bc_f_min_145, perc, std)
signal_bc_f_min_145_slope = signal.slope(signal_bc_f_min_145, signal.sg_coefs)
signal_bc_r_min_145 = signal.boyle_norm(signal_bc_r_min_145)
perc = scoreatpercentile(signal_bc_r_min_145, 98)
std = np.array(signal_bc_r_min_145).std()
signal_bc_r_min_145 = signal.hon_norm_atac(signal_bc_r_min_145, perc, std)
signal_bc_r_min_145_slope = signal.slope(signal_bc_r_min_145, signal.sg_coefs)
output_fname = os.path.join(self.output_loc, "{}_b.txt".format(self.output_prefix))
with open(output_fname, "w") as f:
f.write("\t".join((map(str, signal_bc_f_max_145))) + "\n")
f.write("\t".join((map(str, signal_bc_f_max_145_slope))) + "\n")
f.write("\t".join((map(str, signal_bc_r_max_145))) + "\n")
f.write("\t".join((map(str, signal_bc_r_max_145_slope))) + "\n")
f.write("\t".join((map(str, signal_bc_f_min_145))) + "\n")
f.write("\t".join((map(str, signal_bc_f_min_145_slope))) + "\n")
f.write("\t".join((map(str, signal_bc_r_min_145))) + "\n")
f.write("\t".join((map(str, signal_bc_r_min_145_slope))) + "\n")
# Output signal of class c
signal_bc_f_145_307 = signal.boyle_norm(signal_bc_f_145_307)
perc = scoreatpercentile(signal_bc_f_145_307, 98)
std = np.array(signal_bc_f_145_307).std()
signal_bc_f_145_307 = signal.hon_norm_atac(signal_bc_f_145_307, perc, std)
signal_bc_f_145_307_slope = signal.slope(signal_bc_f_145_307, signal.sg_coefs)
signal_bc_r_145_307 = signal.boyle_norm(signal_bc_r_145_307)
perc = scoreatpercentile(signal_bc_r_145_307, 98)
std = np.array(signal_bc_r_145_307).std()
signal_bc_r_145_307 = signal.hon_norm_atac(signal_bc_r_145_307, perc, std)
signal_bc_r_145_307_slope = signal.slope(signal_bc_r_145_307, signal.sg_coefs)
output_fname = os.path.join(self.output_loc, "{}_c.txt".format(self.output_prefix))
with open(output_fname, "w") as f:
f.write("\t".join((map(str, signal_bc_f_max_145))) + "\n")
f.write("\t".join((map(str, signal_bc_f_max_145_slope))) + "\n")
f.write("\t".join((map(str, signal_bc_r_max_145))) + "\n")
f.write("\t".join((map(str, signal_bc_r_max_145_slope))) + "\n")
f.write("\t".join((map(str, signal_bc_f_145_307))) + "\n")
f.write("\t".join((map(str, signal_bc_f_145_307_slope))) + "\n")
f.write("\t".join((map(str, signal_bc_r_145_307))) + "\n")
f.write("\t".join((map(str, signal_bc_r_145_307_slope))) + "\n")
# Output signal of class d
signal_bc_f_min_307 = signal.boyle_norm(signal_bc_f_min_307)
perc = scoreatpercentile(signal_bc_f_min_307, 98)
std = np.array(signal_bc_f_min_307).std()
signal_bc_f_min_307 = signal.hon_norm_atac(signal_bc_f_min_307, perc, std)
signal_bc_f_min_307_slope = signal.slope(signal_bc_f_min_307, signal.sg_coefs)
signal_bc_r_min_307 = signal.boyle_norm(signal_bc_r_min_307)
perc = scoreatpercentile(signal_bc_r_min_307, 98)
std = np.array(signal_bc_r_min_307).std()
signal_bc_r_min_307 = signal.hon_norm_atac(signal_bc_r_min_307, perc, std)
signal_bc_r_min_307_slope = signal.slope(signal_bc_r_min_307, signal.sg_coefs)
output_fname = os.path.join(self.output_loc, "{}_d.txt".format(self.output_prefix))
with open(output_fname, "w") as f:
f.write("\t".join((map(str, signal_bc_f_max_145))) + "\n")
f.write("\t".join((map(str, signal_bc_f_max_145_slope))) + "\n")
f.write("\t".join((map(str, signal_bc_r_max_145))) + "\n")
f.write("\t".join((map(str, signal_bc_r_max_145_slope))) + "\n")
f.write("\t".join((map(str, signal_bc_f_145_307))) + "\n")
f.write("\t".join((map(str, signal_bc_f_145_307_slope))) + "\n")
f.write("\t".join((map(str, signal_bc_r_145_307))) + "\n")
f.write("\t".join((map(str, signal_bc_r_145_307_slope))) + "\n")
f.write("\t".join((map(str, signal_bc_f_min_307))) + "\n")
f.write("\t".join((map(str, signal_bc_f_min_307_slope))) + "\n")
f.write("\t".join((map(str, signal_bc_r_min_307))) + "\n")
f.write("\t".join((map(str, signal_bc_r_min_307_slope))) + "\n")
# Output signal of class e
signal_bc_f_307_500 = signal.boyle_norm(signal_bc_f_307_500)
perc = scoreatpercentile(signal_bc_f_307_500, 98)
std = np.array(signal_bc_f_307_500).std()
signal_bc_f_307_500 = signal.hon_norm_atac(signal_bc_f_307_500, perc, std)
signal_bc_f_307_500_slope = signal.slope(signal_bc_f_307_500, signal.sg_coefs)
signal_bc_r_307_500 = signal.boyle_norm(signal_bc_r_307_500)
perc = scoreatpercentile(signal_bc_r_307_500, 98)
std = np.array(signal_bc_r_307_500).std()
signal_bc_r_307_500 = signal.hon_norm_atac(signal_bc_r_307_500, perc, std)
signal_bc_r_307_500_slope = signal.slope(signal_bc_r_307_500, signal.sg_coefs)
output_fname = os.path.join(self.output_loc, "{}_e.txt".format(self.output_prefix))
with open(output_fname, "w") as f:
f.write("\t".join((map(str, signal_bc_f_max_145))) + "\n")
f.write("\t".join((map(str, signal_bc_f_max_145_slope))) + "\n")
f.write("\t".join((map(str, signal_bc_r_max_145))) + "\n")
f.write("\t".join((map(str, signal_bc_r_max_145_slope))) + "\n")
f.write("\t".join((map(str, signal_bc_f_145_307))) + "\n")
f.write("\t".join((map(str, signal_bc_f_145_307_slope))) + "\n")
f.write("\t".join((map(str, signal_bc_r_145_307))) + "\n")
f.write("\t".join((map(str, signal_bc_r_145_307_slope))) + "\n")
f.write("\t".join((map(str, signal_bc_f_307_500))) + "\n")
f.write("\t".join((map(str, signal_bc_f_307_500_slope))) + "\n")
f.write("\t".join((map(str, signal_bc_r_307_500))) + "\n")
f.write("\t".join((map(str, signal_bc_r_307_500_slope))) + "\n")
# Output signal of class f
signal_bc_f_min_500 = signal.boyle_norm(signal_bc_f_min_500)
perc = scoreatpercentile(signal_bc_f_min_500, 98)
std = np.array(signal_bc_f_min_500).std()
signal_bc_f_min_500 = signal.hon_norm_atac(signal_bc_f_min_500, perc, std)
signal_bc_f_min_500_slope = signal.slope(signal_bc_f_min_500, signal.sg_coefs)
signal_bc_r_min_500 = signal.boyle_norm(signal_bc_r_min_500)
perc = scoreatpercentile(signal_bc_r_min_500, 98)
std = np.array(signal_bc_r_min_500).std()
signal_bc_r_min_500 = signal.hon_norm_atac(signal_bc_r_min_500, perc, std)
signal_bc_r_min_500_slope = signal.slope(signal_bc_r_min_500, signal.sg_coefs)
output_fname = os.path.join(self.output_loc, "{}_f.txt".format(self.output_prefix))
with open(output_fname, "w") as f:
f.write("\t".join((map(str, signal_bc_f_max_145))) + "\n")
f.write("\t".join((map(str, signal_bc_f_max_145_slope))) + "\n")
f.write("\t".join((map(str, signal_bc_r_max_145))) + "\n")
f.write("\t".join((map(str, signal_bc_r_max_145_slope))) + "\n")
f.write("\t".join((map(str, signal_bc_f_145_307))) + "\n")
f.write("\t".join((map(str, signal_bc_f_145_307_slope))) + "\n")
f.write("\t".join((map(str, signal_bc_r_145_307))) + "\n")
f.write("\t".join((map(str, signal_bc_r_145_307_slope))) + "\n")
f.write("\t".join((map(str, signal_bc_f_307_500))) + "\n")
f.write("\t".join((map(str, signal_bc_f_307_500_slope))) + "\n")
f.write("\t".join((map(str, signal_bc_r_307_500))) + "\n")
f.write("\t".join((map(str, signal_bc_r_307_500_slope))) + "\n")
f.write("\t".join((map(str, signal_bc_f_min_500))) + "\n")
f.write("\t".join((map(str, signal_bc_f_min_500_slope))) + "\n")
f.write("\t".join((map(str, signal_bc_r_min_500))) + "\n")
f.write("\t".join((map(str, signal_bc_r_min_500_slope))) + "\n")
start = -(self.window_size / 2)
end = (self.window_size / 2) - 1
x = np.linspace(start, end, num=self.window_size)
fig, ax = plt.subplots(4, 2, figsize=(12, 12))
min_signal = min(signal_bc)
max_signal = max(signal_bc)
ax[0, 0].plot(x, signal_bc, color='red')
ax[0, 0].xaxis.set_ticks_position('bottom')
ax[0, 0].yaxis.set_ticks_position('left')
ax[0, 0].spines['top'].set_visible(False)
ax[0, 0].spines['right'].set_visible(False)
ax[0, 0].spines['left'].set_position(('outward', 15))
ax[0, 0].tick_params(direction='out')
ax[0, 0].set_title(self.output_prefix + " of all reads", fontweight='bold')
ax[0, 0].set_xticks([start, 0, end])
ax[0, 0].set_xticklabels([str(start), 0, str(end)])
ax[0, 0].set_yticks([min_signal, max_signal])
ax[0, 0].set_yticklabels([str(round(min_signal, 2)), str(round(max_signal, 2))], rotation=90)
ax[0, 0].set_xlim(start, end)
ax[0, 0].set_ylim([min_signal, max_signal])
min_signal = min(min(signal_bc_f), min(signal_bc_r))
max_signal = max(max(signal_bc_f), max(signal_bc_r))
ax[0, 1].plot(x, signal_bc_f, color='red', label='Forward')
ax[0, 1].plot(x, signal_bc_r, color='green', label='Reverse')
ax[0, 1].xaxis.set_ticks_position('bottom')
ax[0, 1].yaxis.set_ticks_position('left')
ax[0, 1].spines['top'].set_visible(False)
ax[0, 1].spines['right'].set_visible(False)
ax[0, 1].spines['left'].set_position(('outward', 15))
ax[0, 1].tick_params(direction='out')
ax[0, 1].set_title(self.output_prefix + " of all reads", fontweight='bold')
ax[0, 1].set_xticks([start, 0, end])
ax[0, 1].set_xticklabels([str(start), 0, str(end)])
ax[0, 1].set_yticks([min_signal, max_signal])
ax[0, 1].set_yticklabels([str(round(min_signal, 2)), str(round(max_signal, 2))], rotation=90)
ax[0, 1].set_xlim(start, end)
ax[0, 1].set_ylim([min_signal, max_signal])
ax[0, 1].legend(loc="upper right", frameon=False)
min_signal = min(min(signal_bc_f_max_145), min(signal_bc_r_max_145))
max_signal = max(max(signal_bc_f_max_145), max(signal_bc_r_max_145))
ax[1, 0].plot(x, signal_bc_f_max_145, color='red', label='Forward')
ax[1, 0].plot(x, signal_bc_r_max_145, color='green', label='Reverse')
ax[1, 0].xaxis.set_ticks_position('bottom')
ax[1, 0].yaxis.set_ticks_position('left')
ax[1, 0].spines['top'].set_visible(False)
ax[1, 0].spines['right'].set_visible(False)
ax[1, 0].spines['left'].set_position(('outward', 15))
ax[1, 0].tick_params(direction='out')
ax[1, 0].set_title(self.output_prefix + " from fragment length <= 145 ", fontweight='bold')
ax[1, 0].set_xticks([start, 0, end])
ax[1, 0].set_xticklabels([str(start), 0, str(end)])
ax[1, 0].set_yticks([min_signal, max_signal])
ax[1, 0].set_yticklabels([str(round(min_signal, 2)), str(round(max_signal, 2))], rotation=90)
ax[1, 0].set_xlim(start, end)
ax[1, 0].set_ylim([min_signal, max_signal])
ax[1, 0].legend(loc="upper right", frameon=False)
min_signal = min(min(signal_bc_f_min_145), min(signal_bc_r_min_145))
max_signal = max(max(signal_bc_f_min_145), max(signal_bc_r_min_145))
ax[1, 1].plot(x, signal_bc_f_min_145, color='red', label='Forward')
ax[1, 1].plot(x, signal_bc_r_min_145, color='green', label='Reverse')
ax[1, 1].xaxis.set_ticks_position('bottom')
ax[1, 1].yaxis.set_ticks_position('left')
ax[1, 1].spines['top'].set_visible(False)
ax[1, 1].spines['right'].set_visible(False)
ax[1, 1].spines['left'].set_position(('outward', 15))
ax[1, 1].tick_params(direction='out')
ax[1, 1].set_title(self.output_prefix + " from fragment length > 145 ", fontweight='bold')
ax[1, 1].set_xticks([start, 0, end])
ax[1, 1].set_xticklabels([str(start), 0, str(end)])
ax[1, 1].set_yticks([min_signal, max_signal])
ax[1, 1].set_yticklabels([str(round(min_signal, 2)), str(round(max_signal, 2))], rotation=90)
ax[1, 1].set_xlim(start, end)
ax[1, 1].set_ylim([min_signal, max_signal])
ax[1, 1].legend(loc="upper right", frameon=False)
min_signal = min(min(signal_bc_f_145_307), min(signal_bc_r_145_307))
max_signal = max(max(signal_bc_f_145_307), max(signal_bc_r_145_307))
ax[2, 0].plot(x, signal_bc_f_145_307, color='red', label='Forward')
ax[2, 0].plot(x, signal_bc_r_145_307, color='green', label='Reverse')
ax[2, 0].xaxis.set_ticks_position('bottom')
ax[2, 0].yaxis.set_ticks_position('left')
ax[2, 0].spines['top'].set_visible(False)
ax[2, 0].spines['right'].set_visible(False)
ax[2, 0].spines['left'].set_position(('outward', 15))
ax[2, 0].tick_params(direction='out')
ax[2, 0].set_title(self.output_prefix + " from fragment length > 145 and <= 307 ", fontweight='bold')
ax[2, 0].set_xticks([start, 0, end])
ax[2, 0].set_xticklabels([str(start), 0, str(end)])
ax[2, 0].set_yticks([min_signal, max_signal])
ax[2, 0].set_yticklabels([str(round(min_signal, 2)), str(round(max_signal, 2))], rotation=90)
ax[2, 0].set_xlim(start, end)
ax[2, 0].set_ylim([min_signal, max_signal])
ax[2, 0].legend(loc="upper right", frameon=False)
min_signal = min(min(signal_bc_f_min_307), min(signal_bc_r_min_307))
max_signal = max(max(signal_bc_f_min_307), max(signal_bc_r_min_307))
ax[2, 1].plot(x, signal_bc_f_min_307, color='red', label='Forward')
ax[2, 1].plot(x, signal_bc_r_min_307, color='green', label='Reverse')
ax[2, 1].xaxis.set_ticks_position('bottom')
ax[2, 1].yaxis.set_ticks_position('left')
ax[2, 1].spines['top'].set_visible(False)
ax[2, 1].spines['right'].set_visible(False)
ax[2, 1].spines['left'].set_position(('outward', 15))
ax[2, 1].tick_params(direction='out')
ax[2, 1].set_title(self.output_prefix + " from fragment length > 307 ", fontweight='bold')
ax[2, 1].set_xticks([start, 0, end])
ax[2, 1].set_xticklabels([str(start), 0, str(end)])
ax[2, 1].set_yticks([min_signal, max_signal])
ax[2, 1].set_yticklabels([str(round(min_signal, 2)), str(round(max_signal, 2))], rotation=90)
ax[2, 1].set_xlim(start, end)
ax[2, 1].set_ylim([min_signal, max_signal])
ax[2, 1].legend(loc="upper right", frameon=False)
min_signal = min(min(signal_bc_f_307_500), min(signal_bc_r_307_500))
max_signal = max(max(signal_bc_f_307_500), max(signal_bc_r_307_500))
ax[3, 0].plot(x, signal_bc_f_307_500, color='red', label='Forward')
ax[3, 0].plot(x, signal_bc_r_307_500, color='green', label='Reverse')
ax[3, 0].xaxis.set_ticks_position('bottom')
ax[3, 0].yaxis.set_ticks_position('left')
ax[3, 0].spines['top'].set_visible(False)
ax[3, 0].spines['right'].set_visible(False)
ax[3, 0].spines['left'].set_position(('outward', 15))
ax[3, 0].tick_params(direction='out')
ax[3, 0].set_title(self.output_prefix + " from fragment length > 307 and <= 500 ", fontweight='bold')
ax[3, 0].set_xticks([start, 0, end])
ax[3, 0].set_xticklabels([str(start), 0, str(end)])
ax[3, 0].set_yticks([min_signal, max_signal])
ax[3, 0].set_yticklabels([str(round(min_signal, 2)), str(round(max_signal, 2))], rotation=90)
ax[3, 0].set_xlim(start, end)
ax[3, 0].set_ylim([min_signal, max_signal])
ax[3, 0].legend(loc="upper right", frameon=False)
min_signal = min(min(signal_bc_f_min_500), min(signal_bc_r_min_500))
max_signal = max(max(signal_bc_f_min_500), max(signal_bc_r_min_500))
ax[3, 1].plot(x, signal_bc_f_min_500, color='red', label='Forward')
ax[3, 1].plot(x, signal_bc_r_min_500, color='green', label='Reverse')
ax[3, 1].xaxis.set_ticks_position('bottom')
ax[3, 1].yaxis.set_ticks_position('left')
ax[3, 1].spines['top'].set_visible(False)
ax[3, 1].spines['right'].set_visible(False)
ax[3, 1].spines['left'].set_position(('outward', 15))
ax[3, 1].tick_params(direction='out')
ax[3, 1].set_title(self.output_prefix + " from fragment length > 500 ", fontweight='bold')
ax[3, 1].set_xticks([start, 0, end])
ax[3, 1].set_xticklabels([str(start), 0, str(end)])
ax[3, 1].set_yticks([min_signal, max_signal])
ax[3, 1].set_yticklabels([str(round(min_signal, 2)), str(round(max_signal, 2))], rotation=90)
ax[3, 1].set_xlim(start, end)
ax[3, 1].set_ylim([min_signal, max_signal])
ax[3, 1].legend(loc="upper right", frameon=False)
figure_name = os.path.join(self.output_loc, "{}.pdf".format(self.output_prefix))
fig.subplots_adjust(bottom=.2, hspace=.5)
fig.tight_layout()
fig.savefig(figure_name, format="pdf", dpi=300)
def unstrand_line_by_size(self, bias_tables):
genome_data = GenomeData(self.organism)
fasta = Fastafile(genome_data.get_genome())
mpbs_regions = GenomicRegionSet("Motif Predicted Binding Sites")
mpbs_regions.read(self.motif_file)
bam = Samfile(self.reads_file, "rb")
table = BiasTable()
bias_table_list = bias_tables.split(",")
bias_table = table.load_table(table_file_name_F=bias_table_list[0],
table_file_name_R=bias_table_list[1])
signal = GenomicSignal()
signal.load_sg_coefs(9)
signal_bc_max_145 = np.zeros(self.window_size)
signal_bc_min_145 = np.zeros(self.window_size)
signal_bc_145_307 = np.zeros(self.window_size)
signal_bc_min_307 = np.zeros(self.window_size)
signal_bc_307_500 = np.zeros(self.window_size)
signal_bc_min_500 = np.zeros(self.window_size)
signal_bc = np.zeros(self.window_size)
for region in mpbs_regions:
if str(region.name).split(":")[-1] == "Y":
mid = (region.initial + region.final) / 2
p1 = mid - (self.window_size / 2)
p2 = mid + (self.window_size / 2)
bc_max_145 = \
signal.get_bc_signal_by_fragment_length(ref=region.chrom, start=p1, end=p2,
bam=bam, fasta=fasta, bias_table=bias_table,
forward_shift=self.forward_shift,
reverse_shift=self.reverse_shift,
min_length=None, max_length=145, strand=False)
bc_min_145 = \
signal.get_bc_signal_by_fragment_length(ref=region.chrom, start=p1, end=p2,
bam=bam, fasta=fasta, bias_table=bias_table,
forward_shift=self.forward_shift,
reverse_shift=self.reverse_shift,
min_length=145, max_length=None, strand=False)
bc_145_307 = \
signal.get_bc_signal_by_fragment_length(ref=region.chrom, start=p1, end=p2,
bam=bam, fasta=fasta, bias_table=bias_table,
forward_shift=self.forward_shift,
reverse_shift=self.reverse_shift,
min_length=145, max_length=307, strand=False)
bc_min_307 = \
signal.get_bc_signal_by_fragment_length(ref=region.chrom, start=p1, end=p2,
bam=bam, fasta=fasta, bias_table=bias_table,
forward_shift=self.forward_shift,
reverse_shift=self.reverse_shift,
min_length=307, max_length=None, strand=False)
bc_307_500 = \
signal.get_bc_signal_by_fragment_length(ref=region.chrom, start=p1, end=p2,
bam=bam, fasta=fasta, bias_table=bias_table,
forward_shift=self.forward_shift,
reverse_shift=self.reverse_shift,
min_length=307, max_length=500, strand=False)
bc_min_500 = \
signal.get_bc_signal_by_fragment_length(ref=region.chrom, start=p1, end=p2,
bam=bam, fasta=fasta, bias_table=bias_table,
forward_shift=self.forward_shift,
reverse_shift=self.reverse_shift,
min_length=500, max_length=None, strand=False)
bc = \
signal.get_bc_signal_by_fragment_length(ref=region.chrom, start=p1, end=p2,
bam=bam, fasta=fasta, bias_table=bias_table,
forward_shift=self.forward_shift,
reverse_shift=self.reverse_shift,
min_length=None, max_length=None, strand=False)
signal_bc_max_145 = np.add(signal_bc_max_145, bc_max_145)
signal_bc_min_145 = np.add(signal_bc_min_145, bc_min_145)
signal_bc_145_307 = np.add(signal_bc_145_307, bc_145_307)
signal_bc_min_307 = np.add(signal_bc_min_307, bc_min_307)
signal_bc_307_500 = np.add(signal_bc_307_500, bc_307_500)
signal_bc_min_500 = np.add(signal_bc_min_500, bc_min_500)
signal_bc = np.add(signal_bc, bc)
# Pre-process signal
signal_bc = signal.boyle_norm(signal_bc)
perc = scoreatpercentile(signal_bc, 98)
std = np.array(signal_bc).std()
signal_bc = signal.hon_norm_atac(signal_bc, perc, std)
signal_bc_slope = signal.slope(signal_bc, signal.sg_coefs)
# Output the norm and slope signal
output_fname = os.path.join(self.output_loc, "{}.txt".format(self.output_prefix))
with open(output_fname, "w") as f:
f.write("\t".join((map(str, signal_bc))) + "\n")
f.write("\t".join((map(str, signal_bc_slope))) + "\n")
# Output signal of class a
signal_bc_max_145 = signal.boyle_norm(signal_bc_max_145)
perc = scoreatpercentile(signal_bc_max_145, 98)
std = np.array(signal_bc_max_145).std()
signal_bc_max_145 = signal.hon_norm_atac(signal_bc_max_145, perc, std)
signal_bc_max_145_slope = signal.slope(signal_bc_max_145, signal.sg_coefs)
output_fname = os.path.join(self.output_loc, "{}_a.txt".format(self.output_prefix))
with open(output_fname, "w") as f:
f.write("\t".join((map(str, signal_bc_max_145))) + "\n")
f.write("\t".join((map(str, signal_bc_max_145_slope))) + "\n")
# Output signal of class b
signal_bc_min_145 = signal.boyle_norm(signal_bc_min_145)
perc = scoreatpercentile(signal_bc_min_145, 98)
std = np.array(signal_bc_min_145).std()
signal_bc_min_145 = signal.hon_norm_atac(signal_bc_min_145, perc, std)
signal_bc_min_145_slope = signal.slope(signal_bc_min_145, signal.sg_coefs)
output_fname = os.path.join(self.output_loc, "{}_b.txt".format(self.output_prefix))
with open(output_fname, "w") as f:
f.write("\t".join((map(str, signal_bc_max_145))) + "\n")
f.write("\t".join((map(str, signal_bc_max_145_slope))) + "\n")
f.write("\t".join((map(str, signal_bc_min_145))) + "\n")
f.write("\t".join((map(str, signal_bc_min_145_slope))) + "\n")
# Output signal of class c
signal_bc_145_307 = signal.boyle_norm(signal_bc_145_307)
perc = scoreatpercentile(signal_bc_145_307, 98)
std = np.array(signal_bc_145_307).std()
signal_bc_145_307 = signal.hon_norm_atac(signal_bc_145_307, perc, std)
signal_bc_145_307_slope = signal.slope(signal_bc_145_307, signal.sg_coefs)
output_fname = os.path.join(self.output_loc, "{}_c.txt".format(self.output_prefix))
with open(output_fname, "w") as f:
f.write("\t".join((map(str, signal_bc_max_145))) + "\n")
f.write("\t".join((map(str, signal_bc_max_145_slope))) + "\n")
f.write("\t".join((map(str, signal_bc_145_307))) + "\n")
f.write("\t".join((map(str, signal_bc_145_307_slope))) + "\n")
# Output signal of class d
signal_bc_min_307 = signal.boyle_norm(signal_bc_min_307)
perc = scoreatpercentile(signal_bc_min_307, 98)
std = np.array(signal_bc_min_307).std()
signal_bc_min_307 = signal.hon_norm_atac(signal_bc_min_307, perc, std)
signal_bc_min_307_slope = signal.slope(signal_bc_min_307, signal.sg_coefs)
output_fname = os.path.join(self.output_loc, "{}_d.txt".format(self.output_prefix))
with open(output_fname, "w") as f:
f.write("\t".join((map(str, signal_bc_max_145))) + "\n")
f.write("\t".join((map(str, signal_bc_max_145_slope))) + "\n")
f.write("\t".join((map(str, signal_bc_145_307))) + "\n")
f.write("\t".join((map(str, signal_bc_145_307_slope))) + "\n")
f.write("\t".join((map(str, signal_bc_min_307))) + "\n")
f.write("\t".join((map(str, signal_bc_min_307_slope))) + "\n")
# Output signal of class e
signal_bc_307_500 = signal.boyle_norm(signal_bc_307_500)
perc = scoreatpercentile(signal_bc_307_500, 98)
std = np.array(signal_bc_307_500).std()
signal_bc_307_500 = signal.hon_norm_atac(signal_bc_307_500, perc, std)
signal_bc_307_500_slope = signal.slope(signal_bc_307_500, signal.sg_coefs)
output_fname = os.path.join(self.output_loc, "{}_e.txt".format(self.output_prefix))
with open(output_fname, "w") as f:
f.write("\t".join((map(str, signal_bc_max_145))) + "\n")
f.write("\t".join((map(str, signal_bc_max_145_slope))) + "\n")
f.write("\t".join((map(str, signal_bc_145_307))) + "\n")
f.write("\t".join((map(str, signal_bc_145_307_slope))) + "\n")
f.write("\t".join((map(str, signal_bc_307_500))) + "\n")
f.write("\t".join((map(str, signal_bc_307_500_slope))) + "\n")
# Output signal of class f
signal_bc_min_500 = signal.boyle_norm(signal_bc_min_500)
perc = scoreatpercentile(signal_bc_min_500, 98)
std = np.array(signal_bc_min_500).std()
signal_bc_min_500 = signal.hon_norm_atac(signal_bc_min_500, perc, std)
signal_bc_min_500_slope = signal.slope(signal_bc_min_500, signal.sg_coefs)
output_fname = os.path.join(self.output_loc, "{}_f.txt".format(self.output_prefix))
with open(output_fname, "w") as f:
f.write("\t".join((map(str, signal_bc_max_145))) + "\n")
f.write("\t".join((map(str, signal_bc_max_145_slope))) + "\n")
f.write("\t".join((map(str, signal_bc_145_307))) + "\n")
f.write("\t".join((map(str, signal_bc_145_307_slope))) + "\n")
f.write("\t".join((map(str, signal_bc_307_500))) + "\n")
f.write("\t".join((map(str, signal_bc_307_500_slope))) + "\n")
f.write("\t".join((map(str, signal_bc_min_500))) + "\n")
f.write("\t".join((map(str, signal_bc_min_500_slope))) + "\n")
start = -(self.window_size / 2)
end = (self.window_size / 2) - 1
x = np.linspace(start, end, num=self.window_size)
fig, ax = plt.subplots(4, 2, figsize=(12, 12))
min_signal = min(signal_bc)
max_signal = max(signal_bc)
ax[0, 0].plot(x, signal_bc, color='red')
ax[0, 0].xaxis.set_ticks_position('bottom')
ax[0, 0].yaxis.set_ticks_position('left')
ax[0, 0].spines['top'].set_visible(False)
ax[0, 0].spines['right'].set_visible(False)
ax[0, 0].spines['left'].set_position(('outward', 15))
ax[0, 0].tick_params(direction='out')
ax[0, 0].set_title(self.output_prefix + " of all reads", fontweight='bold')
ax[0, 0].set_xticks([start, 0, end])
ax[0, 0].set_xticklabels([str(start), 0, str(end)])
ax[0, 0].set_yticks([min_signal, max_signal])
ax[0, 0].set_yticklabels([str(round(min_signal, 2)), str(round(max_signal, 2))], rotation=90)
ax[0, 0].set_xlim(start, end)
ax[0, 0].set_ylim([min_signal, max_signal])
min_signal = min(signal_bc)
max_signal = max(signal_bc)
ax[0, 1].plot(x, signal_bc, color='red')
ax[0, 1].xaxis.set_ticks_position('bottom')
ax[0, 1].yaxis.set_ticks_position('left')
ax[0, 1].spines['top'].set_visible(False)
ax[0, 1].spines['right'].set_visible(False)
ax[0, 1].spines['left'].set_position(('outward', 15))
ax[0, 1].tick_params(direction='out')
ax[0, 1].set_title(self.output_prefix + " of all reads", fontweight='bold')
ax[0, 1].set_xticks([start, 0, end])
ax[0, 1].set_xticklabels([str(start), 0, str(end)])
ax[0, 1].set_yticks([min_signal, max_signal])
ax[0, 1].set_yticklabels([str(round(min_signal, 2)), str(round(max_signal, 2))], rotation=90)
ax[0, 1].set_xlim(start, end)
ax[0, 1].set_ylim([min_signal, max_signal])
min_signal = min(signal_bc_max_145)
max_signal = max(signal_bc_max_145)
ax[1, 0].plot(x, signal_bc_max_145, color='red')
ax[1, 0].xaxis.set_ticks_position('bottom')
ax[1, 0].yaxis.set_ticks_position('left')
ax[1, 0].spines['top'].set_visible(False)
ax[1, 0].spines['right'].set_visible(False)
ax[1, 0].spines['left'].set_position(('outward', 15))
ax[1, 0].tick_params(direction='out')
ax[1, 0].set_title(self.output_prefix + " from fragment length <= 145 ", fontweight='bold')
ax[1, 0].set_xticks([start, 0, end])
ax[1, 0].set_xticklabels([str(start), 0, str(end)])
ax[1, 0].set_yticks([min_signal, max_signal])
ax[1, 0].set_yticklabels([str(round(min_signal, 2)), str(round(max_signal, 2))], rotation=90)
ax[1, 0].set_xlim(start, end)
ax[1, 0].set_ylim([min_signal, max_signal])
min_signal = min(signal_bc_min_145)
max_signal = max(signal_bc_min_145)
ax[1, 1].plot(x, signal_bc_min_145, color='red')
ax[1, 1].xaxis.set_ticks_position('bottom')
ax[1, 1].yaxis.set_ticks_position('left')
ax[1, 1].spines['top'].set_visible(False)
ax[1, 1].spines['right'].set_visible(False)
ax[1, 1].spines['left'].set_position(('outward', 15))
ax[1, 1].tick_params(direction='out')
ax[1, 1].set_title(self.output_prefix + " from fragment length > 145 ", fontweight='bold')
ax[1, 1].set_xticks([start, 0, end])
ax[1, 1].set_xticklabels([str(start), 0, str(end)])
ax[1, 1].set_yticks([min_signal, max_signal])
ax[1, 1].set_yticklabels([str(round(min_signal, 2)), str(round(max_signal, 2))], rotation=90)
ax[1, 1].set_xlim(start, end)
ax[1, 1].set_ylim([min_signal, max_signal])
min_signal = min(signal_bc_145_307)
max_signal = max(signal_bc_145_307)
ax[2, 0].plot(x, signal_bc_145_307, color='red')
ax[2, 0].xaxis.set_ticks_position('bottom')
ax[2, 0].yaxis.set_ticks_position('left')
ax[2, 0].spines['top'].set_visible(False)
ax[2, 0].spines['right'].set_visible(False)
ax[2, 0].spines['left'].set_position(('outward', 15))
ax[2, 0].tick_params(direction='out')
ax[2, 0].set_title(self.output_prefix + " from fragment length > 145 and <= 307 ", fontweight='bold')
ax[2, 0].set_xticks([start, 0, end])
ax[2, 0].set_xticklabels([str(start), 0, str(end)])
ax[2, 0].set_yticks([min_signal, max_signal])
ax[2, 0].set_yticklabels([str(round(min_signal, 2)), str(round(max_signal, 2))], rotation=90)
ax[2, 0].set_xlim(start, end)
ax[2, 0].set_ylim([min_signal, max_signal])
min_signal = min(signal_bc_min_307)
max_signal = max(signal_bc_min_307)
ax[2, 1].plot(x, signal_bc_min_307, color='red')
ax[2, 1].xaxis.set_ticks_position('bottom')
ax[2, 1].yaxis.set_ticks_position('left')
ax[2, 1].spines['top'].set_visible(False)
ax[2, 1].spines['right'].set_visible(False)
ax[2, 1].spines['left'].set_position(('outward', 15))
ax[2, 1].tick_params(direction='out')
ax[2, 1].set_title(self.output_prefix + " from fragment length > 307 ", fontweight='bold')
ax[2, 1].set_xticks([start, 0, end])
ax[2, 1].set_xticklabels([str(start), 0, str(end)])
ax[2, 1].set_yticks([min_signal, max_signal])
ax[2, 1].set_yticklabels([str(round(min_signal, 2)), str(round(max_signal, 2))], rotation=90)
ax[2, 1].set_xlim(start, end)
ax[2, 1].set_ylim([min_signal, max_signal])
min_signal = min(signal_bc_307_500)
max_signal = max(signal_bc_307_500)
ax[3, 0].plot(x, signal_bc_307_500, color='red')
ax[3, 0].xaxis.set_ticks_position('bottom')
ax[3, 0].yaxis.set_ticks_position('left')
ax[3, 0].spines['top'].set_visible(False)
ax[3, 0].spines['right'].set_visible(False)
ax[3, 0].spines['left'].set_position(('outward', 15))
ax[3, 0].tick_params(direction='out')
ax[3, 0].set_title(self.output_prefix + " from fragment length > 307 and <= 500 ", fontweight='bold')
ax[3, 0].set_xticks([start, 0, end])
ax[3, 0].set_xticklabels([str(start), 0, str(end)])
ax[3, 0].set_yticks([min_signal, max_signal])
ax[3, 0].set_yticklabels([str(round(min_signal, 2)), str(round(max_signal, 2))], rotation=90)
ax[3, 0].set_xlim(start, end)
ax[3, 0].set_ylim([min_signal, max_signal])
min_signal = min(signal_bc_min_500)
max_signal = max(signal_bc_min_500)
ax[3, 1].plot(x, signal_bc_min_500, color='red')
ax[3, 1].xaxis.set_ticks_position('bottom')
ax[3, 1].yaxis.set_ticks_position('left')
ax[3, 1].spines['top'].set_visible(False)
ax[3, 1].spines['right'].set_visible(False)
ax[3, 1].spines['left'].set_position(('outward', 15))
ax[3, 1].tick_params(direction='out')
ax[3, 1].set_title(self.output_prefix + " from fragment length > 500 ", fontweight='bold')
ax[3, 1].set_xticks([start, 0, end])
ax[3, 1].set_xticklabels([str(start), 0, str(end)])
ax[3, 1].set_yticks([min_signal, max_signal])
ax[3, 1].set_yticklabels([str(round(min_signal, 2)), str(round(max_signal, 2))], rotation=90)
ax[3, 1].set_xlim(start, end)
ax[3, 1].set_ylim([min_signal, max_signal])
figure_name = os.path.join(self.output_loc, "{}.pdf".format(self.output_prefix))
fig.subplots_adjust(bottom=.2, hspace=.5)
fig.tight_layout()
fig.savefig(figure_name, format="pdf", dpi=300)
def distribution_of_frag_length(self, reads_file=None, motif_file=None):
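        # Note: the reads_file argument is unused (reads always come from
        # self.reads_file), and motif_file only selects the branch below; the
        # regions themselves are read from self.motif_file.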
bam = Samfile(self.reads_file, "rb")
lengths = list()
lengths_shorter_than_145 = list()
lengths_between_146_307 = list()
lengths_between_307_500 = list()
lengths_longer_than_500 = list()
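        # Same fragment-length bins as elsewhere in this module:
        # <= 145, 146-307, 308-500 and > 500 bp.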
if motif_file is None:
# Use all fragments to plot the distribution
for read in bam.fetch():
if read.is_read1:
length = abs(read.template_length)
lengths.append(length)
if length <= 145: lengths_shorter_than_145.append(length)
if 145 < length <= 307: lengths_between_146_307.append(length)
if 307 < length <= 500: lengths_between_307_500.append(length)
if length > 500: lengths_longer_than_500.append(length)
elif motif_file is not None:
mpbs_regions = GenomicRegionSet("Motif Predicted Binding Sites")
mpbs_regions.read(self.motif_file)
for region in mpbs_regions:
if str(region.name).split(":")[-1] == "Y":
                    # Center a window of length window_size on the motif midpoint
mid = (region.initial + region.final) / 2
p1 = mid - (self.window_size / 2)
p2 = mid + (self.window_size / 2)
for read in bam.fetch(region.chrom, p1, p2):
if read.is_read1:
length = abs(read.template_length)
lengths.append(length)
if length <= 145: lengths_shorter_than_145.append(length)
if 145 < length <= 307: lengths_between_146_307.append(length)
if 307 < length <= 500: lengths_between_307_500.append(length)
if length > 500: lengths_longer_than_500.append(length)
# output the plot
unique, counts = np.unique(lengths, return_counts=True)
count_list = list()
length_dict = dict(zip(unique, counts))
sort_keys = sorted(length_dict.keys())
txt_fname = os.path.join(self.output_loc, "{}.txt".format(self.output_prefix))
with open(txt_fname, "w") as f:
for key in sort_keys:
f.write(str(key) + "\t" + str(length_dict[key]) + "\n")
count_list.append(length_dict[key])
fig, ax = plt.subplots(5, 1, figsize=(6, 15))
mean = np.mean(lengths)
std = np.std(lengths)
ax[0].plot(unique, counts)
ax[0].set_title("Histogram of all fragment length", fontweight='bold')
ax[0].text(0.7, 0.9, "number of fragments:{}".format(str(len(lengths))), verticalalignment='center',
horizontalalignment='center', transform=ax[0].transAxes, fontsize=12)
ax[0].text(0.8, 0.8, "mean:{}".format(str(round(mean, 2))), verticalalignment='center',
horizontalalignment='center', transform=ax[0].transAxes, fontsize=12)
ax[0].text(0.8, 0.7, "std:{}".format(str(round(std, 2))), verticalalignment='center',
horizontalalignment='center', transform=ax[0].transAxes, fontsize=12)
mean = np.mean(lengths_shorter_than_145)
"""
Auxiliary functions for cvxpy formulations.
"""
# <NAME> <<EMAIL>>
# Copyright (C) 2020
import numpy as np
def monotonic_trend_constraints(monotonic_trend, c, D, t=None):
if monotonic_trend in ("ascending", "convex"):
return D @ c >= 0
elif monotonic_trend in ("descending", "concave"):
return D @ c <= 0
elif monotonic_trend == "valley":
return [D[:t, :] @ c <= 0, D[t:, :] @ c >= 0]
elif monotonic_trend == "peak":
return [D[:t, :] @ c >= 0, D[t:, :] @ c <= 0]
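# Minimal usage sketch (not part of the original module; cvxpy assumed
# installed, names below are illustrative): fit an ascending sequence to
# noisy data with the constraint helper above, using a first-difference
# matrix D whose rows compute c[i+1] - c[i].
def _demo_monotonic_fit():
    import cvxpy as cp
    y_obs = np.array([0.1, 0.3, 0.25, 0.6, 0.9])
    D = np.diff(np.eye(y_obs.size), axis=0)  # (n-1, n) difference operator
    c = cp.Variable(y_obs.size)
    cons = monotonic_trend_constraints("ascending", c, D)
    cons = cons if isinstance(cons, list) else [cons]
    cp.Problem(cp.Minimize(cp.sum_squares(c - y_obs)), cons).solve()
    return c.value  # monotone non-decreasing fit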
def compute_change_point(x, y, splits, order, monotonic_trend):
n_splits = len(splits)
n_bins = n_splits + 1
indices = np.searchsorted(splits, x, side='right')
# -*- coding: utf-8 -*-
# *****************************************************************************
# NICOS, the Networked Instrument Control System of the MLZ
# Copyright (c) 2009-2022 by the NICOS contributors (see AUTHORS)
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Module authors:
# <NAME> <<EMAIL>>
#
# *****************************************************************************
"""Implementation of the ARIANE based scan optimizations.
See https://jugit.fz-juelich.de/ainx/ariane
Requires a running ARIANE server, whose location is configured as the
`ariane_host` parameter of the experiment device.
"""
import json
from time import time as currenttime
import numpy as np
import zmq
from nicos import session
from nicos.commands import usercommand
from nicos.commands.scan import _handleScanArgs, _infostr
from nicos.commands.tas import _getQ
from nicos.core import CommunicationError, NicosError, UsageError, SIMULATION
from nicos.core.scan import QScan
from nicos.utils.messaging import nicos_zmq_ctx
from nicos_mlz.panda.gui.ariane_vis import make_map, update_map
__all__ = ['qaigrid', 'qaiscan']
@usercommand
def qaigrid(Q0, Q1, Q2, *args, **kwargs):
"""Make a grid within a Q/E plane, usable as the initial points for a
`qaiscan()`.
You need to provide a rectangle of Q space to measure in, given by three
Q vectors to lower-left, lower-right and upper-left corners. For example,
>>> qaigrid([1, 0, 0, 0], [2, 0, 0, 0], [1, 0, 0, 5], ...)
would place the grid in this rectangle::
[1, 0, 0, 5] +--------+ [2, 0, 0, 5]
| |
| |
[1, 0, 0, 0] +--------+ [2, 0, 0, 0]
Special keyword arguments:
* initpoints: specify the size of initial grid to measure, can also
be [n1, n2] for different size along the axes
Other arguments are interpreted as for a normal `qscan`.
"""
kwargs['initonly'] = True
qaiscan(Q0, Q1, Q2, *args, **kwargs)
@usercommand
def qaiscan(Q0, Q1, Q2, *args, **kwargs):
"""Scan within a Q/E plane, letting the ARIANE algorithm select the
precise scan locations.
You need to provide a rectangle of Q space to measure in, given by three
Q vectors to lower-left, lower-right and upper-left corners. For example,
>>> qaiscan([1, 0, 0, 0], [2, 0, 0, 0], [1, 0, 0, 5], ...)
would scan in this rectangle::
[1, 0, 0, 5] +--------+ [2, 0, 0, 5]
| |
| |
[1, 0, 0, 0] +--------+ [2, 0, 0, 0]
Special keyword arguments:
* initpoints: specify the size of initial grid to measure, can also
be [n1, n2] for different size along the axes
* initscans: specify a scan number or list of scan numbers to use as the
initial points, instead of measuring them again
* maxpoints: specify the maximum total number of points to measure
* maxtime: specify the maximum total time for the scan to take,
including instrument moves, in seconds
* background: specify a threshold in detector counts, below which
counts are considered as background and removed for consideration by
the algorithm.
* intensity_threshold: specify a threshold in detector counts,
above which exact counts are not used by the AI algorithm
to determine the next point.
Other arguments are interpreted as for a normal `qscan`.
"""
# TODO: allow selecting counter/monitor columns
maxpts = kwargs.pop('maxpoints', None)
maxtime = kwargs.pop('maxtime', None)
initpts = kwargs.pop('initpoints', 11)
initscans = kwargs.pop('initscans', None)
initres = kwargs.pop('initres', None)
initonly = kwargs.pop('initonly', False)
int_threshold = kwargs.pop('intensity_threshold', None)
background = kwargs.pop('background', None)
if not isinstance(initpts, (list, tuple)):
initpts = [int(initpts), int(initpts)]
q0 = np.array(_getQ(Q0, 'Q0')).astype(float)
q1 = np.array(_getQ(Q1, 'Q1')).astype(float)
q2 = np.array(_getQ(Q2, 'Q2')).astype(float)
scanstr = _infostr('qaigrid' if initonly else 'qaiscan',
(Q0, Q1, Q2) + args, kwargs)
preset, scaninfo, detlist, envlist, move, multistep = \
_handleScanArgs(args, kwargs, scanstr)
if multistep:
raise UsageError('Multi-step scan points are impossible with ARIANE')
scan = QAIScan(q0, q1, q2, initpts=initpts, initscans=initscans, initres=initres,
initonly=initonly, maxpts=maxpts, maxtime=maxtime,
background=background, int_threshold=int_threshold,
firstmoves=move, detlist=detlist, envlist=envlist,
preset=preset, scaninfo=scaninfo)
qaiscan.last = scan # for debugging purposes
scan.run()
class QAIScan(QScan):
"""Specialized Scan class for ARIANE scans."""
ID = b'ARIANE'
_socket = None
# pylint: disable=too-many-arguments
def __init__(self, q0, q1, q2, initpts, initscans, initres, initonly, maxpts,
maxtime, background, int_threshold, firstmoves=None,
detlist=None, envlist=None, preset=None, scaninfo=None):
# first establish connection
hostport = getattr(session.experiment, 'ariane_host', None)
if not hostport:
raise UsageError('Cannot perform scan: ARIANE not configured')
if session.mode != SIMULATION:
self._connect(hostport)
# ensure service is up and running
version = self._request('ping')['version']
session.log.debug('connected to ARIANE version %s', version)
# conversion between Q/E and AI coordinates
# apply heuristics to make q0 and the directions as simple as possible
# by adjusting the limits
offset, dir1, lim1, dir2, lim2 = determine_axes(q0, q1, q2)
# see _pos2loc and _loc2pos for the conversions using these matrices
self._q0 = offset
self._qw = np.concatenate((dir1.reshape((4, 1)),
dir2.reshape((4, 1))), axis=1)
self._invqw = np.linalg.inv(self._qw.T @ self._qw)
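# (snippet truncated here) self._invqw is the Gram-matrix inverse of the two
# in-plane direction vectors; projecting a 4-vector Q/E position onto the
# plane is then a least-squares solve via the normal equations,
# loc = inv(qw^T qw) @ qw^T @ (pos - q0).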
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
import logging
import glob
import os
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from collections import namedtuple
import numpy as np
import datetime
logging.basicConfig(
level='INFO', format='%(asctime)s : %(message)s')
logger = logging.getLogger(__name__)
plt.style.use(['seaborn-talk'])
class SourceFile(object):
score_headings = ['excellent', 'very_good', 'average', 'poor', 'terrible']
score_values = [5, 2, 0, -2, -5]
def __init__(self, name, filename):
self.filename = filename
self.name = name
self.timestamps = []
def parse(self):
scores = []
with open(self.filename) as infile:
for line in infile:
line = line.strip()
contents = line.split()
timestamp = int(contents[0])
upload_data = {
part.split(':')[0]: int(part.split(':')[1])
for part in contents[1:]
}
row = [timestamp] + [upload_data[heading]
for heading in self.score_headings]
scores.append(tuple(row))
dtype = [('timestamp', int)] + \
list(zip(self.score_headings, [int] * len(self.score_headings)))
return np.array(scores, dtype=dtype)
def render_to(self, name, filename):
data = self.parse()
totals = []
for row in data:
totals.append(np.sum([row[heading] for heading in self.score_headings]))
"""Helper for evaluation on the Labeled Faces in the Wild dataset
"""
# MIT License
#
# Copyright (c) 2016 <NAME>
# Copyright (c) 2021 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
from utils import utils
from dataset.dataset_np_ipc import Dataset
import os
import argparse
import sys
import numpy as np
from scipy import misc
from scipy import interpolate
import sklearn
import cv2
import math
import datetime
import pickle
from sklearn.decomposition import PCA
import mxnet as mx
from mxnet import ndarray as nd
import _pickle as cPickle
from utils.utils import KFold
def calculate_roc(embeddings1, embeddings2, actual_issame, compare_func, nrof_folds=10):
assert(embeddings1.shape[0] == embeddings2.shape[0])
assert(embeddings1.shape[1] == embeddings2.shape[1])
nrof_pairs = min(len(actual_issame), embeddings1.shape[0])
k_fold = KFold(nrof_pairs, n_folds=nrof_folds, shuffle=False)
accuracy = np.zeros((nrof_folds))
indices = np.arange(nrof_pairs)
#print('pca', pca)
accuracies = np.zeros(nrof_folds, dtype=np.float32)
thresholds = np.zeros(nrof_folds, dtype=np.float32)
dist = compare_func(embeddings1, embeddings2)
for fold_idx, (train_set, test_set) in enumerate(k_fold):
#print('train_set', train_set)
#print('test_set', test_set)
# Find the best threshold for the fold
from evaluation import metrics
train_score = dist[train_set]
train_labels = actual_issame[train_set] == 1
acc, thresholds[fold_idx] = metrics.accuracy(train_score, train_labels)
# print('train acc', acc, thresholds[i])
# Testing
test_score = dist[test_set]
accuracies[fold_idx], _ = metrics.accuracy(test_score, actual_issame[test_set]==1, np.array([thresholds[fold_idx]]))
accuracy = np.mean(accuracies)
threshold = np.mean(thresholds)
return accuracy, threshold
def evaluate(embeddings, actual_issame, compare_func, nrof_folds=10, keep_idxes=None):
# Calculate evaluation metrics
embeddings1 = embeddings[0::2]
embeddings2 = embeddings[1::2]
actual_issame = np.asarray(actual_issame)
if keep_idxes is not None:
embeddings1 = embeddings1[keep_idxes]
embeddings2 = embeddings2[keep_idxes]
actual_issame = actual_issame[keep_idxes]
return calculate_roc(embeddings1, embeddings2,
actual_issame, compare_func, nrof_folds=nrof_folds)
def load_bin(path, image_size):
print(path, image_size)
with open(path, 'rb') as f:
if 'lfw_all' in path:
bins, issame_list = pickle.load(f)
else:
bins, issame_list = pickle.load(f, encoding='latin1')
data_list = []
for flip in [0]:
data = nd.empty((len(issame_list)*2, image_size[0], image_size[1], 3))
data_list.append(data)
print(len(bins))
for i in range(len(issame_list)*2):
_bin = bins[i]
# print(type(_bin))
img = mx.image.imdecode(_bin)
# img = nd.transpose(img, axes=(2, 0, 1))
for flip in [0]:
if flip==1:
img = mx.ndarray.flip(data=img, axis=2)
data_list[flip][i][:] = img
# if i%5000==0:
# print('loading bin', i)
print(data_list[0].shape)
return (data_list, issame_list)
def eval_images_cmp(images_preprocessed, issame_list, network, batch_size, nfolds=10, name='', result_dir='',
re_extract_feature=False, filter_out_type='max'):
print('testing verification..')
result_dir = r'G:\chenkai\Probabilistic-Face-Embeddings-master\log\resface64_relu_msarcface_am_PFE\20201020-180059-mls-only'
save_name_pkl_feature = result_dir + '/%s_feature.pkl' % name
if re_extract_feature or not os.path.exists(save_name_pkl_feature):
images = images_preprocessed
print(images.shape)
mu, sigma_sq = network.extract_feature(images, batch_size, verbose=True)
save_data = (mu, sigma_sq)
with open(save_name_pkl_feature, 'wb') as f:
cPickle.dump(save_data, f)
print('save', save_name_pkl_feature)
else:
with open(save_name_pkl_feature, 'rb') as f:
data = cPickle.load(f)
mu, sigma_sq = data
print('load', save_name_pkl_feature)
feat_pfe = np.concatenate([mu, sigma_sq], axis=1)
threshold = -1.604915
result_dir2 = r'G:\chenkai\Probabilistic-Face-Embeddings-master\log\resface64m_relu_msarcface_am_PFE\20201028-193142-mls1.0-abs0.001-triplet0.0001'
save_name_pkl_feature2 = result_dir2 + '/%s_feature.pkl' % name
with open(save_name_pkl_feature2, 'rb') as f:
data = cPickle.load(f)
mu2, sigma_sq2 = data
print('load', save_name_pkl_feature2)
feat_pfe2 = np.concatenate([mu2, sigma_sq2], axis=1)
threshold2 = -1.704115
embeddings = mu
embeddings1 = embeddings[0::2]
embeddings2 = embeddings[1::2]
cosine_score = np.sum(embeddings1 * embeddings2, axis=1)
compare_func = lambda x,y: utils.pair_MLS_score(x, y, use_attention_only=False)
embeddings = feat_pfe
embeddings1 = embeddings[0::2]
embeddings2 = embeddings[1::2]
actual_issame = np.asarray(issame_list)
dist = compare_func(embeddings1, embeddings2)
pred = (dist > threshold).astype(int)
idx_true1 = pred == actual_issame
print('acc1', np.average(idx_true1), np.sum(idx_true1))
embeddings = feat_pfe2
embeddings1 = embeddings[0::2]
embeddings2 = embeddings[1::2]
actual_issame = np.asarray(issame_list)
"""
Plot relative number of errors and new sentences as a function of the SORN size
"""
import os
import sys; sys.path.append('.')
import pickle
import numpy as np
import matplotlib.pylab as plt
# parameters to include in the plot
SAVE_PLOT = True
################################################################################
# Make plot #
################################################################################
# 0. build figures
fig = plt.figure(1, figsize=(7, 6))
# 1. load performances and experiment parameters
for experiment_tag in ['error_and_new', 'eFDT', 'sp']:
experiment_folder = 'GrammarTask_' + experiment_tag
experiment_path = 'backup/' + experiment_folder + '/'
experiment_n = len(os.listdir(experiment_path))
percent_error = np.zeros(experiment_n)
net_size = np.zeros(experiment_n)
for exp, exp_name in enumerate(os.listdir(experiment_path)):
exp_n = [int(s) for s in exp_name.split('_') if s.isdigit()]
stats = pickle.load(open(experiment_path+exp_name+'/stats.p', 'rb'))
percent_error[exp] = float(stats.n_wrong)/stats.n_output
net_size[exp] = int(exp_name.split('_')[0][1:])
N_e = np.unique(net_size)
wrong = []
wrong_std = []
wrong_up = []
wrong_down = []
for i, n in enumerate(N_e):
net = np.where(n == net_size)
wrong.append(percent_error[net].mean())
wrong_std.append(percent_error[net].std())
wrong_up.append(np.percentile(percent_error[net], 75))
wrong_down.append(np.percentile(percent_error[net], 25))
if experiment_tag == 'error_and_new':
plot_label = 'FDT'
plot_color = 'r'
plot_style = '-'
elif experiment_tag == 'eFDT':
plot_label = 'eFDT'
plot_color = 'r'
plot_style = '--'
elif experiment_tag == 'sp':
plot_label = 'SinPlu'
plot_color = 'orange'
plot_style = '-'
plt.plot(N_e, wrong, plot_style,
alpha=0.5,
color=plot_color,
label=plot_label)
plt.fill_between(N_e,
np.array(wrong)-np.array(wrong_std),
np.array(wrong)+np.array(wrong_std))
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE_scikit-fuzzy.txt
"""
This block was based on scikit-fuzzy by <NAME> et al., and they deserve
most of the credit for this.
The decision to split came from the fact that CoTeDe uses only a few functions
from scikit-fuzzy, and scikit-fuzzy 0.1.9 stopped working with PyPI, breaking
all tests and the development process of CoTeDe. I did contribute to
scikit-fuzzy, but it would have been more work than I could afford at that time.
"""
import numpy as np
def smf(x, p):
"""
S-function fuzzy membership generator.
Parameters
----------
x : any sequence
Independent variable.
p: list of 2 values
p[0]: 'foot', where the function begins to climb from zero.
p[1]: 'ceiling', where the function levels off at 1.
Returns
-------
y : 1d array
S-function.
Notes
-----
Named such because of its S-like shape.
"""
assert len(p) == 2, "smf requires 2 parameters"
assert p[0] <= p[1], "p[0] must be <= p[1]."
x = np.asanyarray(x)
y = np.nan * np.ones_like(x)
y[np.nonzero(x <= p[0])] = 0
idx = np.logical_and(p[0] < x, x <= (p[0] + p[1]) / 2.0)
y[idx] = 2.0 * ((x[idx] - p[0]) / (p[1] - p[0])) ** 2.0
idx = np.logical_and((p[0] + p[1]) / 2.0 <= x, x < p[1])
y[idx] = 1 - 2.0 * ((x[idx] - p[1]) / (p[1] - p[0])) ** 2.0
y[np.nonzero(x >= p[1])] = 1
return y
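# Quick sketch of smf in use (values are illustrative): below the 'foot' the
# membership is 0, above the 'ceiling' it is 1, with a smooth S-curve between.
# >>> smf([0.0, 0.25, 0.5, 0.75, 1.0], [0.25, 0.75])
# array([0. , 0. , 0.5, 1. , 1. ])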
def trapmf(x, p):
"""
Trapezoidal membership function generator.
Parameters
----------
x : any sequence
Independent variable.
p: list of 4 values
below p[0] and above p[3] it returns 0
between p[1] and p[2] it returns 1
linear transitions from p[0] to p[1] and from p[2] to p[3]
Returns
-------
y : 1d array
Trapezoidal membership function.
"""
assert len(p) == 4, "trapmf requires 4 parameters."
assert (
p[0] <= p[1] and p[1] <= p[2] and p[2] <= p[3]
), "trapmf requires 4 parameters: p[0] <= p[1] <= p[2] <= p[3]."
x = np.asanyarray(x)
y = np.nan * np.ones_like(x)
idx = np.nonzero(x <= p[1])
"""This module contains the tools needed for satellite detection
within an ACS/WFC image as published in
`ACS ISR 2016-01 <http://www.stsci.edu/hst/acs/documents/isrs/isr1601.pdf>`_.
.. note::
Only tested for ACS/WFC FLT and FLC images, but it should
theoretically work for any instrument.
Requires skimage version 0.11.x or later.
:func:`skimage.transform.probabilistic_hough_line` gives
slightly different results from run to run, but this should
not matter since :func:`detsat` only provides a crude
approximation of the actual trail(s).
Performance is faster when ``plot=False``, where applicable.
Examples
--------
>>> from acstools.satdet import detsat, make_mask, update_dq
Find trail segments for a single image and extension without multiprocessing,
and display plots (not shown) and verbose information:
>>> results, errors = detsat(
... 'jc8m10syq_flc.fits', chips=[4], n_processes=1,
... plot=True, verbose=True)
1 file(s) found...
Processing jc8m10syq_flc.fits[4]...
Rescale intensity percentiles: 110.161376953, 173.693756409
Length of PHT result: 42
min(x0)= 1, min(x1)= 269, min(y0)= 827, min(y1)= 780
max(x0)=3852, max(x1)=4094, max(y0)=1611, max(y1)=1545
buf=200
topx=3896, topy=1848
...
Trail Direction: Right to Left
42 trail segment(s) detected
...
End point list:
1. (1256, 1345), (2770, 1037)
2. ( 11, 1598), ( 269, 1545)
...
Total run time: 22.4326269627 s
>>> results[('jc8m10syq_flc.fits', 4)]
array([[[1242, 1348],
[2840, 1023]],
[[1272, 1341],
[2688, 1053]],
...
[[2697, 1055],
[2967, 1000]]])
>>> errors
{}
Find trail segments for multiple images and all ACS/WFC science extensions with
multiprocessing:
>>> results, errors = detsat(
... '*_flc.fits', chips=[1, 4], n_processes=12, verbose=True)
6 file(s) found...
Using 12 processes
Number of trail segment(s) found:
abell2744-hffpar_acs-wfc_f814w_13495_11_01_jc8n11q9q_flc.fits[1]: 0
abell2744-hffpar_acs-wfc_f814w_13495_11_01_jc8n11q9q_flc.fits[4]: 4
abell2744_acs-wfc_f814w_13495_51_04_jc8n51hoq_flc.fits[1]: 2
abell2744_acs-wfc_f814w_13495_51_04_jc8n51hoq_flc.fits[4]: 34
abell2744_acs-wfc_f814w_13495_93_02_jc8n93a7q_flc.fits[1]: 20
abell2744_acs-wfc_f814w_13495_93_02_jc8n93a7q_flc.fits[4]: 20
j8oc01sxq_flc.fits[1]: 0
j8oc01sxq_flc.fits[4]: 0
jc8m10syq_flc.fits[1]: 0
jc8m10syq_flc.fits[4]: 38
jc8m32j5q_flc.fits[1]: 42
jc8m32j5q_flc.fits[4]: 12
Total run time: 34.6021330357 s
>>> results[('jc8m10syq_flc.fits', 4)]
array([[[1242, 1348],
[2840, 1023]],
[[1272, 1341],
[2688, 1053]],
...
[[2697, 1055],
[2967, 1000]]])
>>> errors
{}
For a given image and extension, create a DQ mask for a satellite trail using
the first segment (other segments should give similar masks) based on the
results from above (plots not shown):
>>> trail_coords = results[('jc8m10syq_flc.fits', 4)]
>>> trail_segment = trail_coords[0]
>>> trail_segment
array([[1199, 1357],
[2841, 1023]])
>>> mask = make_mask('jc8m10syq_flc.fits', 4, trail_segment,
... plot=True, verbose=True)
Rotation: -11.4976988695
Hit image edge at counter=26
Hit rotate edge at counter=38
Run time: 19.476323843 s
Update the corresponding DQ array using the mask from above:
>>> update_dq('jc8m10syq_flc.fits', 6, mask, verbose=True)
DQ flag value is 16384
Input... flagged NPIX=156362
Existing flagged NPIX=0
Newly... flagged NPIX=156362
jc8m10syq_flc.fits[6] updated
"""
#
# History:
# Dec 12, 2014 - DMB - Created for COSC 602 "Image Processing and Pattern
# Recocnition" at Towson University. Mostly detection algorithm
# development and VERY crude mask.
# Feb 01, 2015 - DMB - Masking algorithm refined to be useable by HFF.
# Mar 28, 2015 - DMB - Small bug fixes and tweaking to try not to include
# diffraction spikes.
# Nov 03, 2015 - PLL - Adapted for acstools distribution. Fixed bugs,
# possibly improved performance, changed API.
# Dec 07, 2015 - PLL - Minor changes based on feedback from DMB.
# May 24, 2016 - SMO - Minor import changes to skimage
#
# STDLIB
import glob
import multiprocessing
import time
import warnings
from functools import partial
from multiprocessing import Process, Queue
# THIRD PARTY
import numpy as np
from astropy.io import fits
# from astropy.stats import biweight_location
from astropy.stats import sigma_clipped_stats
from astropy.utils.exceptions import AstropyUserWarning
from astropy.utils.introspection import minversion
# See https://github.com/astropy/astropy/issues/6364
from astropy.stats import biweight_scale
biweight_midvariance = partial(biweight_scale, modify_sample_size=False)
# OPTIONAL
try:
import skimage
from scipy import stats
from skimage import transform
from skimage import morphology as morph
from skimage import exposure
from skimage.feature import canny
except ImportError:
HAS_OPDEP = False
else:
if minversion(skimage, '0.11'):
HAS_OPDEP = True
else:
HAS_OPDEP = False
try:
import matplotlib.pyplot as plt
except ImportError:
plt = None
warnings.warn('matplotlib not found, plotting is disabled',
AstropyUserWarning)
__version__ = '0.3.3'
__vdate__ = '11-Jul-2017'
__author__ = '<NAME>, <NAME>'
__all__ = ['detsat', 'make_mask', 'update_dq']
def _detsat_one(filename, ext, sigma=2.0, low_thresh=0.1, h_thresh=0.5,
small_edge=60, line_len=200, line_gap=75,
percentile=(4.5, 93.0), buf=200, plot=False, verbose=False):
"""Called by :func:`detsat`."""
if verbose:
t_beg = time.time()
fname = f'{filename}[{ext}]'
# check extension
if ext not in (1, 4, 'SCI', ('SCI', 1), ('SCI', 2)):
warnings.warn(f'{ext} is not a valid science extension for ACS/WFC',
AstropyUserWarning)
# get the data
image = fits.getdata(filename, ext)
# image = im.astype('float64')
# rescale the image
p1, p2 = np.percentile(image, percentile)
# there should always be some counts in the image; a negative lower
# percentile is clipped to zero. Makes things nicer for finding edges.
if p1 < 0:
p1 = 0.0
if verbose:
print(f'Rescale intensity percentiles: {p1}, {p2}')
image = exposure.rescale_intensity(image, in_range=(p1, p2))
# get the edges
immax = np.max(image)
edge = canny(image, sigma=sigma,
low_threshold=immax * low_thresh,
high_threshold=immax * h_thresh)
# clean up the small objects, will make less noise
morph.remove_small_objects(edge, min_size=small_edge, connectivity=8,
in_place=True)
# create an array of angles from 0 to 180, exactly 0 will get bad columns
# but it is unlikely that a satellite will be exactly at 0 degrees, so
# don't bother checking.
# then, convert to radians.
angle = np.radians(np.arange(2, 178, 0.5, dtype=float))
"""
CanSat Constellation Centralized Controller
controller.py
Taken from
> <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
> "Safe Reinforcement Learning Benchmark Environments for Aerospace Control Systems,"
> IEEE Aerospace, Big Sky, MT, March 2022.
"""
import numpy as np
from scipy.spatial.qhull import Delaunay
def graph_from_simplices(tri: Delaunay) -> dict:
"""
transform simplices to graph represented as
{
vertex_id : set({verts})
}
"""
graph = {}
for simplex in tri.simplices:
for va, vb in list(zip(simplex[:-1],
simplex[1:])) + [(simplex[0], simplex[-1])]:
if va in graph:
graph[va].add(vb)
else:
graph[va] = set({vb})
if vb in graph:
graph[vb].add(va)
else:
graph[vb] = set({va})
return graph
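# Usage sketch (illustrative, not from the original controller): the returned
# dict maps each vertex id to the set of vertices it shares a simplex edge
# with, e.g. for the four corners of a unit square:
def _demo_neighbour_graph():
    pts = np.array([[0., 0.], [1., 0.], [0., 1.], [1., 1.]])
    return graph_from_simplices(Delaunay(pts))  # each corner linked to its simplex neighbours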
def model_output(model, time_t, state_ctrl, input_forces):
"""
calculate forces to be supplied by the satellites to rejoin
"""
forces = []
points = [np.array([0.0, 0.0])
] + [np.array(input_forces[i * 4:(i + 1) * 4][:2]) for i in range(0, 4)]
vels = [np.array([0.0, 0.0])
] + [np.array(input_forces[i * 4:(i + 1) * 4][2:]) for i in range(0, 4)]
tri = Delaunay(points)
graph = graph_from_simplices(tri)
for sidx in range(len(points)):
connections = graph[sidx]
f = np.array([0.0, 0.0])
for sother in connections:
# get unit distance vector and norm
dist = points[sother] - points[sidx]
r = np.linalg.norm(dist)
dist /= r
vel = (vels[sother] - vels[sidx])
velp = np.dot(vel, dist) * dist
velr = np.linalg.norm(vel)
if not np.isnan(r):
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from builtins import zip
from builtins import range
import hashlib
import os
import numpy
from ektelo.data import Histogram
from ektelo import util
from ektelo.mixins import CartesianInstantiable
from ektelo.mixins import Marshallable
import scipy.stats as sci
from functools import reduce
class Dataset(Marshallable):
def __init__(self, hist, reduce_to_domain_shape=None, dist=None):
"""
Any instances with equal key() values should have equal hash() values
domain_shape will be result of regular grid partition
"""
if isinstance(reduce_to_domain_shape, int): # allow for integers in 1D, instead of shape tuples
reduce_to_domain_shape = (reduce_to_domain_shape, )
if dist is not None:
self._dist_str = numpy.array_str(numpy.array(dist))
else:
self._dist_str = ''
self._hist = hist
self._reduce_to_domain_shape = reduce_to_domain_shape if hist.shape != reduce_to_domain_shape else None
self._dist = dist
self._payload = None
self._compiled = False
def reduce_data(self,p_grid,data):
"""reduce data to the domain indicated by p_grid"""
assert data.shape == p_grid.shape, 'Shape of x and shape of partition vector must match.'
#get out_shape
if p_grid.ndim == 1:
out_shape =(len(numpy.unique(p_grid)), )
else:
out = []
for i in range(p_grid.ndim):
ind_slice = [0] * p_grid.ndim
ind_slice[i] = slice(0, p_grid.shape[i])
out.append(len(numpy.unique(p_grid[tuple(ind_slice)])))  # tuple-index to stay valid on modern numpy
# This doesn't work for dimensions >2: the compressed array is not a strip of the domain,
# but the first element along the axis. It could be an nd array itself.
#out = [len(numpy.unique(numpy.compress([True], p_grid, axis=i))) for i in range(p_grid.ndim)]
#out.reverse() # rows/cols need to be reversed here
out_shape = tuple(out)
#reduce
unique, indices, inverse, counts = numpy.unique(p_grid, return_index=True, return_inverse=True, return_counts=True)
res = numpy.zeros_like(unique, dtype=float)
for index, c in numpy.ndenumerate(data.ravel()): # needs to be flattened for parallel indexing with output of unique
res[ inverse[index] ] += c
return numpy.array(res).reshape(out_shape)
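# Worked sketch of reduce_data (values are illustrative): a 2x4 histogram
# reduced by a partition grid with two labels,
# data = numpy.arange(8).reshape(2, 4)
# p_grid = numpy.array([[0, 0, 1, 1], [0, 0, 1, 1]])
# yields out_shape (1, 2): label 0 collects 0+1+4+5 = 10 and label 1
# collects 2+3+6+7 = 18, so the reduced array is [[10, 18]].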
def compile(self):
if self._compiled:
return self
self._payload = self._hist
if self._reduce_to_domain_shape: # reduce cells to get desired shape
q = [divmod(dom,grid) for (dom,grid) in zip(self._payload.shape, self._reduce_to_domain_shape)]
for red, rem in q:
assert rem == 0, 'Domain must be reducible to target domain by uniform grid: %i, %i' % (red,rem)
grid_shape = [r[0] for r in q]
p_grid = partition_grid(self._payload.shape, grid_shape)
self._payload = self.reduce_data(p_grid,self._payload) # update payload
if self._dist is not None:
self._dist = self.reduce_data(p_grid,self._dist) # update dist
self._compiled = True
self._payload = self._payload.astype("int") # partition engines need payload to be of type int
return self
@property
def payload(self):
return self.compile()._payload
@property
def dist(self):
return self.compile()._dist
@property
def scale(self):
return self.payload.sum()
@property
def domain_shape(self):
return self.payload.shape
@property
def fractionZeros(self):
zero_count = (self.payload == 0).sum()
return util.old_div(float(zero_count), self.payload.size)
@property
def maxCount(self):
return self.payload.max()
@property
def key(self):
""" Using leading 8 characters of hash as key for now """
return self.hash[:8]
@property
def hash(self):
m = hashlib.sha1()
m.update(util.prepare_for_hash(self.__class__.__name__))
m.update(util.prepare_for_hash(numpy.array(self._hist)))
m.update(util.prepare_for_hash(numpy.array(self._reduce_to_domain_shape)))
m.update(util.prepare_for_hash(self._dist_str))
return m.hexdigest()
# represent object state as dict (for storage in datastore)
def asDict(self):
d = util.class_to_dict(self, ignore_list=['_dist', 'dist', '_payload', 'payload', '_compiled',
'_dist_str', '_hist', '_reduce_to_domain_shape'])
## add decomposed shape attrs ??
return d
def analysis_payload(self):
return {'payload': self.payload}
class DatasetFromRelation(Dataset, CartesianInstantiable):
def __init__(self,
relation,
nickname,
normed=False,
weights=None,
reduce_to_dom_shape=None):
self.init_params = util.init_params_from_locals(locals())
self.fname = nickname
histogram = Histogram(relation.df, relation.bins, relation.domains, normed, weights).generate()
self.statistics = relation.statistics()
self.statistics.update(histogram.statistics())
if reduce_to_dom_shape is not None:
self.statistics['domain_valid'] = False
else:
self.statistics['domain_valid'] = True
super(DatasetFromRelation,self).__init__(histogram.hist.copy(order='C'), reduce_to_dom_shape)
@staticmethod
def instantiate(params):
relation = data.Relation(params['config']).load_csv(params['csv_filename'], params['csv_delimiter'])
return DatasetFromRelation(relation, params['nickname'], params['normed'], params['weights'], params['domain'])
def asDict(self):
d = util.class_to_dict(self, ignore_list=['_dist', '_payload', 'payload', '_compiled',
'_dist_str', '_hist', '_reduce_to_domain_shape',
'statistics'])
return d
class DatasetFromFile(Dataset, CartesianInstantiable):
def __init__(self, fileName, reduce_to_dom_shape=None):
self.init_params = util.init_params_from_locals(locals())
self.fname = fileName
#assert nickname in filenameDict, 'Filename parameter not recognized: %s' % nickname
hist = load(self.fname)
super(DatasetFromFile,self).__init__(hist, reduce_to_dom_shape, None)
@staticmethod
def instantiate(params):
return DatasetFromFile(params['nickname'], params['domain'])
class DatasetSampled(Dataset):
def __init__(self, dist, sample_to_scale, reduce_to_dom_shape=None, seed=None):
self.seed = seed
prng = numpy.random.RandomState(self.seed)
hist = subSample(dist, sample_to_scale, prng)
super(DatasetSampled,self).__init__(hist, reduce_to_dom_shape, dist)
class DatasetUniformityVaryingSynthetic(Dataset, CartesianInstantiable):
def __init__(self, uniformity, dom_shape, scale, seed=None):
'''
Generate synthetic data of varying uniformity
uniformity: parameter in [0,1] where 1 produces perfectly uniform data, 0 is maximally non-uniform
All cells set to zero except fraction equal to 'uniformity' value.
All non-zero cells are set to same value, then shuffled randomly.
'''
self.init_params = util.init_params_from_locals(locals())
self.u = uniformity
assert 0 <= uniformity and uniformity <= 1
n = numpy.prod(dom_shape) # total domain size
hist = numpy.zeros(n)
num_nonzero = max(1, int(uniformity * n))
hist_value = util.old_div(scale, num_nonzero)
hist[0:num_nonzero] = hist_value
prng = numpy.random.RandomState(seed)
prng.shuffle(hist)
super(DatasetUniformityVaryingSynthetic, self).__init__(hist.reshape(dom_shape), reduce_to_domain_shape=None, dist=None)
@staticmethod
def instantiate(params):
return DatasetUniformityVaryingSynthetic(params['uniformity'], params['dom_shape'], params['scale'], params['seed'])
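# Usage sketch (illustrative): with uniformity=0.5 on a domain of 16 cells and
# scale 160, eight randomly placed cells each get the value 20, the rest 0.
# d = DatasetUniformityVaryingSynthetic(uniformity=0.5, dom_shape=(4, 4),
#                                       scale=160, seed=0)
# d.payload.sum() -> 160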
tryPaths = [os.path.join(os.environ['EKTELO_HOME'], 'ektelo')]
def load(filename):
"""
Load from file and return original counts (should be integral)
"""
path = [p for p in tryPaths if os.path.exists(p)]
assert path, 'data path not found.'
fullpath = os.path.join( path[0], 'datafiles', filename ) # fixed for now, so we don't have to include as a parameter in automated key extraction stuff...
_, file_extension = os.path.splitext(fullpath)
if file_extension == '.txt':
x = []
with open(fullpath, 'r') as fp:
for ln in fp.readlines():
x.append(int(ln))
return numpy.array(x, dtype='int32')
elif file_extension == '.npy':
return numpy.load(fullpath)
else:
raise Exception('Unrecognized file extension')
def subSample(dist, sampleSize, prng):
''' Generate a subsample of given sampleSize from an input distribution '''
samples = prng.choice(a=dist.size, replace=True, size=int(sampleSize), p=dist.flatten())
hist = numpy.histogram(samples, bins=dist.size, range=(0,dist.size))[0].astype('int32') # return only counts (not bins)
return hist.reshape( dist.shape )
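# Usage sketch (illustrative): draw 1000 samples from a 3-cell distribution
# and get them back re-binned as integer counts with the input shape.
# prng = numpy.random.RandomState(0)
# hist = subSample(numpy.array([0.25, 0.25, 0.5]), 1000, prng)
# hist.shape -> (3,), hist.sum() -> 1000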
# partition vector generation for data reduction
def cantor_pairing(a, b):
"""
A function returning a unique positive integer for every pair (a,b) of positive integers
"""
return util.old_div((a+b)*(a+b+1), 2) + b
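# Worked example: cantor_pairing(2, 3) = (2+3)*(2+3+1)//2 + 3 = 15 + 3 = 18,
# and no other pair (a, b) of non-negative integers maps to 18.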
def general_pairing(tup):
""" Generalize cantor pairing to k dimensions """
if len(tup) == 1:
return tup[0] # no need for pairing in 1D case
else:
return reduce(cantor_pairing, tup)
def canonicalTransform(vector):
""" transform a partition vector according to the canonical order.
if bins are noncontiguous, use position of first occurrence.
e.g. [3,4,1,1] => [1,2,3,3]; [3,4,1,1,0,1]=>[0,1,2,2,3,2]
"""
unique, indices, inverse = numpy.unique(vector, return_index=True, return_inverse=True)
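# The rest of this helper is truncated here; a minimal sketch of the
# relabeling described in the docstring (matching its second example, with
# canonical ids starting at 0) could look like:
# order = numpy.argsort(indices)          # unique labels by first occurrence
# rank = numpy.empty_like(order)
# rank[order] = numpy.arange(len(order))  # label position -> canonical id
# return rank[inverse]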
#_____import packages_____
from galpy.potential import KuzminKutuzovStaeckelPotential
from galpy.potential import evaluatePotentials, evaluateRforces, evaluateDensities
from galpy.actionAngle import estimateDeltaStaeckel
from galpy.actionAngle import actionAngleStaeckel
from galpy.orbit import Orbit
from galpy.util import bovy_coords
import numpy
import scipy
import math
#Test whether circular velocity calculation works
def test_vcirc():
#test the circular velocity of the KuzminKutuzovStaeckelPotential
#using parameters from Batsleer & Dejonghe 1994, fig. 1-3
#and their formula eq. (10)
#_____model parameters______
#surface ratios of disk and halo:
ac_Ds = [50. ,50. ,50. ,50. ,50. ,50. ,40. ,40. ,40. ]
ac_Hs = [1.005,1.005,1.005,1.01,1.01,1.01 ,1.02,1.02,1.02 ]
#disk contribution to total mass:
ks = [0.05 ,0.06 ,0.07 ,0.07,0.1 ,0.125,0.1 ,0.12,0.125]
#focal distance:
Delta = 1.
for ii in range(9):
ac_D = ac_Ds[ii]
ac_H = ac_Hs[ii]
k = ks[ii]
#_____setup potential_____
#first try, not normalized:
V_D = KuzminKutuzovStaeckelPotential(amp= k ,ac=ac_D,Delta=Delta,normalize=False)
V_H = KuzminKutuzovStaeckelPotential(amp=(1.-k),ac=ac_H,Delta=Delta,normalize=False)
pot = [V_D,V_H]
#normalization according to Batsleer & Dejonghe 1994:
V00 = evaluatePotentials(pot,0.,0.)
#second try, normalized:
V_D = KuzminKutuzovStaeckelPotential(amp= k / (-V00),ac=ac_D,Delta=Delta,normalize=False)
V_H = KuzminKutuzovStaeckelPotential(amp=(1.-k) / (-V00),ac=ac_H,Delta=Delta,normalize=False)
pot = [V_D,V_H]
#_____calculate rotation curve_____
Rs = numpy.linspace(0.,20.,100)
z = 0.
vcirc_calc = numpy.sqrt(-Rs * evaluateRforces(pot,Rs,z))
#_____vcirc by Batsleer & Dejonghe eq. (10) (with proper Jacobian)_____
def vc2w(R):
g_D = Delta**2 / (1.-ac_D**2)
a_D = g_D - Delta**2
g_H = Delta**2 / (1.-ac_H**2)
a_H = g_H - Delta**2
l = R**2 - a_D
q = a_H - a_D
termD = numpy.sqrt(l )*(numpy.sqrt(l ) + numpy.sqrt(-g_D ))**2
termH = numpy.sqrt(l-q)*(numpy.sqrt(l-q) + numpy.sqrt(-g_D-q))**2
return R**2 * (k / termD + (1.-k) / termH)
vcirc_formula = numpy.sqrt(vc2w(Rs)/(-V00))
assert numpy.all(numpy.fabs(vcirc_calc - vcirc_formula) < 10**-8.), \
'Calculated circular velocity for KuzminKutuzovStaeckelPotential '+ \
'does not agree with eq. (10) (corrected by proper Jacobian) '+ \
'by Batsleer & Dejonghe (1994)'
return None
#-----------------------------------------------------------------------------
#test whether the density calculation works
def test_density():
#test the density calculation of the KuzminKutuzovStaeckelPotential
#using parameters from Batsleer & Dejonghe 1994, tab. 2
#_____parameters_____
#table 2 in Batsleer & Dejonghe
ac_D = [25. ,25. ,25. ,25. ,25. ,25. ,40. ,40. ,40. ,40. ,40. ,50. ,50. ,50. ,50. ,50. ,50. ,75. ,75. ,75. ,75. ,75. ,75. ,100. ,100. ,100.,100. ,100.,100. ,150. ,150. ,150. ,150. ,150.,150.]
ac_H = [1.005,1.005,1.01 ,1.01,1.02 ,1.02,1.005,1.005,1.01 ,1.01,1.02,1.005,1.005,1.01,1.01 ,1.02,1.02 ,1.005,1.005,1.01,1.01 ,1.02,1.02 ,1.005,1.005,1.01,1.01 ,1.02,1.02 ,1.005,1.005,1.01 ,1.01 ,1.02,1.02]
k = [0.05 ,0.08 ,0.075,0.11,0.105,0.11,0.05 ,0.08 ,0.075,0.11,0.11,0.05 ,0.07 ,0.07,0.125,0.1 ,0.125,0.05 ,0.065 ,0.07,0.125,0.10,0.125,0.05,0.065,0.07,0.125,0.10,0.125,0.05 ,0.065,0.075,0.125,0.11,0.125]
Delta = [0.99 ,1.01 ,0.96 ,0.99,0.86 ,0.88,1.00 ,1.01 ,0.96 ,0.99,0.89,1.05 ,1.06 ,1.00,1.05 ,0.91,0.97 ,0.98 ,0.99 ,0.94,0.98 ,0.85,0.91 ,1.06 ,1.07 ,1.01,1.06 ,0.94,0.97 ,1.06 ,1.07 ,0.98 ,1.06 ,0.94,0.97]
Mmin = [7.49 ,6.17 ,4.08 ,3.70,2.34 ,2.36,7.57 ,6.16 ,4.08 ,2.64,2.38,8.05 ,6.94 ,4.37,3.70 ,2.48,2.50 ,7.37 ,6.66 ,4.05,3.46 ,2.33,2.36 ,8.14 ,7.27 ,4.42,3.72 ,2.56,2.50 ,8.14 ,7.26 ,4.17 ,3.72 ,2.51,2.50]
Mmax = [7.18 ,6.12 ,3.99 ,3.69,2.37 ,2.40,7.27 ,6.11 ,3.99 ,2.66,2.42,7.76 ,6.85 ,4.26,3.72 ,2.51,2.54 ,7.07 ,6.51 ,3.95,3.48 ,2.36,2.40 ,7.85 ,7.15 ,4.30,3.75 ,2.58,2.54 ,7.85 ,7.07 ,4.08 ,3.75 ,2.53,2.53]
rhomin = [0.04 ,0.05 ,0.04 ,0.04,0.03 ,0.03,0.06 ,0.06 ,0.05 ,0.04,0.04,0.07 ,0.08 ,0.06,0.07 ,0.04,0.05 ,0.08 ,0.09 ,0.07,0.09 ,0.05,0.06 ,0.12 ,0.13 ,0.09,0.13 ,0.07,0.09 ,0.16 ,0.19 ,0.12 ,0.18 ,0.10,0.12]
rhomax = [0.03 ,0.03 ,0.02 ,0.03,0.02 ,0.02,0.04 ,0.04 ,0.03 ,0.03,0.02,0.05 ,0.05 ,0.04,0.05 ,0.03,0.03 ,0.05 ,0.06 ,0.04,0.06 ,0.03,0.04 ,0.07 ,0.08 ,0.06,0.08 ,0.04,0.05 ,0.09 ,0.10 ,0.07 ,0.10 ,0.06,0.07]
Sigmin = [58 ,52 ,52 ,49 ,39 ,40 ,58 ,55 ,51 ,44 ,40 ,59 ,54 ,53 ,49 ,41 ,42 ,58 ,55 ,51 ,48 ,39 ,40 ,59 ,55 ,53 ,49 ,42 ,42 ,59 ,55 ,52 ,49 ,42 ,42]
Sigmax = [45 ,41 ,38 ,37 ,28 ,28 ,45 ,32 ,37 ,32 ,30 ,46 ,43 ,40 ,37 ,30 ,31 ,45 ,43 ,38 ,36 ,28 ,29 ,46 ,43 ,40 ,38 ,31 ,31 ,46 ,44 ,39 ,38 ,30 ,31]
for ii in range(len(ac_D)):
if ac_D[ii] == 40.:
continue
#because I believe that there are typos in tab. 2 by Batsleer & Dejonghe...
for jj in range(2):
#_____parameters depending on solar position____
if jj == 0:
Rsun = 7.5
zsun = 0.004
GM = Mmin[ii] #units: G = 1, M in 10^11 solar masses
rho = rhomin[ii]
Sig = Sigmin[ii]
elif jj == 1:
Rsun = 8.5
zsun = 0.02
GM = Mmax[ii] #units: G = 1, M in 10^11 solar masses
rho = rhomax[ii]
Sig = Sigmax[ii]
outstr = 'ac_D='+str(ac_D[ii])+', ac_H='+str(ac_H[ii])+', k='+str(k[ii])+', Delta='+str(Delta[ii])+\
', Mtot='+str(GM)+'*10^11Msun, Rsun='+str(Rsun)+'kpc, rho(Rsun,zsun)='+str(rho)+'Msun/pc^3, Sig(Rsun,z<1.1kpc)='+str(Sig)+'Msun/pc^2'
#_____setup potential_____
amp_D = GM * k[ii]
V_D = KuzminKutuzovStaeckelPotential(amp=amp_D,ac=ac_D[ii],Delta=Delta[ii],normalize=False)
amp_H = GM * (1.-k[ii])
V_H = KuzminKutuzovStaeckelPotential(amp=amp_H,ac=ac_H[ii],Delta=Delta[ii],normalize=False)
pot = [V_D,V_H]
#_____local density_____
rho_calc = evaluateDensities(pot,Rsun,zsun) * 100. #units: [solar mass / pc^3]
rho_calc = round(rho_calc,2)
#an error of 0.01 corresponds to the significant digit
#given in the table, to which the density was rounded,
#to be wrong by one.
assert numpy.fabs(rho_calc - rho) <= 0.01+10.**-8, \
'Calculated density %f for KuzminKutuzovStaeckelPotential ' % rho_calc + \
'with model parameters:\n'+outstr+'\n'+ \
'does not agree with value from tab. 2 '+ \
'by Batsleer & Dejonghe (1994)'
#_____surface density_____
Sig_calc, err = scipy.integrate.quad(lambda z: (evaluateDensities(pot,Rsun,z/1000.) * 100.), #units: [solar mass / pc^3]
0., 1100.) #units: pc
Sig_calc = round(2. * Sig_calc)
#an error of 1 corresponds to the significant digit
#given in the table, to which the surface density was rounded,
#to be wrong by one.
assert numpy.fabs(Sig_calc - Sig) <= 1., \
'Calculated surface density %f for KuzminKutuzovStaeckelPotential ' % Sig_calc + \
'with model parameters:\n'+outstr+'\n'+ \
'does not agree with value from tab. 2 '+ \
'by Batsleer & Dejonghe (1994)'
return None
#-----------------------------------------------------------------------------
#test whether the orbit integration in C and Python give the same result
def test_orbitIntegrationC():
#_____initialize some KKSPot_____
Delta = 1.0
pot = KuzminKutuzovStaeckelPotential(ac=20.,Delta=Delta,normalize=True)
#_____initialize an orbit (twice)_____
vxvv = [1.,0.1,1.1,0.,0.1]
o_P= Orbit(vxvv=vxvv)
o_C= Orbit(vxvv=vxvv)
#_____integrate the orbit with python and C_____
ts= numpy.linspace(0,100,101)
o_P.integrate(ts,pot,method='leapfrog') #python
o_C.integrate(ts,pot,method='leapfrog_c')#C
for ii in range(5):
exp3= -1.7
if ii == 0: Python, CC, string, exp1, exp2 = o_P.R(ts) , o_C.R(ts) , 'R' , -5., -10.
elif ii == 1: Python, CC, string, exp1, exp2 = o_P.z(ts) , o_C.z(ts) , 'z' , -3.25, -4.
elif ii == 2: Python, CC, string, exp1, exp2 = o_P.vR(ts), o_C.vR(ts), 'vR', -3., -10.
elif ii == 3: Python, CC, string, exp1, exp2, exp3 = o_P.vz(ts), o_C.vz(ts), 'vz', -3., -4., -1.3
elif ii == 4: Python, CC, string, exp1, exp2 = o_P.vT(ts), o_C.vT(ts), 'vT', -5., -10.
rel_diff = numpy.fabs((Python-CC)/CC) < 10.**exp1
abs_diff = (numpy.fabs(Python-CC) < 10.**exp2) * (numpy.fabs(Python) < 10.**exp3)
assert numpy.all(rel_diff+abs_diff), \
'Orbit integration for '+string+' coordinate different in ' + \
'C and Python implementation.'
return None
#-----------------------------------------------------------------------------
#test whether this is really a Staeckel potential and the Delta is constant
def test_estimateDelta():
#_____initialize some KKSPot_____
Delta = 1.0
pot = KuzminKutuzovStaeckelPotential(ac=20.,Delta=Delta,normalize=True)
#_____initialize an orbit (twice)_____
vxvv = [1.,0.1,1.1,0.01,0.1]
o= Orbit(vxvv=vxvv)
#_____integrate the orbit with C_____
ts= numpy.linspace(0,101,100)
o.integrate(ts,pot,method='leapfrog_c')
#____estimate Focal length Delta_____
#for each time step individually:
deltas_estimate = numpy.zeros(len(ts))
for ii in range(len(ts)):
deltas_estimate[ii] = estimateDeltaStaeckel(pot,o.R(ts[ii]),o.z(ts[ii]))
assert numpy.all(numpy.fabs(deltas_estimate - Delta) < 10.**-8), \
'Focal length Delta estimated along the orbit is not constant.'
#for all time steps together:
delta_estimate = estimateDeltaStaeckel(pot,o.R(ts),o.z(ts))
assert numpy.fabs(delta_estimate - Delta) < 10.**-8, \
'Focal length Delta estimated from the orbit is not the same as the input focal length.'
return None
#-----------------------------------------------------------------------------
#test whether this is really a Staeckel potential and the Actions are conserved along the orbit
def test_actionConservation():
#_____initialize some KKSPot_____
Delta = 1.0
pot = KuzminKutuzovStaeckelPotential(ac=20.,Delta=Delta,normalize=True)
#_____initialize an orbit (twice)_____
vxvv = [1.,0.1,1.1,0.01,0.1]
o= Orbit(vxvv=vxvv)
#_____integrate the orbit with C_____
ts= numpy.linspace(0,101,100)
o.integrate(ts,pot,method='leapfrog_c')
#_____Setup ActionAngle object and calculate actions (Staeckel approximation)_____
aAS = actionAngleStaeckel(pot=pot,delta=Delta,c=True)
jrs,lzs,jzs = aAS(o.R(ts),o.vR(ts),o.vT(ts),o.z(ts),o.vz(ts))
assert numpy.all(numpy.fabs(jrs - jrs[0]) < 10.**-8.), \
'Radial action is not conserved along orbit.'
assert numpy.all(numpy.fabs(lzs - lzs[0]) < 10.**-8.), \
'Angular momentum is not conserved along orbit.'
assert numpy.all(numpy.fabs(jzs - jzs[0]) < 10.**-8.), \
'Vertical action is not conserved along orbit.'
return None
#-----------------------------------------------------------------------------
#test coordinate transformation
def test_lambdanu_to_Rz():
#coordinate system:
a = 3.
g = 4.
Delta = numpy.sqrt(g-a)
ac = numpy.sqrt(a/g)
#_____test float input (z=0)_____
#coordinate transformation:
l, n = 2., -4.
R,z= bovy_coords.lambdanu_to_Rz(l,n,ac=ac,Delta=Delta)
#true values:
R_true = numpy.sqrt((l+a)*(n+a)/(a-g))
z_true = numpy.sqrt((l+g)*(n+g)/(g-a))
#test:
assert numpy.fabs(R-R_true) < 10.**-10., 'lambdanu_to_Rz conversion did not work as expected (R)'
assert numpy.fabs(z-z_true) < 10.**-10., 'lambdanu_to_Rz conversion did not work as expected (z)'
#_____Also test for arrays_____
#coordinate transformation:
l = numpy.array([2. ,10.,20. ,0.])
n = numpy.array([-4.,-3.,-3.5,-3.5])
R,z= bovy_coords.lambdanu_to_Rz(l,n,ac=ac,Delta=Delta)
#true values:
R_true = numpy.sqrt((l+a)*(n+a)/(a-g))
z_true = numpy.sqrt((l+g)*(n+g)/(g-a))
#test:
rel_diff = numpy.fabs((R-R_true)/R_true) < 10.**-8.
abs_diff = (numpy.fabs(R-R_true) < 10.**-6.) * (numpy.fabs(R_true) < 10.**-6.)
assert numpy.all(rel_diff+abs_diff), 'lambdanu_to_Rz conversion did not work as expected (R array)'
rel_diff = numpy.fabs((z-z_true)/z_true) < 10.**-8.
abs_diff = (numpy.fabs(z-z_true) < 10.**-6.) * (numpy.fabs(z_true) < 10.**-6.)
assert numpy.all(rel_diff+abs_diff), 'lambdanu_to_Rz conversion did not work as expected (z array)'
return None
def test_Rz_to_lambdanu():
#coordinate system:
a = 3.
g = 7.
Delta = numpy.sqrt(g-a)
ac = numpy.sqrt(a/g)
#!/usr/bin/env python
# Copyright (c) 2011-2021, wradlib developers.
# Distributed under the MIT License. See LICENSE.txt for more info.
"""
Xarray based Data I/O
^^^^^^^^^^^^^^^^^^^^^
Reads data from netcdf-based CfRadial1, CfRadial2 and hdf5-based ODIM_H5 and
other hdf5-flavours (GAMIC). More radar backends (sigmet, rainbow) will be
implemented too.
Writes data to CfRadial2, ODIM_H5 or plain netCDF files.
This reader implementation uses
* `xarray <https://xarray.pydata.org/>`_,
* `netcdf4 <https://unidata.github.io/netcdf4-python/>`_,
* `h5py <https://www.h5py.org/>`_ and
* `h5netcdf <https://github.com/h5netcdf/h5netcdf>`_.
Currently there are three different approaches.
The recommended approach makes use of the newly implemented ``xarray.backends.BackendEntrypoint``.
For every radar source (CfRadial1, CfRadial2, GAMIC, ODIM) a specific backend is
implemented in wradlib which returns a specific `sweep` as an ``xarray.Dataset``.
Convenience functions (e.g. ``wradlib.io.open_radar_dataset``) are available to read
volume data into a shallow ``wradlib.io.RadarVolume`` wrapper.
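For example (a sketch, not from the original docs; it assumes an ODIM_H5 file,
and the ``engine`` value depends on the radar source)::
    vol = wradlib.io.open_radar_dataset("volume.h5", engine="odim")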
The following two approaches are not recommended and will be removed from the codebase
in wradlib v 2.0.0.
In the first approach the data is claimed using netcdf4-Dataset in a diskless
non-persistent mode as follows::
vol = io.xarray.OdimH5(ncfile)
# or
vol = io.xarray.CfRadial(ncfile)
The vol data structure holds one or many ['sweep_X'] xarray datasets, containing the
sweep data. The root group xarray dataset which corresponds to the
CfRadial2 root-group is available via the `.root`-object.
The writer implementation uses xarray for CfRadial2 output and relies on h5py
for the ODIM_H5 output.
The second approach reads ODIM files (metadata) into a *simple* accessible
structure::
vol = wradlib.io.open_odim(paths, loader='netcdf4', **kwargs)
All datafiles are accessed via the given loader ('netcdf4', 'h5py',
'h5netcdf'). Only absolutely necessary data is actually read in this process,
e.g. acquisition time and elevation, to fill the structure accordingly. All
subsequent metadata retrievals are cached to further improve performance.
Actual data access is realised via xarray using engine 'netcdf4' or 'h5netcdf',
depending on the loader.
Since for data handling xarray is utilized all xarray features can be
exploited, like lazy-loading, pandas-like indexing on N-dimensional data and
vectorized mathematical operations across multiple dimensions.
Examples
--------
See :ref:`/notebooks/fileio/wradlib_odim_multi_file_dataset.ipynb`.
Warning
-------
This implementation is considered experimental. Changes in the API should
be expected.
.. autosummary::
:nosignatures:
:toctree: generated/
{}
"""
__all__ = [
"WradlibVariable",
"RadarVolume",
"open_radar_dataset",
"open_radar_mfdataset",
"XRadVol",
"CfRadial",
"OdimH5",
"to_cfradial2",
"to_odim",
"to_netcdf",
"open_odim",
"XRadSweep",
"XRadMoment",
"XRadTimeSeries",
"XRadVolume",
"create_xarray_dataarray",
]
__doc__ = __doc__.format("\n ".join(__all__))
import collections
import datetime as dt
import glob
import io
import os
import re
import warnings
from distutils.version import LooseVersion
import dateutil
import deprecation
import h5netcdf
import h5py
import netCDF4 as nc
import numpy as np
import xarray as xr
from xarray.backends.api import combine_by_coords
from xarray.core.variable import Variable
from wradlib import version
from wradlib.georef import xarray
try:
from tqdm import tqdm
except ImportError:
def tqdm(val, **kwargs):
print(
"wradlib: Please wait for completion of time consuming task! \n"
"wradlib: Please install 'tqdm' for showing a progress bar "
"instead."
)
return val
def raise_on_missing_xarray_backend():
"""Raise errors if functionality isn't available."""
if LooseVersion(xr.__version__) < LooseVersion("0.17.0"):
raise ImportError(
f"'xarray>=0.17.0' needed to perform this operation. "
f"'xarray={xr.__version__}' available.",
)
elif LooseVersion(xr.__version__) < LooseVersion("0.18.2"):
xarray_backend_api = os.environ.get("XARRAY_BACKEND_API", None)
if xarray_backend_api is None:
os.environ["XARRAY_BACKEND_API"] = "v2"
else:
if xarray_backend_api != "v2":
raise ValueError(
"Environment variable `XARRAY_BACKEND_API='v2'` needed to perform "
"this operation. "
)
else:
pass
class WradlibVariable(object):
"""Minimal variable wrapper."""
def __init__(self, dims, data, attrs):
self._dimensions = dims
self._data = data
self._attrs = attrs
@property
def dimensions(self):
return self._dimensions
@property
def data(self):
return self._data
@property
def attributes(self):
return self._attrs
@deprecation.deprecated(
deprecated_in="1.5",
removed_in="2.0",
current_version=version.version,
details="Use `wradlib.georef.create_xarray_dataarray` " "instead.",
)
def create_xarray_dataarray(*args, **kwargs):
return xarray.create_xarray_dataarray(*args, **kwargs)
moment_attrs = {"standard_name", "long_name", "units"}
# CfRadial 2.0 - ODIM_H5 mapping
moments_mapping = {
"DBZH": {
"standard_name": "radar_equivalent_reflectivity_factor_h",
"long_name": "Equivalent reflectivity factor H",
"short_name": "DBZH",
"units": "dBZ",
"gamic": ["zh"],
},
"DBZH_CLEAN": {
"standard_name": "radar_equivalent_reflectivity_factor_h",
"long_name": "Equivalent reflectivity factor H",
"short_name": "DBZH_CLEAN",
"units": "dBZ",
"gamic": None,
},
"DBZV": {
"standard_name": "radar_equivalent_reflectivity_factor_v",
"long_name": "Equivalent reflectivity factor V",
"short_name": "DBZV",
"units": "dBZ",
"gamic": ["zv"],
},
"ZH": {
"standard_name": "radar_linear_equivalent_reflectivity_factor_h",
"long_name": "Linear equivalent reflectivity factor H",
"short_name": "ZH",
"units": "unitless",
"gamic": None,
},
"ZV": {
"standard_name": "radar_equivalent_reflectivity_factor_v",
"long_name": "Linear equivalent reflectivity factor V",
"short_name": "ZV",
"units": "unitless",
"gamic": None,
},
"DBZ": {
"standard_name": "radar_equivalent_reflectivity_factor",
"long_name": "Equivalent reflectivity factor",
"short_name": "DBZ",
"units": "dBZ",
"gamic": None,
},
"DBTH": {
"standard_name": "radar_equivalent_reflectivity_factor_h",
"long_name": "Total power H (uncorrected reflectivity)",
"short_name": "DBTH",
"units": "dBZ",
"gamic": ["uzh", "uh"],
},
"DBTV": {
"standard_name": "radar_equivalent_reflectivity_factor_v",
"long_name": "Total power V (uncorrected reflectivity)",
"short_name": "DBTV",
"units": "dBZ",
"gamic": ["uzv", "uv"],
},
"TH": {
"standard_name": "radar_linear_equivalent_reflectivity_factor_h",
"long_name": "Linear total power H (uncorrected reflectivity)",
"short_name": "TH",
"units": "unitless",
"gamic": None,
},
"TV": {
"standard_name": "radar_linear_equivalent_reflectivity_factor_v",
"long_name": "Linear total power V (uncorrected reflectivity)",
"short_name": "TV",
"units": "unitless",
"gamic": None,
},
"VRADH": {
"standard_name": "radial_velocity_of_scatterers_away_" "from_instrument_h",
"long_name": "Radial velocity of scatterers away from instrument H",
"short_name": "VRADH",
"units": "meters per seconds",
"gamic": ["vh"],
},
"VRADV": {
"standard_name": "radial_velocity_of_scatterers_" "away_from_instrument_v",
"long_name": "Radial velocity of scatterers away from instrument V",
"short_name": "VRADV",
"units": "meters per second",
"gamic": ["vv"],
},
"VR": {
"standard_name": "radial_velocity_of_scatterers_away_" "from_instrument",
"long_name": "Radial velocity of scatterers away from instrument",
"short_name": "VR",
"units": "meters per seconds",
"gamic": None,
},
"VRAD": {
"standard_name": "radial_velocity_of_scatterers_away_" "from_instrument",
"long_name": "Radial velocity of scatterers away from instrument",
"short_name": "VRAD",
"units": "meters per seconds",
"gamic": None,
},
"VRADDH": {
"standard_name": "radial_velocity_of_scatterers_away_" "from_instrument_h",
"long_name": "Radial velocity of scatterers away from instrument H",
"short_name": "VRADDH",
"units": "meters per seconds",
"gamic": None,
},
"WRADH": {
"standard_name": "radar_doppler_spectrum_width_h",
"long_name": "Doppler spectrum width H",
"short_name": "WRADH",
"units": "meters per seconds",
"gamic": ["wh"],
},
"UWRADH": {
"standard_name": "radar_doppler_spectrum_width_h",
"long_name": "Doppler spectrum width H",
"short_name": "UWRADH",
"units": "meters per seconds",
"gamic": ["uwh"],
},
"WRADV": {
"standard_name": "radar_doppler_spectrum_width_v",
"long_name": "Doppler spectrum width V",
"short_name": "WRADV",
"units": "meters per second",
"gamic": ["wv"],
},
"WRAD": {
"standard_name": "radar_doppler_spectrum_width",
"long_name": "Doppler spectrum width",
"short_name": "WRAD",
"units": "meters per second",
"gamic": None,
},
"ZDR": {
"standard_name": "radar_differential_reflectivity_hv",
"long_name": "Log differential reflectivity H/V",
"short_name": "ZDR",
"units": "dB",
"gamic": ["zdr"],
},
"UZDR": {
"standard_name": "radar_differential_reflectivity_hv",
"long_name": "Log differential reflectivity H/V",
"short_name": "UZDR",
"units": "dB",
"gamic": ["uzdr"],
},
"LDR": {
"standard_name": "radar_linear_depolarization_ratio",
"long_name": "Log-linear depolarization ratio HV",
"short_name": "LDR",
"units": "dB",
"gamic": ["ldr"],
},
"PHIDP": {
"standard_name": "radar_differential_phase_hv",
"long_name": "Differential phase HV",
"short_name": "PHIDP",
"units": "degrees",
"gamic": ["phidp"],
},
"UPHIDP": {
"standard_name": "radar_differential_phase_hv",
"long_name": "Differential phase HV",
"short_name": "UPHIDP",
"units": "degrees",
"gamic": ["uphidp"],
},
"KDP": {
"standard_name": "radar_specific_differential_phase_hv",
"long_name": "Specific differential phase HV",
"short_name": "KDP",
"units": "degrees per kilometer",
"gamic": ["kdp"],
},
"RHOHV": {
"standard_name": "radar_correlation_coefficient_hv",
"long_name": "Correlation coefficient HV",
"short_name": "RHOHV",
"units": "unitless",
"gamic": ["rhohv"],
},
"URHOHV": {
"standard_name": "radar_correlation_coefficient_hv",
"long_name": "Correlation coefficient HV",
"short_name": "URHOHV",
"units": "unitless",
"gamic": ["urhohv"],
},
"SNRH": {
"standard_name": "signal_noise_ratio_h",
"long_name": "Signal Noise Ratio H",
"short_name": "SNRH",
"units": "unitless",
"gamic": None,
},
"SNRV": {
"standard_name": "signal_noise_ratio_v",
"long_name": "Signal Noise Ratio V",
"short_name": "SNRV",
"units": "unitless",
"gamic": None,
},
"SQIH": {
"standard_name": "signal_quality_index_h",
"long_name": "Signal Quality H",
"short_name": "SQIH",
"units": "unitless",
"gamic": None,
},
"SQIV": {
"standard_name": "signal_quality_index_v",
"long_name": "Signal Quality V",
"short_name": "SQIV",
"units": "unitless",
"gamic": None,
},
"CCORH": {
"standard_name": "clutter_correction_h",
"long_name": "Clutter Correction H",
"short_name": "CCORH",
"units": "unitless",
"gamic": None,
},
"CCORV": {
"standard_name": "clutter_correction_v",
"long_name": "Clutter Correction V",
"short_name": "CCORV",
"units": "unitless",
"gamic": None,
},
"CMAP": {
"standard_name": "clutter_map",
"long_name": "Clutter Map",
"short_name": "CMAP",
"units": "unitless",
"gamic": ["cmap"],
},
}
ODIM_NAMES = {value["short_name"]: key for (key, value) in moments_mapping.items()}
GAMIC_NAMES = {
v: key
for (key, value) in moments_mapping.items()
if value["gamic"] is not None
for v in value["gamic"]
}
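# --- usage sketch (editor's addition, not part of the original module) ---
# The lookup tables above translate between naming schemes: GAMIC_NAMES maps
# a GAMIC moment label to its canonical key, ODIM_NAMES maps a short_name back.
_key = GAMIC_NAMES["zdr"]  # -> "ZDR"
assert ODIM_NAMES[moments_mapping[_key]["short_name"]] == _key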
range_attrs = {
"units": "meters",
"standard_name": "projection_range_coordinate",
"long_name": "range_to_measurement_volume",
"spacing_is_constant": "true",
"axis": "radial_range_coordinate",
"meters_to_center_of_first_gate": None,
}
az_attrs = {
"standard_name": "ray_azimuth_angle",
"long_name": "azimuth_angle_from_true_north",
"units": "degrees",
"axis": "radial_azimuth_coordinate",
}
el_attrs = {
"standard_name": "ray_elevation_angle",
"long_name": "elevation_angle_from_horizontal_plane",
"units": "degrees",
"axis": "radial_elevation_coordinate",
}
time_attrs = {
"standard_name": "time",
"units": "seconds since 1970-01-01T00:00:00Z",
}
root_vars = {
"volume_number",
"platform_type",
"instrument_type",
"primary_axis",
"time_coverage_start",
"time_coverage_end",
"latitude",
"longitude",
"altitude",
"fixed_angle",
"status_xml",
}
sweep_vars1 = {
"sweep_number",
"sweep_mode",
"polarization_mode",
"prt_mode",
"follow_mode",
"fixed_angle",
"target_scan_rate",
"sweep_start_ray_index",
"sweep_end_ray_index",
}
sweep_vars2 = {
"azimuth",
"elevation",
"pulse_width",
"prt",
"nyquist_velocity",
"unambiguous_range",
"antenna_transition",
"n_samples",
"r_calib_index",
"scan_rate",
}
sweep_vars3 = {
"DBZH",
"DBZV",
"VELH",
"VELV",
"DBZ",
"VR",
"time",
"range",
"reflectivity_horizontal",
}
cf_full_vars = {"prt": "prf", "n_samples": "pulse"}
global_attrs = [
("Conventions", "Cf/Radial"),
("version", "Cf/Radial version number"),
("title", "short description of file contents"),
("institution", "where the original data were produced"),
(
"references",
("references that describe the data or the methods used " "to produce it"),
),
("source", "method of production of the original data"),
("history", "list of modifications to the original data"),
("comment", "miscellaneous information"),
("instrument_name", "nameThe of radar or lidar"),
("site_name", "name of site where data were gathered"),
("scan_name", "name of scan strategy used, if applicable"),
("scan_id", "scan strategy id, if applicable. assumed 0 if missing"),
("platform_is_mobile", '"true" or "false", assumed "false" if missing'),
(
"ray_times_increase",
(
'"true" or "false", assumed "true" if missing. '
"This is set to true if ray times increase monotonically "
"thoughout all of the sweeps in the volume"
),
),
("field_names", "array of strings of field names present in this file."),
("time_coverage_start", "copy of time_coverage_start global variable"),
("time_coverage_end", "copy of time_coverage_end global variable"),
(
"simulated data",
(
'"true" or "false", assumed "false" if missing. '
"data in this file are simulated"
),
),
]
global_variables = dict(
[
("volume_number", np.int_),
("platform_type", "fixed"),
("instrument_type", "radar"),
("primary_axis", "axis_z"),
("time_coverage_start", "1970-01-01T00:00:00Z"),
("time_coverage_end", "1970-01-01T00:00:00Z"),
("latitude", np.nan),
("longitude", np.nan),
("altitude", np.nan),
("altitude_agl", np.nan),
("sweep_group_name", (["sweep"], [np.nan])),
("sweep_fixed_angle", (["sweep"], [np.nan])),
("frequency", np.nan),
("status_xml", "None"),
]
)
@xr.register_dataset_accessor("gamic")
class GamicAccessor(object):
"""Dataset Accessor for handling GAMIC HDF5 data files"""
def __init__(self, xarray_obj):
self._obj = xarray_obj
self._radial_range = None
self._azimuth_range = None
self._elevation_range = None
self._time_range = None
self._sitecoords = None
self._polcoords = None
self._projection = None
self._time = None
@property
def radial_range(self):
"""Return the radial range of this dataset."""
if self._radial_range is None:
ngates = self._obj.attrs["bin_count"]
# range_start = self._obj.attrs['range_start']
range_samples = self._obj.attrs["range_samples"]
range_step = self._obj.attrs["range_step"]
bin_range = range_step * range_samples
range_data = np.arange(
bin_range / 2.0, bin_range * ngates, bin_range, dtype="float32"
)
range_attrs["meters_to_center_of_first_gate"] = bin_range / 2.0
da = xr.DataArray(range_data, dims=["dim_1"], attrs=range_attrs)
self._radial_range = da
return self._radial_range
@property
def azimuth_range(self):
"""Return the azimuth range of this dataset."""
if self._azimuth_range is None:
azstart = self._obj["azimuth_start"]
azstop = self._obj["azimuth_stop"]
zero_index = np.where(azstop < azstart)
azstop[zero_index[0]] += 360
azimuth = (azstart + azstop) / 2.0
azimuth = azimuth.assign_attrs(az_attrs)
self._azimuth_range = azimuth
return self._azimuth_range
@property
def elevation_range(self):
"""Return the elevation range of this dataset."""
if self._elevation_range is None:
elstart = self._obj["elevation_start"]
elstop = self._obj["elevation_stop"]
elevation = (elstart + elstop) / 2.0
elevation = elevation.assign_attrs(el_attrs)
self._elevation_range = elevation
return self._elevation_range
@property
def time_range(self):
"""Return the time range of this dataset."""
if self._time_range is None:
times = self._obj["timestamp"] / 1e6
attrs = {
"units": "seconds since 1970-01-01T00:00:00Z",
"standard_name": "time",
}
da = xr.DataArray(times, attrs=attrs)
self._time_range = da
return self._time_range
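# --- usage sketch (editor's addition; assumes xarray is imported as xr) ---
# The "gamic" accessor derives the range coordinate purely from dataset
# attributes, so a minimal synthetic dataset is enough to exercise it.
_demo_gamic = xr.Dataset(attrs={"bin_count": 3, "range_samples": 2, "range_step": 100.0})
print(_demo_gamic.gamic.radial_range.values)  # -> [100. 300. 500.]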
@xr.register_dataset_accessor("odim")
class OdimAccessor(object):
"""Dataset Accessor for handling ODIM_H5 data files"""
def __init__(self, xarray_obj):
self._obj = xarray_obj
self._radial_range = None
self._azimuth_range = None
self._elevation_range = None
self._time_range = None
self._time_range2 = None
self._prt = None
self._n_samples = None
@property
def radial_range(self):
"""Return the radial range of this dataset."""
if self._radial_range is None:
ngates = self._obj.attrs["nbins"]
range_start = self._obj.attrs["rstart"] * 1000.0
bin_range = self._obj.attrs["rscale"]
cent_first = range_start + bin_range / 2.0
range_data = np.arange(
cent_first, range_start + bin_range * ngates, bin_range, dtype="float32"
)
range_attrs["meters_to_center_of_first_gate"] = cent_first
range_attrs["meters_between_gates"] = bin_range
da = xr.DataArray(range_data, dims=["dim_1"], attrs=range_attrs)
self._radial_range = da
return self._radial_range
@property
def azimuth_range2(self):
"""Return the azimuth range of this dataset."""
if self._azimuth_range is None:
nrays = self._obj.attrs["nrays"]
res = 360.0 / nrays
azimuth_data = np.arange(res / 2.0, 360.0, res, dtype="float32")
da = xr.DataArray(azimuth_data, dims=["dim_0"], attrs=az_attrs)
self._azimuth_range = da
return self._azimuth_range
@property
def azimuth_range(self):
"""Return the azimuth range of this dataset."""
if self._azimuth_range is None:
startaz = self._obj.attrs["startazA"]
stopaz = self._obj.attrs["stopazA"]
zero_index = np.where(stopaz < startaz)
stopaz[zero_index[0]] += 360
azimuth_data = (startaz + stopaz) / 2.0
da = xr.DataArray(azimuth_data, attrs=az_attrs)
self._azimuth_range = da
return self._azimuth_range
@property
def elevation_range2(self):
"""Return the elevation range of this dataset."""
if self._elevation_range is None:
nrays = self._obj.attrs["nrays"]
elangle = self._obj.attrs["elangle"]
elevation_data = np.ones(nrays, dtype="float32") * elangle
da = xr.DataArray(elevation_data, dims=["dim_0"], attrs=el_attrs)
self._elevation_range = da
return self._elevation_range
@property
def elevation_range(self):
"""Return the elevation range of this dataset."""
if self._elevation_range is None:
startel = self._obj.attrs["startelA"]
stopel = self._obj.attrs["stopelA"]
elevation_data = (startel + stopel) / 2.0
da = xr.DataArray(elevation_data, dims=["dim_0"], attrs=el_attrs)
self._elevation_range = da
return self._elevation_range
@property
def time_range(self):
"""Return the time range of this dataset."""
if self._time_range is None:
startT = self._obj.attrs["startazT"]
stopT = self._obj.attrs["stopazT"]
times = (startT + stopT) / 2.0
self._time_range = times
return self._time_range
@property
def time_range2(self):
"""Return the time range of this dataset."""
if self._time_range2 is None:
startdate = self._obj.attrs["startdate"]
starttime = self._obj.attrs["starttime"]
enddate = self._obj.attrs["enddate"]
endtime = self._obj.attrs["endtime"]
start = dt.datetime.strptime(startdate + starttime, "%Y%m%d%H%M%S")
end = dt.datetime.strptime(enddate + endtime, "%Y%m%d%H%M%S")
start = start.replace(tzinfo=dt.timezone.utc)
end = end.replace(tzinfo=dt.timezone.utc)
self._time_range2 = (start.timestamp(), end.timestamp())
return self._time_range2
@property
def prt(self):
if self._prt is None:
try:
prt = 1.0 / self._obj.attrs["prf"]
da = xr.DataArray(prt, dims=["dim_0"])
self._prt = da
except KeyError:
pass
return self._prt
@property
def n_samples(self):
if self._n_samples is None:
try:
da = xr.DataArray(self._obj.attrs["pulse"], dims=["dim_0"])
self._n_samples = da
except KeyError:
pass
return self._n_samples
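# --- usage sketch (editor's addition) -------------------------------------
# Same idea for the "odim" accessor: the nbins/rstart/rscale attributes
# define the bin-center range axis.
_demo_odim = xr.Dataset(attrs={"nbins": 4, "rstart": 0.0, "rscale": 500.0})
print(_demo_odim.odim.radial_range.values)  # -> [ 250.  750. 1250. 1750.]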
def to_cfradial2(volume, filename, timestep=None):
"""Save RadarVolume/XRadVol/XRadVolume to CfRadial2.0 compliant file.
Parameters
----------
volume : RadarVolume/XRadVol/XRadVolume object
filename : str
output filename
timestep : int
timestep of wanted volume
"""
volume.root.load()
root = volume.root.copy(deep=True)
root.attrs["Conventions"] = "Cf/Radial"
root.attrs["version"] = "2.0"
root.to_netcdf(filename, mode="w", group="/")
for idx, key in enumerate(root.sweep_group_name.values):
if isinstance(volume, (OdimH5, CfRadial)):
swp = volume[key]
elif isinstance(volume, XRadVolume):
swp = volume[idx][timestep].data
else:
ds = volume[idx]
if "time" not in ds.dims:
ds = ds.expand_dims("time")
swp = ds.isel(time=timestep)
swp.load()
dims = list(swp.dims)
dims.remove("range")
dim0 = dims[0]
try:
swp = swp.swap_dims({dim0: "time"})
except ValueError:
swp = swp.drop_vars("time").rename({"rtime": "time"})
swp = swp.swap_dims({dim0: "time"})
swp = swp.drop_vars(["x", "y", "z", "gr", "rays", "bins"], errors="ignore")
swp = swp.sortby("time")
swp.to_netcdf(filename, mode="a", group=key)
def to_netcdf(volume, filename, timestep=None, keys=None):
"""Save RadarVolume/XRadVolume to netcdf compliant file.
Parameters
----------
volume : RadarVolume/XRadVolume object
filename : str
output filename
timestep : int, slice
timestep/slice of wanted volume
keys : list
list of sweep_group_names which should be written to the file
"""
volume.root.load()
root = volume.root.copy(deep=True)
root.attrs["Conventions"] = "Cf/Radial"
root.attrs["version"] = "2.0"
root.to_netcdf(filename, mode="w", group="/")
if keys is None:
keys = root.sweep_group_name.values
for idx, key in enumerate(root.sweep_group_name.values):
if key in keys:
try:
swp = volume[idx].data.isel(time=timestep)
except AttributeError:
ds = volume[idx]
if "time" not in ds.dims:
ds = ds.expand_dims("time")
swp = ds.isel(time=timestep)
swp.to_netcdf(filename, mode="a", group=key)
def to_odim(volume, filename, timestep=0):
"""Save RadarVolume/XRadVol/XRadVolume to ODIM_H5/V2_2 compliant file.
Parameters
----------
volume : RadarVolume/XRadVol/XRadVolume object
filename : str
output filename
timestep : int
timestep of wanted volume
"""
root = volume.root
h5 = h5py.File(filename, "w")
# root group, only Conventions for ODIM_H5
_write_odim({"Conventions": "ODIM_H5/V2_2"}, h5)
# how group
how = {}
how.update({"_modification_program": "wradlib"})
h5_how = h5.create_group("how")
_write_odim(how, h5_how)
sweepnames = root.sweep_group_name.values
# what group, object, version, date, time, source, mandatory
# p. 10 f
what = {}
if len(sweepnames) > 1:
what["object"] = "PVOL"
else:
what["object"] = "SCAN"
what["version"] = "H5rad 2.2"
what["date"] = str(root.time_coverage_start.values)[:10].replace("-", "")
what["time"] = str(root.time_coverage_end.values)[11:19].replace(":", "")
what["source"] = root.attrs["instrument_name"]
h5_what = h5.create_group("what")
_write_odim(what, h5_what)
# where group, lon, lat, height, mandatory
where = {
"lon": root.longitude.values,
"lat": root.latitude.values,
"height": root.altitude.values,
}
h5_where = h5.create_group("where")
_write_odim(where, h5_where)
# datasets
ds_list = ["dataset{}".format(i + 1) for i in range(len(sweepnames))]
ds_idx = np.argsort(ds_list)
for idx in ds_idx:
if isinstance(volume, (OdimH5, CfRadial)):
ds = volume["sweep_{}".format(idx + 1)]
elif isinstance(volume, XRadVolume):
ds = volume[idx][timestep].data
ds = ds.drop_vars("time", errors="ignore").rename({"rtime": "time"})
else:
ds = volume[idx]
if "time" not in ds.dims:
ds = ds.expand_dims("time")
ds = ds.isel(time=timestep, drop=True)
ds = ds.drop_vars("time", errors="ignore").rename({"rtime": "time"})
h5_dataset = h5.create_group(ds_list[idx])
# what group p. 21 ff.
h5_ds_what = h5_dataset.create_group("what")
ds_what = {}
# skip NaT values
valid_times = ~np.isnat(ds.time.values)
t = sorted(ds.time.values[valid_times])
start = dt.datetime.utcfromtimestamp(np.rint(t[0].astype("O") / 1e9))
end = dt.datetime.utcfromtimestamp(np.rint(t[-1].astype("O") / 1e9))
ds_what["product"] = "SCAN"
ds_what["startdate"] = start.strftime("%Y%m%d")
ds_what["starttime"] = start.strftime("%H%M%S")
ds_what["enddate"] = end.strftime("%Y%m%d")
ds_what["endtime"] = end.strftime("%H%M%S")
_write_odim(ds_what, h5_ds_what)
# where group, p. 11 ff. mandatory
h5_ds_where = h5_dataset.create_group("where")
rscale = ds.range.values[1] - ds.range.values[0]
rstart = (ds.range.values[0] - rscale / 2.0) / 1000.0
# todo: make this work for RHI's
a1gate = np.argsort(ds.sortby("azimuth").time.values)[0]
try:
fixed_angle = ds.fixed_angle
except AttributeError:
fixed_angle = ds.elevation.round(decimals=1).median().values
ds_where = {
"elangle": fixed_angle,
"nbins": ds.range.shape[0],
"rstart": rstart,
"rscale": rscale,
"nrays": ds.azimuth.shape[0],
"a1gate": a1gate,
}
_write_odim(ds_where, h5_ds_where)
# how group, p. 14 ff.
h5_ds_how = h5_dataset.create_group("how")
tout = [tx.astype("O") / 1e9 for tx in ds.sortby("azimuth").time.values]
tout_sorted = sorted(tout)
# handle non-uniform times (eg. only second-resolution)
if np.count_nonzero(np.diff(tout_sorted)) < (len(tout_sorted) - 1):
tout = np.roll(
np.linspace(tout_sorted[0], tout_sorted[-1], len(tout)), a1gate
)
tout_sorted = sorted(tout)
difft = np.diff(tout_sorted)  # api: numpy.diff
# -*- coding: utf-8 -*-
##################################################
# © 2017 ETH Zurich, Swiss Seismological Service #
# <NAME> - wavedec at gmail dot com #
##################################################
"""
Here are estimation routines used in WaveDec
"""
from scipy.optimize import minimize # , approx_fprime
import numpy as np
from numpy import shape, fft, zeros, real, imag, eye, linalg, ceil, linspace, pi, \
meshgrid, concatenate, sqrt, array, sum, conjugate, log, mean, size, dot, \
arctan2, sin, cos, argmin, reshape, matrix, transpose, arange
import DataUtils as db
import logging
from wdSettings import MODEL_NOISE, MODEL_VERTICAL, MODEL_RAYLEIGH, MODEL_LOVE
from wdSettings import EWX, NSY, UDZ, ROTX, ROTY, ROTZ
def decomposeWavefield(conn, y, WindowId, Ts, Fvec_ndx, Kmax, Kstep, Estep, Vmin, WavesToModel, MaxWaves, MaxIterations, ArrayInfo, Gamma):
""" Fit different wave types at several frequencies.
Parameters
----------
conn :
SQL database connection
y : float array
input signal
WindowId : int
Unique identifier of the window
Ts : float
Sampling time [s]
Fvec_ndx : int array
...
Kmax : float
Largest wavenumber [1/m] to analyze
Kstep : float
Grid step in the wavenumber plane [1/m]
Estep : float
Grid step in the ellipticity angle search [rad]
Vmin : float
Smallest velocity [m/s] to analyze
WavesToModel :
Describe which wave types should be fitted
MaxWaves : int
Maximum number of waves to model at each frequency in the time window
MaxIterations : int
Maximum number of iterations. In each iteration the parameters of each wave are re-estimated.
ArrayInfo : float array
An array containing information about sensor location and channels.
It has L rows, where L is the number of channels.
Each rows has the following form:
pos_x, pos_y, pos_z, cmp, Ts
where the first three fields are the position of the sensor in [m].
cmp is the component code.
Ts is the sampling time as read from the SAC file.
Gamma : float
Controls model complexity. (0 for pure ML, 1 for BIC, other values for intermediate strategies)
"""
K = shape(y)[0]
L = shape(y)[1]
Fvec_fft = fft.fftfreq(K, Ts);
# Fvec = Fvec_fft[Fvec_ndx]
(Sm_bw, Sw_bw, Swm_bw) = bwMessages(y, Ts)
Sm_fw_all = zeros(shape(Sm_bw))
F=shape(Sm_bw)[2]
NumK = int(2*ceil(Kmax/Kstep)) # number of points in the wavenumber search grid. Even, so that we do not have 0 in the final search grid
NumE = int(ceil(np.pi/Estep)) # number of points in the ellipticity search grid. Even, so that we have 0 in the final search grid
Wavenumber_x = linspace(-Kmax, Kmax, NumK)
Wavenumber_y = linspace(-Kmax, Kmax, NumK)
EllipticityAngle = linspace(-pi/2, pi/2, NumE, endpoint=False)
xx, yy, zz = meshgrid(Wavenumber_x,Wavenumber_y,EllipticityAngle)
xx=concatenate(xx); yy=concatenate(yy); zz=concatenate(zz);
ndx_ok = sqrt( xx**2 + yy**2 ) <= Kmax # only wavenumber smaller than Kmax
xx = xx[ndx_ok]; yy = yy[ndx_ok]; zz = zz[ndx_ok];
X_grid_R = array([xx, yy, zz]) # Rayleigh waves
xx, yy = meshgrid(Wavenumber_x,Wavenumber_y)
xx=concatenate(xx); yy=concatenate(yy);
ndx_ok = sqrt( xx**2 + yy**2 ) <= Kmax # only wavenumber smaller than Kmax
xx = xx[ndx_ok]; yy = yy[ndx_ok];
X_grid_L = array([xx, yy]) # Love waves
# Fndx
for ff in Fvec_ndx:
X_grid_R_f = X_grid_R
X_grid_L_f = X_grid_L
X_grid_V_f = X_grid_L
if Vmin is not None and Vmin > 0: # further restrict search grid
if WavesToModel[MODEL_LOVE] or WavesToModel[MODEL_VERTICAL]:
ndx_ok = sqrt( X_grid_L[0,:]**2 + X_grid_L[1,:]**2 ) <= Fvec_fft[ff]/Vmin
if WavesToModel[MODEL_LOVE]:
X_grid_L_f = X_grid_L[:,ndx_ok]
if WavesToModel[MODEL_VERTICAL]:
X_grid_V_f = X_grid_L[:,ndx_ok]
if WavesToModel[MODEL_RAYLEIGH]:
ndx_ok = sqrt( X_grid_R[0,:]**2 + X_grid_R[1,:]**2 ) <= Fvec_fft[ff]/Vmin
X_grid_R_f = X_grid_R[:,ndx_ok]
Sm_fw = zeros((2,L,F,MaxWaves)) # shape() = (2,L,F)
WaveModel = zeros((MaxWaves,))
WaveAmplitude = zeros((MaxWaves,))
WavePhase = zeros((MaxWaves,))
WaveX_ML = [None]* MaxWaves
# gradually increase the number of waves modeled
NumWaves = 0 # number of waves modeled
for mm in range(0,MaxWaves):
logging.debug("Fitting {0}-th model (at {1:.3f} [Hz])".format(mm+1, Fvec_fft[ff]))
tmpBIC=[]
tmpModel=[]
tmpSm_bw = Sm_bw - sum(Sm_fw ,3) + Sm_fw[:,:,:,mm]
# how about variance messages?
# Parameter estimation: attempt to fit different possible models
if WavesToModel[MODEL_NOISE]:
(BIC_N, sigma2_ML) = fitNoise(tmpSm_bw, L, K, Gamma)
tmpBIC.append(BIC_N); tmpModel.append(MODEL_NOISE);
if WavesToModel[MODEL_VERTICAL] and size(X_grid_V_f) > 0:
(BIC_V, Sm_fw_V, Amplitude_V, Phase_V, X_ML_V) = fitVerticalWave(X_grid_V_f, tmpSm_bw, Sw_bw, ff, L, K, Kmax, ArrayInfo, Gamma)
tmpBIC.append(BIC_V); tmpModel.append(MODEL_VERTICAL);
if WavesToModel[MODEL_LOVE] and size(X_grid_L_f) > 0:
(BIC_L, Sm_fw_L, Amplitude_L, Phase_L, X_ML_L) = fitLoveWave(X_grid_L_f, tmpSm_bw, Sw_bw, ff, L, K, Kmax, ArrayInfo, Gamma)
tmpBIC.append(BIC_L); tmpModel.append(MODEL_LOVE);
if WavesToModel[MODEL_RAYLEIGH] and size(X_grid_R_f) > 0:
(BIC_R, Sm_fw_R, Amplitude_R, Phase_R, X_ML_R) = fitRayleighWave(X_grid_R_f, tmpSm_bw, Sw_bw, ff, L, K, Kmax, ArrayInfo, Gamma)
tmpBIC.append(BIC_R); tmpModel.append(MODEL_RAYLEIGH);
# Model selection: choose wave with smallest BIC
if len(tmpBIC) > 0:
WaveModel[mm] = tmpModel[np.argmin(tmpBIC)]
histBIC = zeros((MaxIterations+1,))
histBIC[0] = np.min(tmpBIC)
if WaveModel[mm] == MODEL_NOISE:
break # when a noise model is chosen no more waves need to be added
if WaveModel[mm] == 0:
break # Nothing was modeled. Perhaps because of stringent Vmin at this frequency
elif WaveModel[mm] == MODEL_VERTICAL:
Sm_fw[:,:,:,mm] = Sm_fw_V
WaveAmplitude[mm] = Amplitude_V
WavePhase[mm] = Phase_V
WaveX_ML[mm] = X_ML_V
elif WaveModel[mm] == MODEL_LOVE:
Sm_fw[:,:,:,mm] = Sm_fw_L
WaveAmplitude[mm] = Amplitude_L
WavePhase[mm] = Phase_L
WaveX_ML[mm] = X_ML_L
elif WaveModel[mm] == MODEL_RAYLEIGH:
Sm_fw[:,:,:,mm] = Sm_fw_R
WaveAmplitude[mm] = Amplitude_R
WavePhase[mm] = Phase_R
WaveX_ML[mm] = X_ML_R
else:
logging.warning("Warning: unrecognized wave model")
# refine existing estimates, if needed
NumWaves = sum( (WaveModel > 0) & (WaveModel != MODEL_NOISE) )
if (MaxIterations > 0) and (NumWaves > 1):
for ii in range(1,MaxIterations):
for m in range(0,mm+1):
logging.debug("Refining estimates of model {0}/{1}".format(m+1,mm+1))
tmpSm_bw = Sm_bw - sum(Sm_fw ,3) + Sm_fw[:,:,:,m]
if WaveModel[m] == MODEL_NOISE:
continue
elif WaveModel[m] == MODEL_VERTICAL:
X = WaveX_ML[m]
(BIC_V, Sm_fw_V, Amplitude_V, Phase_V, X_ML_V) = fitVerticalWave(X, tmpSm_bw, Sw_bw, ff, L, K, Kmax, ArrayInfo, Gamma)
WaveX_ML[m] = X_ML_V
WaveAmplitude[m] = Amplitude_V
WavePhase[m] = Phase_V
Sm_fw[:,:,:,m] = Sm_fw_V
elif WaveModel[m] == MODEL_LOVE:
X = WaveX_ML[m]
(BIC_L, Sm_fw_L, Amplitude_L, Phase_L, X_ML_L) = fitLoveWave(X, tmpSm_bw, Sw_bw, ff, L, K, Kmax, ArrayInfo, Gamma)
WaveX_ML[m] = X_ML_L
WaveAmplitude[m] = Amplitude_L
WavePhase[m] = Phase_L
Sm_fw[:,:,:,m] = Sm_fw_L
elif WaveModel[m] == MODEL_RAYLEIGH:
X = WaveX_ML[m]
(BIC_R, Sm_fw_R, Amplitude_R, Phase_R, X_ML_R) = fitRayleighWave(X, tmpSm_bw, Sw_bw, ff, L, K, Kmax, ArrayInfo, Gamma)
WaveX_ML[m] = X_ML_R
WaveAmplitude[m] = Amplitude_R
WavePhase[m] = Phase_R
Sm_fw[:,:,:,m] = Sm_fw_R
else:
logging.warning("Unrecognized wave model {0}".format(WaveModel[m]))
tmpSm_bw = Sm_bw - sum(Sm_fw ,3)
(BIC_N, sigma2_ML) = fitNoise(tmpSm_bw, L, K, Gamma)
histBIC[ii] = BIC_N # TODO: is this the right BIC value? With multiple waves, is it the same for all of them?
if abs(histBIC[ii]-histBIC[ii-1])/abs(histBIC[ii-1]) < 0.01:
break
# Compute forward messages and estimated parameters
Sm_fw_all += sum(Sm_fw ,3)
NumWaves = sum( (WaveModel > 0) & (WaveModel != MODEL_NOISE) )
for mm in range(0,NumWaves):
if WaveModel[mm] == MODEL_NOISE:
continue
elif WaveModel[mm] == MODEL_VERTICAL:
Kx = WaveX_ML[mm][0]
Ky = WaveX_ML[mm][1]
Wavenumber = sqrt( Kx**2 + Ky**2)
Azimuth = np.mod(arctan2(Ky, Kx), 2*pi) # Note the role reversal, as from documentation
db.addVerticalWave(conn, WindowId, ff, WaveAmplitude[mm], WavePhase[mm], Wavenumber, Azimuth)
elif WaveModel[mm] == MODEL_LOVE:
Kx = WaveX_ML[mm][0]
Ky = WaveX_ML[mm][1]
Wavenumber = sqrt( Kx**2 + Ky**2)
Azimuth = np.mod(arctan2(Ky, Kx), 2*pi) # Note the role reversal, as from documentation
db.addLoveWave(conn, WindowId, ff, WaveAmplitude[mm], WavePhase[mm], Wavenumber, Azimuth)
elif WaveModel[mm] == MODEL_RAYLEIGH:
Kx = WaveX_ML[mm][0]
Ky = WaveX_ML[mm][1]
EllipticityAngle = np.mod(WaveX_ML[mm][2] + pi/2, pi) -pi/2
Wavenumber = sqrt( Kx**2 + Ky**2)
Azimuth = np.mod(arctan2(Ky, Kx), 2*pi) # Note the role reversal, as from documentation
db.addRayleighWave(conn, WindowId, ff, WaveAmplitude[mm], WavePhase[mm], Wavenumber, Azimuth,EllipticityAngle)
else:
logging.warning("Unrecognized wave model {0}".format(WaveModel[mm]))
# after all freq have been processed
(BIC_N, sigma2_ML) = fitNoise(Sm_bw - Sm_fw_all, L, K, Gamma)
db.addNoise(conn, WindowId, sigma2_ML)
# TODO after estimating noise again, do we want to iterate once more on wave params ?
return
def bwMessages(y, Ts):
"""Compute sufficient statistics, assuming unitary variance
Parameters
----------
y : 2d float array
It is an array of size (K, L). Each column contains the signal at the l-th location.
Ts : float
The sampling time in [s]
Returns
-------
Sm_bw : float array
It is an array of size (2, L). Each column refers to the signal at the l-th location.
Contains the mean vector of the message S.
Sw_bw : float array
It is an array of size (2, 2, L). Each page refers to the signal at the l-th location.
Contains the precision matrix of the message S.
Swm_bw : float array
It is an array of size (2, L). Each column refers to the signal at the l-th location.
Contains the weighted mean vector of the message S.
"""
K = shape(y)[0]
L = shape(y)[1]
#Fvec = fft.fftfreq(K, Ts);
#Fmin=0
#Fmax=0.5/Ts
#Fndx = (Fvec >= Fmin) and (Fvec <= Fmax);
#Fvec=Fvec[Fndx];
Fnum=int(K/2+1); # TODO need to make sure K is even
Y_bw= fft.rfft(y, K, 0);
Swm_bw = zeros((2,L,Fnum));
Sm_bw= zeros((2,L,Fnum));
Sw_bw=zeros((2,2,L,Fnum));
for ff in range(0,Fnum):
for ll in range(0, L):
Swm_bw[0,ll,ff] = real(Y_bw[ff,ll]);
Swm_bw[1,ll,ff] = imag(Y_bw[ff,ll]);
Sw_bw[:,:,ll,ff] = (K/2)*eye(2);
Sm_bw[:,ll,ff] = linalg.solve(Sw_bw[:,:,ll,ff], Swm_bw[:,ll,ff]);
return (Sm_bw, Sw_bw, Swm_bw)
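# --- usage sketch (editor's addition) -------------------------------------
# bwMessages on a short two-channel random signal. K should be even (see the
# TODO above); Fnum = K/2 + 1 frequency bins come back.
_K, _L, _Ts = 64, 2, 0.01
_y = np.random.randn(_K, _L)
(_Sm, _Sw, _Swm) = bwMessages(_y, _Ts)
print(shape(_Sm), shape(_Sw))  # -> (2, 2, 33) (2, 2, 2, 33)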
### Vertical wave
def negLL_VerticalWave(X_grid, Sw_bw, Swm_bw, SlnGamma_bw, ArrayInfo):
""" Computes the loglikelihood of a plane wave (vertical component only)
Parameters
----------
X_grid : float array
Array of size (2,N). Description of N points at which to evaluate the likelihood function. First row is the wavenumber along the x axis. Second row is the wavenumber along the y axis.
Sw_bw, Swm_bw, SlnGamma_bw :
sufficient statistics
ArrayInfo : float array
An array containing information about sensor location and channels.
It has L rows, where L is the number of channels.
Each rows has the following form:
pos_x, pos_y, pos_z, cmp, Ts
where the first three fields are the position of the sensor in [m].
cmp is the component code.
Ts is the sampling time as read from the SAC file.
Returns
-------
negLogLikelihood : float array
A one dimensional array of N elements with the value of minus the log-likelihood.
"""
if shape(shape(X_grid))[0] == 1: # single point
X_grid = reshape(X_grid, (2,-1))
N_points=shape(X_grid)[1]
Wavenumber_x=X_grid[0,:]
Wavenumber_y=X_grid[1,:]
L=shape(Swm_bw)[1]
negLogLikelihood = zeros((N_points,))
pos_x=ArrayInfo[:,0]
pos_y=ArrayInfo[:,1]
comp=ArrayInfo[:,3]
if False: # TODO check if input matrices are const*identity
for nn in range(0,N_points):
Uw = matrix(zeros((2,2)))
Uwm = matrix(zeros((2,1)))
for ll in range(0,L):
if comp[ll] == UDZ:
phi = -2*pi*(Wavenumber_x[nn]*pos_x[ll] + Wavenumber_y[nn]*pos_y[ll])
H = matrix([[cos(phi), -sin(phi)],[sin(phi), cos(phi)]])
else:
H = matrix([[0, 0],[0, 0]])
Uw = Uw + transpose(H) * matrix(Sw_bw[:,:,ll]) * H
Uwm = Uwm + transpose(H) * transpose(matrix(Swm_bw[:,ll]))
Um = linalg.solve(Uw, Uwm)
negLogLikelihood[nn] = -0.5*transpose(Um) * Uwm + sum(-SlnGamma_bw)
else: # alternative, faster implementation. Requires Sw_bw to be const*Identity
ndx_v = arange(0, L)[comp == UDZ]
L_v = len(ndx_v) # number of vertical components
phi = zeros((N_points, L_v))
Uw = zeros((N_points))
Uwm = zeros((2,N_points))
for ll_v in range(0, L_v):
ll = ndx_v[ll_v]
phi[:,ll_v] = -2*pi*(Wavenumber_x[:]*pos_x[ll] + Wavenumber_y[:]*pos_y[ll])
Uw[:] = Uw[:] + Sw_bw[0,0,ll]
Uwm[0,:] += + cos(phi[:,ll_v]) * Swm_bw[0,ll] + sin(phi[:,ll_v]) * Swm_bw[1,ll]
Uwm[1,:] += - sin(phi[:,ll_v]) * Swm_bw[0,ll] + cos(phi[:,ll_v]) * Swm_bw[1,ll]
Um = zeros((2,N_points))
Um[0,:] = Uwm[0,:] / Uw[:]
Um[1,:] = Uwm[1,:] / Uw[:]
negLogLikelihood[:] = -0.5*(Um[0,:] * Uwm[0,:] + Um[1,:] * Uwm[1,:]) + sum(-SlnGamma_bw)
return(negLogLikelihood)
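# --- usage sketch (editor's addition; hypothetical two-sensor vertical array)
# Evaluate the vertical-wave likelihood on a coarse wavenumber grid, reusing
# the sufficient statistics (_Sw, _Swm) computed above at a single bin _ff.
_ff = 5
_ArrayInfo = array([[0.0, 0.0, 0.0, UDZ, _Ts], [10.0, 0.0, 0.0, UDZ, _Ts]])
_kx, _ky = meshgrid(linspace(-0.1, 0.1, 5), linspace(-0.1, 0.1, 5))
_Xg = array([concatenate(_kx), concatenate(_ky)])
_nll = negLL_VerticalWave(_Xg, _Sw[:, :, :, _ff], _Swm[:, :, _ff], zeros((_L,)), _ArrayInfo)
print(_Xg[:, argmin(_nll)])  # wavenumber pair with the highest likelihood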
def grad_negLL_VerticalWave(X_grid, Sw_bw, Swm_bw, SlnGamma_bw, ArrayInfo):
""" Computes the gradient of the loglikelihood of a plane wave (vertical component only)
Parameters
----------
X_grid : float array
Array of size (2,N). Description of N points at which to evaluate the likelihood function. First row is the wavenumber along the x axis. Second row is the wavenumber along the y axis.
Sw_bw, Swm_bw, SlnGamma_bw :
sufficient statistics
ArrayInfo : float array
An array containing information about sensor location and channels.
It has L rows, where L is the number of channels.
Each rows has the following form:
pos_x, pos_y, pos_z, cmp, Ts
where the first three fields are the position of the sensor in [m].
cmp is the component code.
Ts is the sampling time as read from the SAC file.
Returns
-------
grad : float array
The gradient.
"""
Wavenumber_x=X_grid[0]
Wavenumber_y=X_grid[1]
L=shape(Swm_bw)[1]
grad = zeros((2,))
pos_x=ArrayInfo[:,0]
pos_y=ArrayInfo[:,1]
comp=ArrayInfo[:,3]
if False: # TODO check if input matrices are const*identity
# derivative wrt Wavenumber_x
Uw = matrix(zeros((2,2)))
Uwm = matrix(zeros((2,1)))
dWx_Uw = matrix(zeros((2,2)))
dWx_Uwm = matrix(zeros((2,1)))
dWy_Uw = matrix(zeros((2,2)))
dWy_Uwm = matrix(zeros((2,1)))
for ll in range(0,L):
if comp[ll] == UDZ:
phi = -2*pi*(Wavenumber_x*pos_x[ll] + Wavenumber_y*pos_y[ll])
H = matrix([[cos(phi), -sin(phi)],[sin(phi), cos(phi)]])
dWx_H = matrix([[-sin(phi), -cos(phi)],[cos(phi), -sin(phi)]]) * -2*pi*pos_x[ll]
dWy_H = matrix([[-sin(phi), -cos(phi)],[cos(phi), -sin(phi)]]) * -2*pi*pos_y[ll]
else:
H = matrix([[0, 0],[0, 0]])
dWx_H = matrix([[0, 0],[0, 0]])
dWy_H = matrix([[0, 0],[0, 0]])
Uw = Uw + transpose(H) * matrix(Sw_bw[:,:,ll]) * H
Uwm = Uwm + transpose(H) * transpose(matrix(Swm_bw[:,ll]))
dWx_Uw += transpose(dWx_H) * matrix(Sw_bw[:,:,ll]) * H + transpose(H) * matrix(Sw_bw[:,:,ll]) * dWx_H
dWx_Uwm += transpose(dWx_H) * transpose(matrix(Swm_bw[:,ll]))  # api: numpy.transpose
|
#!/usr/bin/env python
u"""
spatial.py
Written by <NAME> (03/2021)
Utilities for reading, writing and operating on spatial data
PYTHON DEPENDENCIES:
numpy: Scientific Computing Tools For Python
https://numpy.org
https://numpy.org/doc/stable/user/numpy-for-matlab-users.html
netCDF4: Python interface to the netCDF C library
https://unidata.github.io/netcdf4-python/netCDF4/index.html
h5py: Pythonic interface to the HDF5 binary data format
https://www.h5py.org/
gdal: Pythonic interface to the Geospatial Data Abstraction Library (GDAL)
https://pypi.python.org/pypi/GDAL
PyYAML: YAML parser and emitter for Python
https://github.com/yaml/pyyaml
UPDATE HISTORY:
Updated 03/2021: added polar stereographic area scale calculation
add routines for converting to and from cartesian coordinates
replaced numpy bool/int to prevent deprecation warnings
Updated 01/2021: add streaming from bytes for ascii, netCDF4, HDF5, geotiff
set default time for geotiff files to 0
Updated 12/2020: added module for converting ellipsoids
Updated 11/2020: output data as masked arrays if containing fill values
add functions to read from and write to geotiff image formats
Written 09/2020
"""
import os
import re
import io
import gzip
import uuid
import h5py
import yaml
import netCDF4
import datetime
import numpy as np
import osgeo.gdal, osgeo.osr
def case_insensitive_filename(filename):
"""
Searches a directory for a filename without case dependence
"""
#-- check if file presently exists with input case
if not os.access(os.path.expanduser(filename),os.F_OK):
#-- search for filename without case dependence
basename = os.path.basename(filename)
directory = os.path.dirname(os.path.expanduser(filename))
f = [f for f in os.listdir(directory) if re.match(basename,f,re.I)]
if not f:
raise IOError('{0} not found in file system'.format(filename))
filename = os.path.join(directory,f.pop())
return os.path.expanduser(filename)
def from_ascii(filename, compression=None, verbose=False,
columns=['time','y','x','data'], header=0):
"""
Read data from an ascii file
Inputs: full path of input ascii file
Options:
ascii file is compressed or streamed from memory
verbose output of file information
column names of ascii file
header lines to skip from start of file
"""
#-- set filename
print(filename) if verbose else None
#-- open the ascii file and extract contents
if (compression == 'gzip'):
#-- read input ascii data from gzip compressed file and split lines
with gzip.open(case_insensitive_filename(filename),'r') as f:
file_contents = f.read().decode('ISO-8859-1').splitlines()
elif (compression == 'bytes'):
#-- read input file object and split lines
file_contents = filename.read().splitlines()
else:
#-- read input ascii file (.txt, .asc) and split lines
with open(case_insensitive_filename(filename),'r') as f:
file_contents = f.read().splitlines()
#-- number of lines in the file
file_lines = len(file_contents)
#-- compile regular expression operator for extracting numerical values
#-- from input ascii files of spatial data
regex_pattern = r'[-+]?(?:(?:\d*\.\d+)|(?:\d+\.?))(?:[EeD][+-]?\d+)?'
rx = re.compile(regex_pattern, re.VERBOSE)
#-- check if header has a known format
if (str(header).upper() == 'YAML'):
#-- counts the number of lines in the header
YAML = False
count = 0
#-- Reading over header text
while (YAML is False) & (count < file_lines):
#-- file line at count
line = file_contents[count]
#-- if End of YAML Header is found: set YAML flag
YAML = bool(re.search(r"\# End of YAML header",line))
#-- add 1 to counter
count += 1
#-- parse the YAML header (specifying yaml loader)
YAML_HEADER = yaml.load('\n'.join(file_contents[:count]),
Loader=yaml.BaseLoader)
#-- output spatial data and attributes
dinput = {}
#-- copy global attributes
dinput['attributes'] = YAML_HEADER['header']['global_attributes']
#-- allocate for each variable and copy variable attributes
for c in columns:
dinput[c] = np.zeros((file_lines-count))
dinput['attributes'][c] = YAML_HEADER['header']['variables'][c]
#-- update number of file lines to skip for reading data
header = int(count)
else:
#-- output spatial data and attributes
dinput = {c:np.zeros((file_lines-header)) for c in columns}
dinput['attributes'] = {c:dict() for c in columns}
#-- extract spatial data array
#-- for each line in the file
for i,line in enumerate(file_contents[header:]):
#-- extract columns of interest and assign to dict
#-- convert fortran exponentials if applicable
column = {c:r.replace('D','E') for c,r in zip(columns,rx.findall(line))}
#-- copy variables from column dict to output dictionary
for c in columns:
dinput[c][i] = np.float64(column[c])
#-- convert to masked array if fill values
if '_FillValue' in dinput['attributes']['data'].keys():
dinput['data'] = np.ma.asarray(dinput['data'])
dinput['data'].fill_value = dinput['attributes']['data']['_FillValue']
dinput['data'].mask = (dinput['data'].data == dinput['data'].fill_value)
#-- return the spatial variables
return dinput
def from_netCDF4(filename, compression=None, verbose=False,
timename='time', xname='lon', yname='lat', varname='data'):
"""
Read data from a netCDF4 file
Inputs: full path of input netCDF4 file
Options:
netCDF4 file is compressed or streamed from memory
verbose output of file information
netCDF4 variable names of time, longitude, latitude, and data
"""
#-- read data from netCDF4 file
#-- Open the NetCDF4 file for reading
if (compression == 'gzip'):
#-- read as in-memory (diskless) netCDF4 dataset
with gzip.open(case_insensitive_filename(filename),'r') as f:
fileID = netCDF4.Dataset(uuid.uuid4().hex,memory=f.read())
elif (compression == 'bytes'):
#-- read as in-memory (diskless) netCDF4 dataset
fileID = netCDF4.Dataset(uuid.uuid4().hex,memory=filename.read())
else:
#-- read netCDF4 dataset
fileID = netCDF4.Dataset(case_insensitive_filename(filename), 'r')
#-- Output NetCDF file information
if verbose:
print(fileID.filepath())
print(list(fileID.variables.keys()))
#-- create python dictionary for output variables and attributes
dinput = {}
dinput['attributes'] = {}
#-- get attributes for the file
for attr in ['title','description','projection']:
#-- try getting the attribute
try:
ncattr, = [s for s in dir(fileID) if re.match(attr,s,re.I)]
dinput['attributes'][attr] = getattr(fileID,ncattr)
except (ValueError,AttributeError):
pass
#-- list of attributes to attempt to retrieve from included variables
attributes_list = ['description','units','long_name','calendar',
'standard_name','_FillValue']
#-- mapping between netCDF4 variable names and output names
variable_mapping = dict(x=xname,y=yname,data=varname,time=timename)
#-- for each variable
for key,nc in variable_mapping.items():
#-- Getting the data from each NetCDF variable
dinput[key] = fileID.variables[nc][:]
#-- get attributes for the included variables
dinput['attributes'][key] = {}
for attr in attributes_list:
#-- try getting the attribute
try:
ncattr, = [s for s in dir(fileID.variables[nc]) if re.match(attr,s,re.I)]
dinput['attributes'][key][attr] = \
getattr(fileID.variables[nc],ncattr)
except (ValueError,AttributeError):
pass
#-- convert to masked array if fill values
if '_FillValue' in dinput['attributes']['data'].keys():
dinput['data'] = np.ma.asarray(dinput['data'])
dinput['data'].fill_value = dinput['attributes']['data']['_FillValue']
dinput['data'].mask = (dinput['data'].data == dinput['data'].fill_value)
#-- Closing the NetCDF file
fileID.close()
#-- return the spatial variables
return dinput
def from_HDF5(filename, compression=None, verbose=False,
timename='time', xname='lon', yname='lat', varname='data'):
"""
Read data from an HDF5 file
Inputs: full path of input HDF5 file
Options:
HDF5 file is compressed or streamed from memory
verbose output of file information
HDF5 variable names of time, longitude, latitude, and data
"""
#-- read data from HDF5 file
#-- Open the HDF5 file for reading
if (compression == 'gzip'):
#-- read gzip compressed file and extract into in-memory file object
with gzip.open(case_insensitive_filename(filename),'r') as f:
fid = io.BytesIO(f.read())
#-- set filename of BytesIO object
fid.filename = os.path.basename(filename)
#-- rewind to start of file
fid.seek(0)
#-- read as in-memory (diskless) HDF5 dataset from BytesIO object
fileID = h5py.File(fid, 'r')
elif (compression == 'bytes'):
#-- read as in-memory (diskless) HDF5 dataset
fileID = h5py.File(filename, 'r')
else:
#-- read HDF5 dataset
fileID = h5py.File(case_insensitive_filename(filename), 'r')
#-- Output HDF5 file information
if verbose:
print(fileID.filename)
print(list(fileID.keys()))
#-- create python dictionary for output variables and attributes
dinput = {}
dinput['attributes'] = {}
#-- get attributes for the file
for attr in ['title','description','projection']:
#-- try getting the attribute
try:
dinput['attributes'][attr] = fileID.attrs[attr]
except (KeyError,AttributeError):
pass
#-- list of attributes to attempt to retrieve from included variables
attributes_list = ['description','units','long_name','calendar',
'standard_name','_FillValue']
#-- mapping between HDF5 variable names and output names
variable_mapping = dict(x=xname,y=yname,data=varname,time=timename)
#-- for each variable
for key,h5 in variable_mapping.items():
#-- Getting the data from each HDF5 variable
dinput[key] = np.copy(fileID[h5][:])
#-- get attributes for the included variables
dinput['attributes'][key] = {}
for attr in attributes_list:
#-- try getting the attribute
try:
dinput['attributes'][key][attr] = fileID[h5].attrs[attr]
except (KeyError,AttributeError):
pass
#-- convert to masked array if fill values
if '_FillValue' in dinput['attributes']['data'].keys():
dinput['data'] = np.ma.asarray(dinput['data'])
dinput['data'].fill_value = dinput['attributes']['data']['_FillValue']
dinput['data'].mask = (dinput['data'].data == dinput['data'].fill_value)
#-- Closing the HDF5 file
fileID.close()
#-- return the spatial variables
return dinput
def from_geotiff(filename, compression=None, verbose=False):
"""
Read data from a geotiff file
Inputs: full path of input geotiff file
Options:
geotiff file is compressed or streamed from memory
verbose output of file information
"""
#-- Open the geotiff file for reading
if (compression == 'gzip'):
#-- read gzip compressed file and extract into memory-mapped object
mmap_name = "/vsimem/{0}".format(uuid.uuid4().hex)
with gzip.open(case_insensitive_filename(filename),'r') as f:
osgeo.gdal.FileFromMemBuffer(mmap_name, f.read())
#-- read as GDAL memory-mapped (diskless) geotiff dataset
ds = osgeo.gdal.Open(mmap_name)
elif (compression == 'bytes'):
#-- read as GDAL memory-mapped (diskless) geotiff dataset
mmap_name = "/vsimem/{0}".format(uuid.uuid4().hex)
osgeo.gdal.FileFromMemBuffer(mmap_name, filename.read())
ds = osgeo.gdal.Open(mmap_name)
else:
#-- read geotiff dataset
ds = osgeo.gdal.Open(case_insensitive_filename(filename))
#-- print geotiff file if verbose
print(filename) if verbose else None
#-- create python dictionary for output variables and attributes
dinput = {}
dinput['attributes'] = {c:dict() for c in ['x','y','data']}
#-- get the spatial projection reference information
srs = ds.GetSpatialRef()
dinput['attributes']['projection'] = srs.ExportToProj4()
dinput['attributes']['wkt'] = srs.ExportToWkt()
#-- get dimensions
xsize = ds.RasterXSize
ysize = ds.RasterYSize
bsize = ds.RasterCount
#-- get geotiff info
info_geotiff = ds.GetGeoTransform()
dinput['attributes']['spacing'] = (info_geotiff[1],info_geotiff[5])
#-- calculate image extents
xmin = info_geotiff[0]
ymax = info_geotiff[3]
xmax = xmin + (xsize-1)*info_geotiff[1]
ymin = ymax + (ysize-1)*info_geotiff[5]
dinput['attributes']['extent'] = (xmin,xmax,ymin,ymax)
#-- x and y pixel center coordinates (converted from upper left)
dinput['x'] = xmin + info_geotiff[1]/2.0 + np.arange(xsize)*info_geotiff[1]
dinput['y'] = ymax + info_geotiff[5]/2.0 + np.arange(ysize)*info_geotiff[5]
#-- read full image with GDAL
dinput['data'] = ds.ReadAsArray()
#-- set default time to zero for each band
dinput.setdefault('time', np.zeros((bsize)))
#-- check if image has fill values
if ds.GetRasterBand(1).GetNoDataValue():
#-- convert to masked array if fill values
dinput['data'] = np.ma.asarray(dinput['data'])
#-- mask invalid values
dinput['data'].fill_value = ds.GetRasterBand(1).GetNoDataValue()
#-- create mask array for bad values
dinput['data'].mask = (dinput['data'].data == dinput['data'].fill_value)
#-- set attribute for fill value
dinput['attributes']['data']['_FillValue'] = dinput['data'].fill_value
#-- close the dataset
ds = None
#-- return the spatial variables
return dinput
def to_ascii(output, attributes, filename, delimiter=',',
columns=['time','lat','lon','tide'], header=False, verbose=False):
"""
Write data to an ascii file
Inputs:
python dictionary of output data
python dictionary of output attributes
full path of output ascii file
Options:
delimiter for output spatial file
order of columns for output spatial file
create a YAML header with data attributes
verbose output
"""
filename = os.path.expanduser(filename)
print(filename) if verbose else None
#-- open the output file
fid = open(filename, 'w')
#-- create a column stack arranging data in column order
data_stack = np.c_[[output[col] for col in columns]]
ncol,nrow = np.shape(data_stack)
#-- print YAML header to top of file
if header:
fid.write('{0}:\n'.format('header'))
#-- data dimensions
fid.write('\n {0}:\n'.format('dimensions'))
fid.write(' {0:22}: {1:d}\n'.format('time',nrow))
#-- non-standard attributes
fid.write(' {0}:\n'.format('non-standard_attributes'))
#-- data format
fid.write(' {0:22}: ({1:d}f0.8)\n'.format('formatting_string',ncol))
fid.write('\n')
#-- global attributes
fid.write('\n {0}:\n'.format('global_attributes'))
today = datetime.datetime.now().isoformat()
fid.write(' {0:22}: {1}\n'.format('date_created', today))
# print variable descriptions to YAML header
fid.write('\n {0}:\n'.format('variables'))
#-- print YAML header with variable attributes
for i,v in enumerate(columns):
fid.write(' {0:22}:\n'.format(v))
for atn,atv in attributes[v].items():
fid.write(' {0:20}: {1}\n'.format(atn,atv))
#-- add precision and column attributes for ascii yaml header
fid.write(' {0:20}: double_precision\n'.format('precision'))
fid.write(' {0:20}: column {1:d}\n'.format('comments',i+1))
#-- end of header
fid.write('\n\n# End of YAML header\n')
#-- write to file for each data point
for line in range(nrow):
line_contents = ['{0:0.8f}'.format(d) for d in data_stack[:,line]]
print(delimiter.join(line_contents), file=fid)
#-- close the output file
fid.close()
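# --- usage sketch (editor's addition; demo_spatial.txt is a throwaway file) -
# Round-trip a tiny record through to_ascii/from_ascii defined above. The
# column set must include 'data' because from_ascii checks its attributes.
_cols = ['time', 'y', 'x', 'data']
_out = {c: np.arange(3, dtype=np.float64) for c in _cols}
_att = {c: {'units': 'none'} for c in _cols}
to_ascii(_out, _att, 'demo_spatial.txt', columns=_cols)
_back = from_ascii('demo_spatial.txt', columns=_cols)
assert np.allclose(_back['data'], _out['data'])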
def to_netCDF4(output, attributes, filename, verbose=False):
"""
Write data to a netCDF4 file
Inputs:
python dictionary of output data
python dictionary of output attributes
full path of output netCDF4 file
Options: verbose output
"""
#-- opening NetCDF file for writing
fileID = netCDF4.Dataset(os.path.expanduser(filename),'w',format="NETCDF4")
#-- Defining the NetCDF dimensions
fileID.createDimension('time', len(np.atleast_1d(output['time'])))
#-- defining the NetCDF variables
nc = {}
for key,val in output.items():
if '_FillValue' in attributes[key].keys():
nc[key] = fileID.createVariable(key, val.dtype, ('time',),
fill_value=attributes[key]['_FillValue'], zlib=True)
else:
nc[key] = fileID.createVariable(key, val.dtype, ('time',))
#-- filling NetCDF variables
nc[key][:] = val
#-- Defining attributes for variable
for att_name,att_val in attributes[key].items():
setattr(nc[key],att_name,att_val)
#-- add attribute for date created
fileID.date_created = datetime.datetime.now().isoformat()
#-- Output NetCDF structure information
if verbose:
print(filename)
print(list(fileID.variables.keys()))
#-- Closing the NetCDF file
fileID.close()
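# --- usage sketch (editor's addition; demo_spatial.nc is a throwaway file) --
# The same record round-trips through the netCDF4 writer/reader pair; the
# default variable names (time/lon/lat/data) match from_netCDF4's mapping.
_vars = {k: np.arange(3, dtype=np.float64) for k in ('time', 'lon', 'lat', 'data')}
_nattr = {k: {'units': 'none'} for k in _vars}
to_netCDF4(_vars, _nattr, 'demo_spatial.nc')
_ncback = from_netCDF4('demo_spatial.nc')
assert np.allclose(_ncback['data'], _vars['data'])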
def to_HDF5(output, attributes, filename, verbose=False):
"""
Write data to an HDF5 file
Inputs:
python dictionary of output data
python dictionary of output attributes
full path of output HDF5 file
Options: verbose output
"""
#-- opening HDF5 file for writing
fileID = h5py.File(filename, 'w')
#-- Defining the HDF5 dataset variables
h5 = {}
for key,val in output.items():
if '_FillValue' in attributes[key].keys():
h5[key] = fileID.create_dataset(key, val.shape, data=val,
dtype=val.dtype, fillvalue=attributes[key]['_FillValue'],
compression='gzip')
else:
h5[key] = fileID.create_dataset(key, val.shape, data=val,
dtype=val.dtype, compression='gzip')
#-- Defining attributes for variable
for att_name,att_val in attributes[key].items():
h5[key].attrs[att_name] = att_val
#-- add attribute for date created
fileID.attrs['date_created'] = datetime.datetime.now().isoformat()
#-- Output HDF5 structure information
if verbose:
print(filename)
print(list(fileID.keys()))
#-- Closing the HDF5 file
fileID.close()
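# --- usage sketch (editor's addition; demo_spatial.h5 is a throwaway file) --
# And once more through the HDF5 pair, reusing _vars/_nattr from above.
to_HDF5(_vars, _nattr, 'demo_spatial.h5')
_h5back = from_HDF5('demo_spatial.h5')
assert np.allclose(_h5back['data'], _vars['data'])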
def to_geotiff(output, attributes, filename, verbose=False,
varname='data', dtype=osgeo.gdal.GDT_Float64):
"""
Write data to a geotiff file
Inputs:
python dictionary of output data
python dictionary of output attributes
full path of output geotiff file
Options:
verbose output
output variable name
GDAL data type
"""
#-- verify grid dimensions to be iterable
output = expand_dims(output, varname=varname)
#-- grid shape
ny,nx,nband = np.shape(output[varname])
#-- output as geotiff
driver = osgeo.gdal.GetDriverByName("GTiff")
#-- set up the dataset with compression options
ds = driver.Create(filename,nx,ny,nband,dtype,['COMPRESS=LZW'])
#-- top left x, w-e pixel resolution, rotation
#-- top left y, rotation, n-s pixel resolution
xmin,xmax,ymin,ymax = attributes['extent']
dx,dy = attributes['spacing']
ds.SetGeoTransform([xmin,dx,0,ymax,0,dy])
#-- set the spatial projection reference information
srs = osgeo.osr.SpatialReference()
srs.ImportFromWkt(attributes['wkt'])
#-- export
ds.SetProjection( srs.ExportToWkt() )
#-- for each band
for band in range(nband):
#-- set fill value for band
if '_FillValue' in attributes[varname].keys():
fill_value = attributes[varname]['_FillValue']
ds.GetRasterBand(band+1).SetNoDataValue(fill_value)
#-- write band to geotiff array
ds.GetRasterBand(band+1).WriteArray(output[varname][:,:,band])
#-- print filename if verbose
print(filename) if verbose else None
#-- close dataset
ds.FlushCache()
def expand_dims(obj, varname='data'):
"""
Add a singleton dimension to a spatial dictionary if non-existent
Options:
variable name to modify
"""
#-- change time dimensions to be iterable
try:
obj['time'] = np.atleast_1d(obj['time'])
except:
pass
#-- output spatial with a third dimension
if isinstance(varname,list):
for v in varname:
obj[v] = np.atleast_3d(obj[v])
elif isinstance(varname,str):
obj[varname] = np.atleast_3d(obj[varname])
#-- return reformed spatial dictionary
return obj
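# --- usage sketch (editor's addition) ---------------------------------------
# expand_dims promotes a 2D grid to the (ny, nx, nband) layout that to_geotiff
# iterates over band-by-band.
_grid = {'time': 0.0, 'data': np.ones((4, 5))}
print(expand_dims(_grid)['data'].shape)  # -> (4, 5, 1)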
def convert_ellipsoid(phi1, h1, a1, f1, a2, f2, eps=1e-12, itmax=10):
"""
Convert latitudes and heights to a different ellipsoid using Newton-Raphson
Inputs:
phi1: latitude of input ellipsoid in degrees
h1: height above input ellipsoid in meters
a1: semi-major axis of input ellipsoid
f1: flattening of input ellipsoid
a2: semi-major axis of output ellipsoid
f2: flattening of output ellipsoid
Options:
eps: tolerance to prevent division by small numbers
and to determine convergence
itmax: maximum number of iterations to use in Newton-Raphson
Returns:
phi2: latitude of output ellipsoid in degrees
h2: height above output ellipsoid in meters
References:
Astronomical Algorithms, <NAME>, 1991, Willmann-Bell, Inc.
pp. 77-82
"""
if (len(phi1) != len(h1)):
raise ValueError('phi and h have incompatible dimensions')
#-- semiminor axis of input and output ellipsoid
b1 = (1.0 - f1)*a1
b2 = (1.0 - f2)*a2
#-- initialize output arrays
npts = len(phi1)
phi2 = np.zeros((npts))
h2 = np.zeros((npts))
#-- for each point
for N in range(npts):
#-- force phi1 into range -90 <= phi1 <= 90
if (np.abs(phi1[N]) > 90.0):
phi1[N] = np.sign(phi1[N])*90.0
#-- handle special case near the equator
#-- phi2 = phi1 (latitudes congruent)
#-- h2 = h1 + a1 - a2
if (np.abs(phi1[N]) < eps):
phi2[N] = np.copy(phi1[N])
h2[N] = h1[N] + a1 - a2
#-- handle special case near the poles
#-- phi2 = phi1 (latitudes congruent)
#-- h2 = h1 + b1 - b2
elif ((90.0 - np.abs(phi1[N])) < eps):
phi2[N] = np.copy(phi1[N])
h2[N] = h1[N] + b1 - b2
#-- handle case if latitude is within 45 degrees of equator
elif (np.abs(phi1[N]) <= 45):
#-- convert phi1 to radians
phi1r = phi1[N] * np.pi/180.0
sinphi1 = np.sin(phi1r)
cosphi1 = np.cos(phi1r)
#-- prevent division by very small numbers
cosphi1 = np.copy(eps) if (cosphi1 < eps) else cosphi1
#-- calculate tangent
tanphi1 = sinphi1 / cosphi1
u1 = np.arctan(b1 / a1 * tanphi1)
hpr1sin = b1 * np.sin(u1) + h1[N] * sinphi1
hpr1cos = a1 * np.cos(u1) + h1[N] * cosphi1
#-- set initial value for u2
u2 = np.copy(u1)
#-- setup constants
k0 = b2 * b2 - a2 * a2
k1 = a2 * hpr1cos
k2 = b2 * hpr1sin
#-- perform newton-raphson iteration to solve for u2
#-- cos(u2) will not be close to zero since abs(phi1) <= 45
for i in range(0, itmax+1):
cosu2 = np.cos(u2)
fu2 = k0 * np.sin(u2) + k1 * np.tan(u2) - k2
fu2p = k0 * cosu2 + k1 / (cosu2 * cosu2)
if (np.abs(fu2p) < eps):
break # converged: derivative too small to update further
else:
delta = fu2 / fu2p
u2 -= delta
if (np.abs(delta) < eps):
break # converged: update below tolerance (reassigning the loop variable, as the original did, does not exit a Python for loop)
#-- convert latitude to degrees and verify values between +/- 90
phi2r = np.arctan(a2 / b2 * np.tan(u2))
phi2[N] = phi2r*180.0/np.pi
if (np.abs(phi2[N]) > 90.0):
phi2[N] = np.sign(phi2[N])*90.0
#-- calculate height
h2[N] = (hpr1cos - a2 * np.cos(u2)) / np.cos(phi2r)
#-- handle final case where latitudes are between 45 degrees and pole
else:
#-- convert phi1 to radians
phi1r = phi1[N] * np.pi/180.0
sinphi1 = np.sin(phi1r)
cosphi1 = np.cos(phi1r)
#-- prevent division by very small numbers
cosphi1 = np.copy(eps) if (cosphi1 < eps) else cosphi1
#-- calculate tangent
tanphi1 = sinphi1 / cosphi1
u1 = np.arctan(b1 / a1 * tanphi1)
hpr1sin = b1 * np.sin(u1) + h1[N] * sinphi1
hpr1cos = a1 * np.cos(u1) + h1[N] * cosphi1
#-- set initial value for u2
u2 = np.copy(u1)
#-- setup constants
k0 = a2 * a2 - b2 * b2
k1 = b2 * hpr1sin
k2 = a2 * hpr1cos
#-- perform newton-raphson iteration to solve for u2
#-- sin(u2) will not be close to zero since abs(phi1) > 45
for i in range(0, itmax+1):
sinu2 = np.sin(u2)
fu2 = k0 * np.cos(u2) + k1 / np.tan(u2)  # api: numpy.tan
|
#!/usr/bin/env python
# -*- encoding:utf-8 -*-
#
# Author: ZhaoFei - <EMAIL>
# Create: 2017-09-19 14:26:14
# Last Modified: 2017-09-19 14:26:14
# Filename: read_tiffile.py
# Description: ---
# Copyright (c) 2016 Chengdu Lanjing Data&Information Co.
from collections import defaultdict
import csv
import sys
from shapely.geometry import MultiPolygon, Polygon
import shapely.wkt
import shapely.affinity
import numpy as np
import tifffile as tiff
# %matplotlib inline
import matplotlib.pyplot as plt
from matplotlib import cm
DATA_DIR = "../quickbird-data"
FILE_2015 = DATA_DIR+'/preliminary/quickbird2015.tif'
FILE_2017 = DATA_DIR+'/preliminary/quickbird2017.tif'
FILE_cadastral2015 = DATA_DIR+'/20170907_hint/cadastral2015.tif'
FILE_tinysample = DATA_DIR+'/20170907_hint/tinysample.tif'
im_2015 = tiff.imread(FILE_2015).transpose([1, 2, 0])
im_2017 = tiff.imread(FILE_2017).transpose([1, 2, 0])
im_tiny = tiff.imread(FILE_tinysample)
im_cada = tiff.imread(FILE_cadastral2015)
im_2015.shape
im_tiny.shape
im_cada.shape
def scale_percentile(matrix):
w, h, d = matrix.shape
matrix = np.reshape(matrix, [w * h, d]).astype(np.float64)
# Get 1st and 99th percentile
mins = np.percentile(matrix, 1, axis=0)
maxs = np.percentile(matrix, 99, axis=0) - mins
matrix = (matrix - mins[None, :]) / maxs[None, :]
matrix = np.reshape(matrix, [w, h, d])
matrix = matrix.clip(0, 1)
return matrix
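# usage sketch (editor's addition): the percentile stretch maps arbitrary
# radiometry into [0, 1], which is what the imshow calls below expect.
_m = scale_percentile(np.random.rand(8, 8, 3) * 1000.0)
print(_m.min(), _m.max())  # both within [0, 1]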
fig, axes = plt.subplots(ncols=2, nrows=1, figsize=(16, 6))
p1 = plt.subplot(121)
i1 = p1.imshow(scale_percentile(im_2015[100:1000, 100:1000, :3]))
plt.colorbar(i1)
p2 = plt.subplot(122)
i2 = p2.imshow(im_2015[100:1000, 100:1000, 3])
plt.colorbar(i2)
plt.show()
# ------------------------------
fig, axes = plt.subplots(ncols=2, nrows=2, figsize=(14, 10))
p1 = plt.subplot(221)
i1 = p1.imshow(im_2015[100:1000, 100:1000, 0])
plt.colorbar(i1)
p2 = plt.subplot(222)
i2 = p2.imshow(im_2015[100:1000, 100:1000, 1])
plt.colorbar(i2)
p3 = plt.subplot(223)
i3 = p3.imshow(im_2015[100:1000, 100:1000, 2])
plt.colorbar(i3)
p4 = plt.subplot(224)
i4 = p4.imshow(im_2015[100:1000, 100:1000, 3])
plt.colorbar(i4)
plt.show()
# ------------------------------
x_start = 500
x_end = 1000
y_start = 4800
y_end = 5300
plt.subplots(ncols=3, nrows=2, figsize=(16, 8))
mt = np.ma.array(np.ones((x_end-x_start, y_end-y_start)),
mask=((im_tiny[x_start:x_end, y_start:y_end, 0]/np.max(im_tiny)+im_cada[x_start:x_end, y_start:y_end])==0))
p151 = plt.subplot(231)
i151 = p151.imshow(im_2015[x_start:x_end, y_start:y_end, 3])
plt.colorbar(i151)
p152 = plt.subplot(233)
i152 = p152.imshow(im_tiny[x_start:x_end, y_start:y_end, 0])
plt.colorbar(i152)
p153 = plt.subplot(232)
i153 = p153.imshow(im_2015[x_start:x_end, y_start:y_end, 3])
p153.imshow(mt, cmap=cm.bwr, alpha=0.3, vmin=0, vmax=1)
plt.colorbar(i153)
p171 = plt.subplot(234)
i171 = p171.imshow(im_2017[x_start:x_end, y_start:y_end, 3])
plt.colorbar(i171)
p172 = plt.subplot(236)
i172 = p172.imshow(im_cada[x_start:x_end, y_start:y_end])
plt.colorbar(i172)
p173 = plt.subplot(235)
i173 = p173.imshow(im_2017[x_start:x_end, y_start:y_end, 3])
p173.imshow(mt, cmap=cm.bwr, alpha=0.3, vmin=0, vmax=1)
plt.colorbar(i173)
plt.show()
# ------------------------------
result_temp = np.random.randint(2, size=im_cada.shape, dtype=np.uint8)  # api: numpy.random.randint
|
#!/usr/bin/env python
__doc__ = \
"""
Calculate Root-mean-square deviation (RMSD) between structure A and B, in XYZ
or PDB format, using transformation and rotation.
For more information, usage, example and citation read more at
https://github.com/charnley/rmsd
"""
__version__ = '1.3.2'
import copy
import re
import numpy as np
from scipy.optimize import linear_sum_assignment
from scipy.spatial.distance import cdist
AXIS_SWAPS = np.array([
[0, 1, 2],
[0, 2, 1],
[1, 0, 2],
[1, 2, 0],
[2, 1, 0],
[2, 0, 1]])
AXIS_REFLECTIONS = np.array([
[1, 1, 1],
[-1, 1, 1],
[1, -1, 1],
[1, 1, -1],
[-1, -1, 1],
[-1, 1, -1],
[1, -1, -1],
[-1, -1, -1]])
def rmsd(V, W):
"""
Calculate Root-mean-square deviation from two sets of vectors V and W.
Parameters
----------
V : array
(N,D) matrix, where N is points and D is dimension.
W : array
(N,D) matrix, where N is points and D is dimension.
Returns
-------
rmsd : float
Root-mean-square deviation between the two vectors
"""
D = len(V[0])
N = len(V)
result = 0.0
for v, w in zip(V, W):
result += sum([(v[i] - w[i])**2.0 for i in range(D)])
return np.sqrt(result/N)
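# usage sketch (editor's addition): two point sets offset by 1 along x give
# a per-point squared deviation of 1, hence an RMSD of exactly 1.0.
_V = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0]])
_W = _V + np.array([1.0, 0.0, 0.0])
print(rmsd(_V, _W))  # -> 1.0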
def kabsch_rmsd(P, Q, translate=False):
"""
Rotate matrix P onto Q using the Kabsch algorithm and calculate the RMSD.
Parameters
----------
P : array
(N,D) matrix, where N is points and D is dimension.
Q : array
(N,D) matrix, where N is points and D is dimension.
translate : bool
Use centroids to translate vectors P and Q onto each other.
Returns
-------
rmsd : float
root-mean squared deviation
"""
if translate:
Q = Q - centroid(Q)
P = P - centroid(P)
P = kabsch_rotate(P, Q)
return rmsd(P, Q)
def kabsch_rotate(P, Q):
"""
Rotate matrix P onto matrix Q using the Kabsch algorithm.
Parameters
----------
P : array
(N,D) matrix, where N is points and D is dimension.
Q : array
(N,D) matrix, where N is points and D is dimension.
Returns
-------
P : array
(N,D) matrix, where N is points and D is dimension,
rotated
"""
U = kabsch(P, Q)
# Rotate P
P = np.dot(P, U)
return P
def kabsch(P, Q):
"""
Using the Kabsch algorithm with two sets of paired points P and Q, centered
around the centroid. Each vector set is represented as an NxD
matrix, where D is the dimension of the space.
The algorithm works in three steps:
- a centroid translation of P and Q (assumed done before this function
call)
- the computation of a covariance matrix C
- computation of the optimal rotation matrix U
For more info see http://en.wikipedia.org/wiki/Kabsch_algorithm
Parameters
----------
P : array
(N,D) matrix, where N is points and D is dimension.
Q : array
(N,D) matrix, where N is points and D is dimension.
Returns
-------
U : matrix
Rotation matrix (D,D)
"""
# Computation of the covariance matrix
C = np.dot(np.transpose(P), Q)
# Computation of the optimal rotation matrix
# This can be done using singular value decomposition (SVD)
# Getting the sign of det(V)*det(W) to decide
# whether we need to correct our rotation matrix to ensure a
# right-handed coordinate system.
# And finally calculating the optimal rotation matrix U
# see http://en.wikipedia.org/wiki/Kabsch_algorithm
V, S, W = np.linalg.svd(C)
d = (np.linalg.det(V) * np.linalg.det(W)) < 0.0
if d:
S[-1] = -S[-1]
V[:, -1] = -V[:, -1]
# Create Rotation matrix U
U = np.dot(V, W)
return U
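# A small self-check sketch: for full-rank centered point sets related by a
# rotation applied on the right, kabsch() recovers that rotation exactly and
# returns a proper rotation matrix (det +1). _P_chk and _R_chk are
# illustrative values, not part of the algorithm.
_P_chk = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0],
                   [0.0, 0.0, 1.0], [-1.0, -1.0, -1.0]])
_t = 0.3
_R_chk = np.array([[np.cos(_t), -np.sin(_t), 0.0],
                   [np.sin(_t), np.cos(_t), 0.0],
                   [0.0, 0.0, 1.0]])
_U_chk = kabsch(_P_chk, _P_chk @ _R_chk)
assert np.allclose(_U_chk, _R_chk)
assert np.isclose(np.linalg.det(_U_chk), 1.0)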
def quaternion_rmsd(P, Q):
"""
Rotate matrix P onto Q and calculate the RMSD
based on doi:10.1016/1049-9660(91)90036-O
Parameters
----------
P : array
(N,D) matrix, where N is points and D is dimension.
Q : array
(N,D) matrix, where N is points and D is dimension.
Returns
-------
rmsd : float
"""
rot = quaternion_rotate(P, Q)
P = np.dot(P, rot)
return rmsd(P, Q)
def quaternion_transform(r):
"""
Get optimal rotation
note: translation will be zero when the centroids of each molecule are the
same
"""
Wt_r = makeW(*r).T
Q_r = makeQ(*r)
rot = Wt_r.dot(Q_r)[:3, :3]
return rot
def makeW(r1, r2, r3, r4=0):
"""
matrix involved in quaternion rotation
"""
W = np.asarray([
[r4, r3, -r2, r1],
[-r3, r4, r1, r2],
[r2, -r1, r4, r3],
[-r1, -r2, -r3, r4]])
return W
def makeQ(r1, r2, r3, r4=0):
"""
matrix involved in quaternion rotation
"""
Q = np.asarray([
[r4, -r3, r2, r1],
[r3, r4, -r1, r2],
[-r2, r1, r4, r3],
[-r1, -r2, -r3, r4]])
return Q
def quaternion_rotate(X, Y):
"""
Calculate the rotation
Parameters
----------
X : array
(N,D) matrix, where N is points and D is dimension.
Y: array
(N,D) matrix, where N is points and D is dimension.
Returns
-------
rot : matrix
Rotation matrix (D,D)
"""
N = X.shape[0]
W = np.asarray([makeW(*Y[k]) for k in range(N)])
Q = np.asarray([makeQ(*X[k]) for k in range(N)])
Qt_dot_W = np.asarray([np.dot(Q[k].T, W[k]) for k in range(N)])
W_minus_Q = np.asarray([W[k] - Q[k] for k in range(N)])
A = np.sum(Qt_dot_W, axis=0)
eigen = np.linalg.eigh(A)
r = eigen[1][:, eigen[0].argmax()]
rot = quaternion_transform(r)
return rot
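# Cross-check sketch: the Kabsch and quaternion solvers minimize the same
# objective, so on centered point clouds they should agree to numerical
# precision (illustrative random data).
_A_chk = np.random.rand(10, 3); _A_chk -= _A_chk.mean(axis=0)
_B_chk = np.random.rand(10, 3); _B_chk -= _B_chk.mean(axis=0)
assert np.isclose(kabsch_rmsd(_A_chk, _B_chk), quaternion_rmsd(_A_chk, _B_chk))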
def centroid(X):
"""
Centroid is the mean position of all the points in all of the coordinate
directions, from a vectorset X.
https://en.wikipedia.org/wiki/Centroid
C = sum(X)/len(X)
Parameters
----------
X : array
(N,D) matrix, where N is points and D is dimension.
Returns
-------
C : array
centroid
"""
C = X.mean(axis=0)
return C
def reorder_distance(p_atoms, q_atoms, p_coord, q_coord):
"""
Re-orders the input atom list and xyz coordinates by atom type and then by
distance of each atom from the centroid.
Parameters
----------
p_atoms, q_atoms : array
(N,1) matrices, where N is points holding the atoms' names
p_coord, q_coord : array
(N,D) matrices, where N is points and D is dimension
Returns
-------
atoms_reordered : array
(N,1) matrix, where N is points holding the ordered atoms' names
coords_reordered : array
(N,D) matrix, where N is points and D is dimension (rows re-ordered)
"""
# Find unique atoms
unique_atoms = np.unique(p_atoms)
# generate a full index array matching q's shape, filled in per atom type below
view_reorder = np.zeros(q_atoms.shape, dtype=int)
for atom in unique_atoms:
p_atom_idx, = np.where(p_atoms == atom)
q_atom_idx, = np.where(q_atoms == atom)
A_coord = p_coord[p_atom_idx]
B_coord = q_coord[q_atom_idx]
# Calculate distance from each atom to centroid
A_norms = np.linalg.norm(A_coord, axis=1)
B_norms = np.linalg.norm(B_coord, axis=1)
reorder_indices_A =
|
np.argsort(A_norms)
|
numpy.argsort
|
# -*- coding: utf-8 -*-
# Copyright 2021 <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lorenz attractor code modified from:
# https://matplotlib.org/stable/gallery/mplot3d/lorenz_attractor.html
import numpy as np
from torch.utils.data import Dataset
import h5py
def lorenz(x, y, z, s=10, r=28, b=2.667):
"""
Given:
x, y, z: a point of interest in three dimensional space
s, r, b: parameters defining the lorenz attractor
Returns:
x_dot, y_dot, z_dot: values of the Lorenz attractor's time
derivatives at the point x, y, z
"""
x_dot = s * (y - x)
y_dot = r * x - y - x * z
z_dot = x * y - b * z
return x_dot, y_dot, z_dot
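# Usage sketch: one forward-Euler step from the classic initial condition
# (0., 1., 1.05), the same kind of update the dataset class below performs.
_x0, _y0, _z0 = 0.0, 1.0, 1.05
_dx, _dy, _dz = lorenz(_x0, _y0, _z0)
_x1 = _x0 + _dx * 0.01  # dt = 0.01  ->  _x1 == 0.1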
class Lorenz(Dataset):
def __init__(self, window_size, dt=0.01, num_steps=200000, s=10, r=28, b=2.667):
super().__init__()
# Need one more for the initial values
xs = np.empty(num_steps + 1)
ys =
|
np.empty(num_steps + 1)
|
numpy.empty
|
import argparse
import logging
import time
import threading
import uuid
import cv2
import numpy as np
import torch
import models
from models import dynamic_channel
from models import headpose
n_style_to_change = 12
LOG = logging.getLogger(__name__)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--generator')
parser.add_argument('--encoder')
parser.add_argument('--boundary')
parser.add_argument('--image')
parser.add_argument('--head-pose-model-path')
parser.add_argument('--scale', type=float, default=0.0)
parser.add_argument('--interactive', action='store_true')
parser.add_argument('--show', action='store_true')
parser.add_argument('--output')
return parser.parse_args()
def to_img(img):
return (img.transpose([1, 2, 0]) * 127.5 + 127.5).clip(0, 255).astype(np.uint8)
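# Range sketch: to_img maps CHW float output in [-1, 1] to HWC uint8 in
# [0, 255] (-1 -> 0, 0 -> 127 after truncation, 1 -> 255).
_probe = to_img(np.stack([np.full((2, 2), -1.0), np.zeros((2, 2)), np.ones((2, 2))]))
assert _probe.dtype == np.uint8 and _probe.shape == (2, 2, 3)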
def show(img, direction_idx, direction_i):
small = cv2.resize(img, (img.shape[1] // 2, img.shape[0] // 2), interpolation=cv2.INTER_AREA)
bar = np.zeros([small.shape[0], 450, 3], dtype=np.uint8)
font = cv2.FONT_HERSHEY_SIMPLEX
rows = 20
for k, v in direction_idx.items():
color = (200, 200, 200) if k != direction_i else (0, 250, 0)
y = 20 + (k % rows) * 25
x = 20 + 180 * (k // rows)
cv2.putText(
bar,
v,
(x, y),
font,
0.6,
color, thickness=1, lineType=cv2.LINE_AA
)
pic = np.hstack([small, bar])
cv2.imshow('Face', pic[:, :, ::-1])
class CachedVector:
def __init__(self, vector, time):
self.vector = vector
self.time = time
class FaceGen:
def __init__(self, config_name='anycost-ffhq-config-f', gen_path=None, enc_path=None,
bound_path=None, head_pose_driver=None, head_pose_model_path=None):
device_str = 'cuda' if torch.cuda.is_available() else 'cpu'
device = torch.device(device_str)
self.device = device
self.gen = models.get_pretrained('generator', config=config_name, path=gen_path).to(device)
self.encoder = models.get_pretrained('encoder', config=config_name, path=enc_path).to(device)
# mean_latent = gen.mean_style(10000)
self.boundaries = models.get_pretrained('boundary', config_name, path=bound_path)
self.keep_cache_sec = 3600
self.cache = {}
self.lock = threading.Lock()
self.resolutions = [128, 256, 512, 1024]
self.channel_ratio = [0.25, 0.5, 0.75, 1]
self.head_pose_driver = head_pose_driver
self.head_pose_threshold = [19, 20, 20]
# self.head_pose_threshold = [37, 35, 25]
self.head_pose_axis_threshold = None
self.head_pose = None
if self.head_pose_driver is not None or head_pose_model_path is not None:
self.head_pose = headpose.HeadPoseFilter(
head_pose_driver=head_pose_driver,
head_pose_model_path=head_pose_model_path,
head_pose_thresholds=self.head_pose_threshold
)
'''
possible keys:
['00_5_o_Clock_Shadow', '01_Arched_Eyebrows', '02_Attractive', '03_Bags_Under_Eyes', '04_Bald', '05_Bangs',
'06_Big_Lips', '07_Big_Nose', '08_Black_Hair', '09_Blond_Hair', '10_Blurry', '11_Brown_Hair', '12_Bushy_Eyebrows',
'13_Chubby', '14_Double_Chin', '15_Eyeglasses', '16_Goatee', '17_Gray_Hair', '18_Heavy_Makeup', '19_High_Cheekbones',
'20_Male', '21_Mouth_Slightly_Open', '22_Mustache', '23_Narrow_Eyes', '24_No_Beard', '25_Oval_Face', '26_Pale_Skin',
'27_Pointy_Nose', '28_Receding_Hairline', '29_Rosy_Cheeks', '30_Sideburns', '31_Smiling', '32_Straight_Hair',
'33_Wavy_Hair', '34_Wearing_Earrings', '35_Wearing_Hat', '36_Wearing_Lipstick', '37_Wearing_Necklace',
'38_Wearing_Necktie', '39_Young']
'''
direction_map = {
'5_o_clock_shadow': '00_5_o_Clock_Shadow',
'arched_eyebrows': '01_Arched_Eyebrows',
'attractive': '02_Attractive',
'bags_under_eyes': '03_Bags_Under_Eyes',
'bald': '04_Bald',
'bangs': '05_Bangs',
'big_lips': '06_Big_Lips',
'big_nose': '07_Big_Nose',
'black_hair': '08_Black_Hair',
'blonde_hair': '09_Blond_Hair',
'blurry': '10_Blurry',
'brown_hair': '11_Brown_Hair',
'bushy_eyebrows': '12_Bushy_Eyebrows',
'chubby': '13_Chubby',
'double_chin': '14_Double_Chin',
'eyeglasses': '15_Eyeglasses',
'goatee': '16_Goatee',
'gray_hair': '17_Gray_Hair',
'heavy_makeup': '18_Heavy_Makeup',
'high_cheekbones': '19_High_Cheekbones',
'male': '20_Male',
'mouth_slightly_open': '21_Mouth_Slightly_Open',
'mustache': '22_Mustache',
'narrow_eyes': '23_Narrow_Eyes',
'no_beard': '24_No_Beard',
'oval_face': '25_Oval_Face',
'pale_skin': '26_Pale_Skin',
'pointy_nose': '27_Pointy_Nose',
'receding_hairline': '28_Receding_Hairline',
'rosy_cheeks': '29_Rosy_Cheeks',
'sideburns': '30_Sideburns',
'smiling': '31_Smiling',
'straight_hair': '32_Straight_Hair',
'wavy_hair': '33_Wavy_Hair',
'wearing_earrings': '34_Wearing_Earrings',
'wearing_hat': '35_Wearing_Hat',
'wearing_lipstick': '36_Wearing_Lipstick',
'wearing_necklace': '37_Wearing_Necklace',
'wearing_necktie': '38_Wearing_Necktie',
'young': '39_Young'
}
self.direction_idx = {k: v for k, v in zip(range(len(direction_map)), direction_map)}
self.direction_dict = dict()
self._direction_values = {k: 0.0 for k in direction_map}
for k, v in direction_map.items():
self.direction_dict[k] = self.boundaries[v].view(1, 1, -1).to(device)
self.input_kwargs = {
'noise': None,
'randomize_noise': False,
'input_is_style': True
}
def _is_appropriate_head_pose(self, image):
poses = self.head_pose.head_poses_for_images(
|
np.stack([image])
|
numpy.stack
|
"""Contains all available distributions. Utilize the functions in this module
by using the `DISTRIBUTION` keyword in your configuration file. As an example:
```
seeing:
DISTRIBUTION:
NAME: uniform # function name to call
PARAMETERS:
minimum: 0 # value to set for function argument
maximum: 6 # value to set for function argument
```
"""
import numpy as np
import random
from scipy.stats import poisson
## Single parameter sampling distributions
def uniform(minimum, maximum, bands=''):
"""
Return a sample from a uniform probability distribution
on the interval [`minimum`, `maximum`]
Args:
minimum (float or int): The minimum of the interval to sample
maximum (float or int): The maximum of the interval to sample
Returns:
A sample of the specified uniform distribution for each band in the simulation
"""
draw = random.uniform(minimum, maximum)
return [draw] * len(bands.split(','))
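# Usage sketch: the single draw is replicated once per band, so a
# three-band string yields a 3-element list of identical values.
assert len(uniform(0, 6, bands='g,r,i')) == 3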
def uniform_int(minimum, maximum, bands=''):
"""
Return a sample from a uniform probability distribution
on the interval [`minimum`, `maximum`] rounded to the nearest integer
Args:
minimum (int): The minimum of the interval to sample
maximum (int): The maximum of the interval to sample
Returns:
A rounded sample of the specified uniform distribution for each band in the simulation
"""
draw = round(random.uniform(minimum, maximum))
return [draw] * len(bands.split(','))
def normal(mean, std, bands=''):
"""
Return a sample from a normal probability distribution
with specified mean and standard deviation
Args:
mean (float or int): The mean of the normal distribution to sample
std (float or int): The standard deviation of the normal distribution to sample
Returns:
A sample of the specified normal distribution for each band in the simulation
"""
draw = np.random.normal(loc=mean, scale=std)
return [draw] * len(bands.split(','))
def lognormal(mean, sigma, bands=''):
"""
Return a sample from a lognormal probability distribution
with specified mean and sigma
Args:
mean (float or int): The mean of the underlying normal distribution
sigma (float or int): The standard deviation of the underlying normal distribution
Returns:
A sample of the specified lognormal distribution for each band in the simulation
"""
draw =
|
np.random.lognormal(mean=mean, sigma=sigma)
|
numpy.random.lognormal
|
"""
Test the eigenvalue solver
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg, interpolate, sparse
from iwaves.utils import imodes as iwaves
from time import time
import pdb
def iwave_modes(N2, dz, k=None):
"""
Calculates the eigenvalues and eigenfunctions to the internal wave eigenvalue problem:
$$
\left[ \frac{d^2}{dz^2} - \frac{1}{c_0} \bar{\rho}_z \right] \phi = 0
$$
with boundary conditions
"""
nz = N2.shape[0] # Remove the surface values
dz2 = 1/dz**2
# Construct the LHS matrix, A
A = np.diag(-1*dz2*np.ones((nz-1)),-1) + \
np.diag(2*dz2*np.ones((nz,)),0) + \
np.diag(-1*dz2*np.ones((nz-1)),1)
# BC's
A[0,0] = -1.
A[0,1] = 0.
A[-1,-1] = -1.
A[-1,-2] = 0.
# Construct the RHS matrix i.e. put N^2 along diagonals
B = np.diag(N2,0)
# Solve... (use scipy not numpy)
w, phi = linalg.eig(A, b=B, check_finite=False)
#w, phi = linalg.eigh(A, b=B, check_finite=False)
c = 1. / np.power(w, 0.5) # since term is ... + N^2/c^2 \phi
# Sort by the eigenvalues
idx = np.argsort(c)[::-1] # descending order
# Calculate the actual phase speed
cn = np.real( c[idx] )
return phi[:,idx], cn
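# Usage sketch, assuming uniform stratification: for constant N^2 over a
# depth H = nz*dz, the mode shapes are sinusoids and the phase speeds
# behave roughly like N*H/(n*pi).
_N2_chk = 1e-4 * np.ones(100)
_phi_chk, _cn_chk = iwave_modes(_N2_chk, dz=10.0)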
def iwave_modes_sparse(N2, dz, k=None, Asparse=None, return_A=False, v0=None):
"""
Calculates the eigenvalues and eigenfunctions to the internal wave eigenvalue problem:
$$
\left[ \frac{d^2}{dz^2} - \frac{1}{c_0} \bar{\rho}_z \right] \phi = 0
$$
with boundary conditions
"""
nz = N2.shape[0] # Remove the surface values
if k is None:
k = nz-2
if Asparse is None:
dz2 = 1/dz**2
# Construct the LHS matrix, A
A = np.vstack([-1*dz2*np.ones((nz,)),\
2*dz2*np.ones((nz,)),\
-1*dz2*np.ones((nz,)),\
])
# BC's
eps = 1e-10
#A[0,0] = -1.
#A[0,1] = 0.
#A[-1,-1] = -1.
#A[-1,-2] = 0.
A[1,0] = -1.
A[2,0] = 0.
A[1,-1] = -1.
A[0,-1] = 0.
Asparse = sparse.spdiags(A,[-1,0,1],nz,nz, format='csc')
# Construct the RHS matrix i.e. put N^2 along diagonals
#B = np.diag(N2,0)
B = sparse.spdiags(N2,[0],nz,nz, format='csc')
Binv = sparse.spdiags(1/N2,[0],nz,nz, format='csc')
if v0 is not None:
w0 = 1/v0[0]**2.
else:
w0=None
#w, phi = sparse.linalg.eigsh(Asparse, M=B, Minv=Binv, which='SM', k=k, v0=None)
w, phi = sparse.linalg.eigsh(Asparse, M=B, sigma=1., k=k)
#w, phi = sparse.linalg.eigsh(Asparse, M=B, which='LM', k=k)
# Solve... (use scipy not numpy)
#w, phi = linalg.eig(A, b=B)
c = 1. / np.power(w, 0.5) # since term is ... + N^2/c^2 \phi
# Sort by the eigenvalues
idx = np.argsort(c)[::-1] # descending order
# Calculate the actual phase speed
cn = np.real( c[idx] )
if return_A:
return np.real(phi), np.real(cn), Asparse
else:
return np.real(phi), np.real(cn)
def iwave_modes_tri(N2, dz, k=None):
"""
!!! DOES NOT WORK!!!
Calculates the eigenvalues and eigenfunctions to the internal wave eigenvalue problem:
$$
\left[ \frac{d^2}{dz^2} - \frac{1}{c_0} \bar{\rho}_z \right] \phi = 0
$$
with boundary conditions
"""
nz = N2.shape[0] # Remove the surface values
if k is None:
k = nz-2
dz2 = 1/dz**2
# Construct the LHS matrix, A
Ao = -1*dz2*np.ones((nz-1,))
Am = 2*dz2*np.ones((nz,))
# BC's
Am[0] = -1.
Ao[0] = 0.
Am[-1] = -1.
Ao[-1] = 0.
# Now convert from a generalized eigenvalue problem to
# A.v = lambda.B.v
# a standard problem
# A.v = lambda.v
# By multiply the LHS by inverse of B
# (B^-1.A).v = lambda.v
# B^-1 = 1/N2 since B is diagonal
Am /= N2
w, phi = linalg.eigh_tridiagonal(Am, Ao)
## Main diagonal
#dd = 2*dz2*np.ones((nz,))
#dd /= N2
#dd[0] = -1
#dd[-1] = -1
## Off diagonal
#ee = -1*dz2*np.ones((nz-1,))
#ee /= N2[0:-1]
#ee[0] = 0
#ee[-1] = 0
## Solve... (use scipy not numpy)
#w, phi = linalg.eigh_tridiagonal(dd, ee )
#####
c = 1. / np.power(w, 0.5) # since term is ... + N^2/c^2 \phi
# Sort by the eigenvalues
idx = np.argsort(c)[::-1] # descending order
## Calculate the actual phase speed
cn = np.real( c[idx] )
idxgood = ~np.isnan(cn)
phisort = phi[:,idx]
return np.real(phisort[:,idxgood]), np.real(cn[idxgood])
def iwave_modes_uneven(N2, z, k=None):
"""
Calculates the eigenvalues and eigenfunctions to the internal wave eigenvalue problem:
$$
\left[ \frac{d^2}{dz^2} - \frac{1}{c_0} \bar{\rho}_z \right] \phi = 0
$$
with boundary conditions
"""
nz = N2.shape[0]
if k is None:
k = nz-2
dz = np.zeros((nz,))
zm = np.zeros((nz,))
dzm = np.zeros((nz,))
dz[0:-1] = z[0:-1] - z[1:]
zm[0:-1] = z[0:-1] - 0.5*dz[0:-1]
dzm[1:-1] = zm[0:-2] - zm[1:-1]
dzm[0] = dzm[1]
dzm[-1] = dzm[-2]
# Solve as a matrix
#A = np.zeros((nz,nz))
#for i in range(1,nz-1):
# A[i,i] = 1/ (dz[i-1]*dzm[i]) + 1/(dz[i]*dzm[i])
# A[i,i-1] = -1/(dz[i-1]*dzm[i])
# A[i,i+1] = -1/(dz[i]*dzm[i])
# Solve as a banded matrix
A =
|
np.zeros((nz,3))
|
numpy.zeros
|
#!/usr/bin/env python
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: <NAME> <<EMAIL>>
#
import unittest
import numpy
from functools import reduce
from pyscf import lib
from pyscf import gto
from pyscf import symm
from pyscf.symm import geom
numpy.random.seed(12)
u = numpy.random.random((3,3))
u = numpy.linalg.svd(u)[0]
class KnownValues(unittest.TestCase):
def test_d5h(self):
atoms = ringhat(5, u)
atoms = atoms[5:]
gpname, orig, axes = geom.detect_symm(atoms)
self.assertEqual(gpname, 'D5h')
gpname, axes = geom.subgroup(gpname, axes)
atoms = geom.shift_atom(atoms, orig, axes)
self.assertEqual(gpname, 'C2v')
self.assertTrue(geom.check_given_symm('C2v', atoms))
self.assertEqual(geom.symm_identical_atoms(gpname, atoms),
[[0], [1, 4], [2, 3], [5, 6]])
atoms = ringhat(5, u)
atoms = atoms[5:]
atoms[1][0] = 'C1'
gpname, orig, axes = geom.detect_symm(atoms, {'C':'ccpvdz','C1':'sto3g','N':'631g'})
self.assertEqual(gpname, 'C2v')
gpname, axes = geom.subgroup(gpname, axes)
atoms = geom.shift_atom(atoms, orig, axes)
self.assertEqual(gpname, 'C2v')
self.assertTrue(geom.check_given_symm('C2v', atoms))
self.assertEqual(geom.symm_identical_atoms(gpname, atoms),
[[0,2],[1],[3,4],[5,6]])
def test_d6h(self):
atoms = ringhat(6, u)
atoms = atoms[6:]
gpname, orig, axes = geom.detect_symm(atoms)
self.assertEqual(gpname, 'D6h')
gpname, axes = geom.subgroup(gpname, axes)
atoms = geom.shift_atom(atoms, orig, axes)
self.assertEqual(geom.symm_identical_atoms(gpname, atoms),
[[0,3],[1,2,4,5],[6,7]])
self.assertTrue(geom.check_given_symm('D2h', atoms))
def test_d6h_1(self):
atoms = ringhat(6, u)
atoms = atoms[6:]
atoms[1][0] = 'C1'
atoms[2][0] = 'C1'
basis = {'C': 'sto3g', 'N':'sto3g', 'C1':'sto3g'}
gpname, orig, axes = geom.detect_symm(atoms, basis)
self.assertEqual(gpname, 'D6h')
gpname, axes = geom.subgroup(gpname, axes)
atoms = geom.shift_atom(atoms, orig, axes)
self.assertEqual(geom.symm_identical_atoms(gpname, atoms),
[[0,3],[1,2,4,5],[6,7]])
self.assertTrue(geom.check_given_symm('D2h', atoms, basis))
def test_c2v(self):
atoms = ringhat(6, u)
atoms = atoms[6:]
atoms[1][0] = 'C1'
atoms[2][0] = 'C1'
basis = {'C': 'sto3g', 'N':'sto3g', 'C1':'ccpvdz'}
gpname, orig, axes = geom.detect_symm(atoms, basis)
self.assertEqual(gpname, 'C2v')
gpname, axes = geom.subgroup(gpname, axes)
atoms = geom.shift_atom(atoms, orig, axes)
self.assertEqual(geom.symm_identical_atoms(gpname, atoms),
[[0, 3], [1, 2], [4, 5], [6, 7]])
self.assertTrue(geom.check_given_symm('C2', atoms, basis))
def test_s4(self):
atoms = [['C', ( 0.5, 0 , 1)],
['O', ( 0.4, 0.2 , 1)],
['C', ( -0.5, 0 , 1)],
['O', ( -0.4, -0.2 , 1)],
['C', ( 0 , 0.5 , -1)],
['O', ( -0.2, 0.4 , -1)],
['C', ( 0 , -0.5 , -1)],
['O', ( 0.2, -0.4 , -1)]]
gpname, orig, axes = geom.detect_symm(atoms)
self.assertEqual(gpname, 'S4')
gpname, axes = geom.subgroup(gpname, axes)
atoms = geom.shift_atom(atoms, orig, axes)
self.assertEqual(geom.symm_identical_atoms(gpname, atoms),
[[0, 2], [1, 3], [4, 6], [5, 7]])
self.assertTrue(geom.check_given_symm('C2', atoms))
def test_s6(self):
rotz = random_rotz()
atoms = ringhat(3, 1)[3:6] + ringhat(3, rotz)[:3]
rotz[2,2] = -1
rotz[:2,:2] = numpy.array(((.5, numpy.sqrt(3)/2),(-numpy.sqrt(3)/2, .5)))
r = numpy.dot([x[1] for x in atoms], rotz) - numpy.array((0,0,3.5))
atoms += list(zip([x[0] for x in atoms], r))
gpname, orig, axes = geom.detect_symm(atoms)
self.assertEqual(gpname, 'S6')
gpname, axes = geom.subgroup(gpname, axes)
self.assertEqual(gpname, 'C3')
def test_c5h(self):
atoms = ringhat(5, u)
gpname, orig, axes = geom.detect_symm(atoms)
self.assertEqual(gpname, 'C5h')
gpname, axes = geom.subgroup(gpname, axes)
atoms = geom.shift_atom(atoms, orig, axes)
self.assertEqual(gpname, 'Cs')
self.assertTrue(geom.check_given_symm('Cs', atoms))
self.assertEqual(geom.symm_identical_atoms(gpname, atoms),
[[0], [1], [2], [3], [4], [5], [6], [7], [8], [9], [10,11]])
def test_c5(self):
atoms = ringhat(5, u)[:-1]
gpname, orig, axes = geom.detect_symm(atoms)
self.assertEqual(gpname, 'C5')
gpname, axes = geom.subgroup(gpname, axes)
self.assertEqual(gpname, 'C1')
def test_c5v(self):
atoms = ringhat(5, u)[5:-1]
gpname, orig, axes = geom.detect_symm(atoms)
self.assertEqual(gpname, 'C5v')
gpname, axes = geom.subgroup(gpname, axes)
atoms = geom.shift_atom(atoms, orig, axes)
self.assertEqual(gpname, 'Cs')
self.assertTrue(geom.check_given_symm('Cs', atoms))
self.assertEqual(geom.symm_identical_atoms(gpname, atoms),
[[0, 4], [1, 3], [2], [5]])
def test_ih1(self):
coords = numpy.dot(make60(1.5, 1), u)
atoms = [['C', c] for c in coords]
gpname, orig, axes = geom.detect_symm(atoms)
self.assertEqual(gpname, 'Ih')
gpname, axes = geom.subgroup(gpname, axes)
atoms = geom.shift_atom(atoms, orig, axes)
self.assertEqual(gpname, 'Ci')
self.assertTrue(geom.check_given_symm('Ci', atoms))
self.assertEqual(geom.symm_identical_atoms(gpname, atoms),
[[0, 55], [1, 56], [2, 57], [3, 58], [4, 59],
[5, 30], [6, 31], [7, 32], [8, 33], [9, 34],
[10, 35], [11, 36], [12, 37], [13, 38], [14, 39],
[15, 40], [16, 41], [17, 42], [18, 43], [19, 44],
[20, 45], [21, 46], [22, 47], [23, 48], [24, 49],
[25, 50], [26, 51], [27, 52], [28, 53], [29, 54]])
def test_ih2(self):
coords1 = numpy.dot(make60(1.5, 3), u)
coords2 = numpy.dot(make12(1.1), u)
atoms = [['C', c] for c in coords1] + [['C', c] for c in coords2]
gpname, orig, axes = geom.detect_symm(atoms)
self.assertEqual(gpname, 'Ih')
gpname, axes = geom.subgroup(gpname, axes)
atoms = geom.shift_atom(atoms, orig, axes)
self.assertEqual(gpname, 'Ci')
self.assertTrue(geom.check_given_symm('Ci', atoms))
def test_ih3(self):
coords1 = numpy.dot(make20(1.5), u)
atoms = [['C', c] for c in coords1]
gpname, orig, axes = geom.detect_symm(atoms)
self.assertEqual(gpname, 'Ih')
gpname, axes = geom.subgroup(gpname, axes)
atoms = geom.shift_atom(atoms, orig, axes)
self.assertEqual(gpname, 'Ci')
self.assertTrue(geom.check_given_symm('Ci', atoms))
def test_ih4(self):
coords1 = make12(1.5)
atoms = [['C', c] for c in coords1]
gpname, orig, axes = geom.detect_symm(atoms)
self.assertEqual(gpname, 'Ih')
gpname, axes = geom.subgroup(gpname, axes)
atoms = geom.shift_atom(atoms, orig, axes)
self.assertEqual(gpname, 'Ci')
self.assertTrue(geom.check_given_symm('Ci', atoms))
def test_d5d_1(self):
coords1 = numpy.dot(make20(2.0), u)
coords2 = numpy.dot(make12(1.1), u)
atoms = [['C', c] for c in coords1] + [['C', c] for c in coords2]
gpname, orig, axes = geom.detect_symm(atoms)
self.assertEqual(gpname, 'D5d')
def test_s10(self):
numpy.random.seed(19)
rotz = numpy.eye(3)
rotz[:2,:2] = numpy.linalg.svd(numpy.random.random((2,2)))[0]
coords1 = numpy.dot(make60(1.5, 3.0), u)
coords2 = reduce(numpy.dot, (make20(1.1), rotz, u))
atoms = [['C', c] for c in coords1] + [['C', c] for c in coords2]
gpname, orig, axes = geom.detect_symm(atoms)
self.assertEqual(gpname, 'S10')
def test_oh1(self):
coords1 = numpy.dot(make6(1.5), u)
atoms = [['C', c] for c in coords1]
gpname, orig, axes = geom.detect_symm(atoms)
self.assertEqual(gpname, 'Oh')
gpname, axes = geom.subgroup(gpname, axes)
atoms = geom.shift_atom(atoms, orig, axes)
self.assertEqual(gpname, 'D2h')
self.assertTrue(geom.check_given_symm('D2h', atoms))
def test_oh2(self):
coords1 = numpy.dot(make8(1.5), u)
atoms = [['C', c] for c in coords1]
gpname, orig, axes = geom.detect_symm(atoms)
self.assertEqual(gpname, 'Oh')
gpname, axes = geom.subgroup(gpname, axes)
atoms = geom.shift_atom(atoms, orig, axes)
self.assertEqual(gpname, 'D2h')
self.assertTrue(geom.check_given_symm('D2h', atoms))
def test_oh3(self):
coords1 = numpy.dot(make8(1.5), u)
coords2 = numpy.dot(make6(1.5), u)
atoms = [['C', c] for c in coords1] + [['C', c] for c in coords2]
gpname, orig, axes = geom.detect_symm(atoms)
self.assertEqual(gpname, 'Oh')
gpname, axes = geom.subgroup(gpname, axes)
atoms = geom.shift_atom(atoms, orig, axes)
self.assertEqual(gpname, 'D2h')
self.assertTrue(geom.check_given_symm('D2h', atoms))
def test_c4h(self):
coords1 = numpy.dot(make8(1.5), u)
coords2 = numpy.dot(make6(1.5).dot(random_rotz()), u)
atoms = [['C', c] for c in coords1] + [['C', c] for c in coords2]
gpname, orig, axes = geom.detect_symm(atoms)
self.assertEqual(gpname, 'C4h')
def test_c2h(self):
coords1 = numpy.dot(make8(2.5), u)
coords2 = numpy.dot(make20(1.2), u)
atoms = [['C', c] for c in numpy.vstack((coords1,coords2))]
gpname, orig, axes = geom.detect_symm(atoms)
self.assertEqual(gpname, 'C2h')
def test_c1(self):
coords1 = numpy.dot(make4(2.5), u)
coords2 = make20(1.2)
atoms = [['C', c] for c in numpy.vstack((coords1,coords2))]
gpname, orig, axes = geom.detect_symm(atoms)
self.assertEqual(gpname, 'C1')
def test_c2(self):
coords1 = numpy.dot(make4(2.5), 1)
coords2 = make12(1.2)
axes = geom._make_axes(coords2[1]-coords2[0], coords2[2])
coords2 = numpy.dot(coords2, axes.T)
atoms = [['C', c] for c in numpy.vstack((coords1,coords2))]
gpname, orig, axes = geom.detect_symm(atoms)
self.assertEqual(gpname, 'C2')
def test_ci(self):
coords1 = numpy.dot(make8(2.5), u)
coords2 = numpy.dot(numpy.dot(make20(1.2), random_rotz()), u)
atoms = [['C', c] for c in numpy.vstack((coords1,coords2))]
gpname, orig, axes = geom.detect_symm(atoms)
self.assertEqual(gpname, 'Ci')
def test_cs(self):
coords1 = make4(2.5)
axes = geom._make_axes(coords1[1]-coords1[0], coords1[2])
coords1 = numpy.dot(coords1, axes.T)
coords2 = make12(1.2)
axes = geom._make_axes(coords2[1]-coords2[0], coords2[2])
coords2 = numpy.dot(coords2, axes.T)
atoms = [['C', c] for c in numpy.vstack((coords1,coords2))]
gpname, orig, axes = geom.detect_symm(atoms)
self.assertEqual(gpname, 'Cs')
numpy.random.seed(1)
c0 = numpy.random.random((4,3))
c0[:,1] *= .5
c1 = c0.copy()
c1[:,1] *= -1
atoms = [['C', c] for c in numpy.vstack((c0,c1))]
gpname, orig, axes = geom.detect_symm(atoms)
self.assertEqual(gpname, 'Cs')
def test_td1(self):
coords1 = numpy.dot(make4(1.5), u)
atoms = [['C', c] for c in coords1]
gpname, orig, axes = geom.detect_symm(atoms)
self.assertEqual(gpname, 'Td')
gpname, axes = geom.subgroup(gpname, axes)
atoms = geom.shift_atom(atoms, orig, axes)
self.assertEqual(gpname, 'D2')
self.assertTrue(geom.check_given_symm('D2', atoms))
self.assertEqual(geom.symm_identical_atoms(gpname, atoms),
[[0, 1, 2, 3]])
def test_td2(self):
coords1 = numpy.dot(make4(1.5), u)
coords2 = numpy.dot(make4(1.9), u)
atoms = [['C', c] for c in coords1] + [['C', c] for c in coords2]
gpname, orig, axes = geom.detect_symm(atoms)
self.assertEqual(gpname, 'Td')
gpname, axes = geom.subgroup(gpname, axes)
atoms = geom.shift_atom(atoms, orig, axes)
self.assertEqual(gpname, 'D2')
self.assertTrue(geom.check_given_symm('D2', atoms))
self.assertEqual(geom.symm_identical_atoms(gpname, atoms),
[[0, 1, 2, 3], [4, 5, 6, 7]])
def test_c3v(self):
coords1 = numpy.dot(make4(1.5), u)
coords2 = numpy.dot(make4(1.9), u)
atoms = [['C', c] for c in coords1] + [['C', c] for c in coords2]
atoms[2][0] = 'C1'
gpname, orig, axes = geom.detect_symm(atoms)
self.assertEqual(gpname, 'C3v')
gpname, axes = geom.subgroup(gpname, axes)
atoms = geom.shift_atom(atoms, orig, axes)
self.assertEqual(gpname, 'Cs')
self.assertTrue(geom.check_given_symm('Cs', atoms))
self.assertEqual(geom.symm_identical_atoms(gpname, atoms),
[[0, 1], [2], [3], [4, 5], [6], [7]])
def test_c3v_1(self):
mol = gto.M(atom='''
C 0.948065 -0.081406 -0.007893
C 0.462608 -0.144439 1.364854
N 0.077738 -0.194439 2.453356
H 0.591046 0.830035 -0.495369
H 0.591062 -0.944369 -0.576807
H 2.041481 -0.080642 -0.024174''')
gpname, orig, axes = geom.detect_symm(mol._atom)
self.assertEqual(gpname, 'C1')
with lib.temporary_env(geom, TOLERANCE=1e-3):
gpname, orig, axes = geom.detect_symm(mol._atom)
self.assertEqual(gpname, 'C3v')
def test_t(self):
atoms = [['C', ( 1.0 ,-1.0 , 1.0 )],
['O', ( 1.0-.1,-1.0+.2, 1.0 )],
['O', ( 1.0 ,-1.0+.1, 1.0-.2)],
['O', ( 1.0-.2,-1.0 , 1.0-.1)],
['C', (-1.0 , 1.0 , 1.0 )],
['O', (-1.0+.1, 1.0-.2, 1.0 )],
['O', (-1.0 , 1.0-.1, 1.0-.2)],
['O', (-1.0+.2, 1.0 , 1.0-.1)],
['C', ( 1.0 , 1.0 ,-1.0 )],
['O', ( 1.0-.2, 1.0 ,-1.0+.1)],
['O', ( 1.0 , 1.0-.1,-1.0+.2)],
['O', ( 1.0-.1, 1.0-.2,-1.0 )],
['C', (-1.0 ,-1.0 ,-1.0 )],
['O', (-1.0 ,-1.0+.1,-1.0+.2)],
['O', (-1.0+.2,-1.0 ,-1.0+.1)],
['O', (-1.0+.1,-1.0+.2,-1.0 )]]
gpname, orig, axes = geom.detect_symm(atoms)
self.assertEqual(gpname, 'T')
gpname, axes = geom.subgroup(gpname, axes)
atoms = geom.shift_atom(atoms, orig, axes)
self.assertEqual(gpname, 'D2')
self.assertTrue(geom.check_given_symm('D2', atoms))
self.assertEqual(geom.symm_identical_atoms(gpname, atoms),
[[0, 4, 8, 12], [1, 5, 11, 15],
[2, 6, 10, 13], [3, 7, 9, 14]])
def test_th(self):
atoms = [['C', ( 1.0 ,-1.0 , 1.0 )],
['O', ( 1.0-.1,-1.0+.2, 1.0 )],
['O', ( 1.0 ,-1.0+.1, 1.0-.2)],
['O', ( 1.0-.2,-1.0 , 1.0-.1)],
['C', ( 1.0 , 1.0 , 1.0 )],
['O', ( 1.0-.1, 1.0-.2, 1.0 )],
['O', ( 1.0 , 1.0-.1, 1.0-.2)],
['O', ( 1.0-.2, 1.0 , 1.0-.1)],
['C', (-1.0 , 1.0 , 1.0 )],
['O', (-1.0+.1, 1.0-.2, 1.0 )],
['O', (-1.0 , 1.0-.1, 1.0-.2)],
['O', (-1.0+.2, 1.0 , 1.0-.1)],
['C', (-1.0 ,-1.0 , 1.0 )],
['O', (-1.0+.1,-1.0+.2, 1.0 )],
['O', (-1.0 ,-1.0+.1, 1.0-.2)],
['O', (-1.0+.2,-1.0 , 1.0-.1)],
['C', ( 1.0 ,-1.0 ,-1.0 )],
['O', ( 1.0-.2,-1.0 ,-1.0+.1)],
['O', ( 1.0 ,-1.0+.1,-1.0+.2)],
['O', ( 1.0-.1,-1.0+.2,-1.0 )],
['C', ( 1.0 , 1.0 ,-1.0 )],
['O', ( 1.0-.2, 1.0 ,-1.0+.1)],
['O', ( 1.0 , 1.0-.1,-1.0+.2)],
['O', ( 1.0-.1, 1.0-.2,-1.0 )],
['C', (-1.0 , 1.0 ,-1.0 )],
['O', (-1.0+.2, 1.0 ,-1.0+.1)],
['O', (-1.0 , 1.0-.1,-1.0+.2)],
['O', (-1.0+.1, 1.0-.2,-1.0 )],
['C', (-1.0 ,-1.0 ,-1.0 )],
['O', (-1.0 ,-1.0+.1,-1.0+.2)],
['O', (-1.0+.2,-1.0 ,-1.0+.1)],
['O', (-1.0+.1,-1.0+.2,-1.0 )]]
gpname, orig, axes = geom.detect_symm(atoms)
self.assertEqual(gpname, 'Th')
gpname, axes = geom.subgroup(gpname, axes)
atoms = geom.shift_atom(atoms, orig, axes)
self.assertEqual(gpname, 'D2')
self.assertTrue(geom.check_given_symm('D2', atoms))
self.assertEqual(geom.symm_identical_atoms(gpname, atoms),
[[0, 8, 20, 28], [1, 9, 23, 31],
[2, 10, 22, 29], [3, 11, 21, 30],
[4, 12, 16, 24], [5, 13, 19, 27],
[6, 14, 18, 26], [7, 15, 17, 25]])
def test_s4_1(self):
coords1 = numpy.dot(make4(1.5), u)
coords2 = numpy.dot(numpy.dot(make4(2.4), random_rotz()), u)
atoms = [['C', c] for c in coords1] + [['C', c] for c in coords2]
gpname, orig, axes = geom.detect_symm(atoms)
self.assertEqual(gpname, 'S4')
def test_Dooh(self):
atoms = [['H', (0,0,0)], ['H', (0,0,-1)], ['H1', (0,0,1)]]
basis = {'H':'sto3g'}
gpname, orig, axes = geom.detect_symm(atoms, basis)
self.assertEqual(gpname, 'Dooh')
self.assertEqual(geom.symm_identical_atoms(gpname, atoms),
[[0], [1,2]])
gpname, axes = geom.subgroup(gpname, axes)
atoms = geom.shift_atom(atoms, orig, axes)
self.assertEqual(gpname, 'Dooh')
self.assertTrue(geom.check_given_symm('Dooh', atoms, basis))
self.assertTrue(geom.check_given_symm('D2h', atoms, basis))
self.assertEqual(geom.symm_identical_atoms(gpname, atoms),
[[0], [1,2]])
def test_Coov(self):
atoms = [['H', (0,0,0)], ['H', (0,0,-1)], ['H1', (0,0,1)]]
basis = {'H':'sto3g', 'H1':'6-31g'}
gpname, orig, axes = geom.detect_symm(atoms, basis)
self.assertEqual(gpname, 'Coov')
self.assertEqual(geom.symm_identical_atoms(gpname, atoms),
[[0], [1] ,[2]])
gpname, axes = geom.subgroup(gpname, axes)
atoms = geom.shift_atom(atoms, orig, axes)
self.assertEqual(gpname, 'Coov')
self.assertTrue(geom.check_given_symm('Coov', atoms, basis))
self.assertTrue(geom.check_given_symm('C2v', atoms, basis))
self.assertEqual(geom.symm_identical_atoms(gpname, atoms),
[[0], [1], [2]])
def test_d5(self):
coord1 = ring(5)
coord2 = ring(5, .1)
coord1[:,2] = 1
coord2[:,2] =-1
atoms = [['H', c] for c in numpy.vstack((coord1,coord2))]
gpname, orig, axes = geom.detect_symm(atoms)
self.assertEqual(gpname, 'D5')
gpname, axes = geom.subgroup(gpname, axes)
atoms = geom.shift_atom(atoms, orig, axes)
self.assertEqual(gpname, 'C2')
self.assertTrue(geom.check_given_symm('C2', atoms))
self.assertEqual(geom.symm_identical_atoms(gpname, atoms),
[[0, 5], [1, 9], [2, 8], [3, 7], [4, 6]])
def test_d5d(self):
coord1 = ring(5)
coord2 = ring(5, numpy.pi/5)
coord1[:,2] = 1
coord2[:,2] =-1
atoms = [['H', c] for c in numpy.vstack((coord1,coord2))]
gpname, orig, axes = geom.detect_symm(atoms)
self.assertEqual(gpname, 'D5d')
gpname, axes = geom.subgroup(gpname, axes)
atoms = geom.shift_atom(atoms, orig, axes)
self.assertEqual(gpname, 'C2h')
self.assertTrue(geom.check_given_symm('C2h', atoms))
self.assertEqual(geom.symm_identical_atoms(gpname, atoms),
[[0, 3, 5, 7], [1, 2, 8, 9], [4, 6]])
def test_detect_symm_c2v(self):
atoms = [['H' , (1., 0., 2.)],
['He', (0., 1., 0.)],
['H' , (-2.,0.,-1.)],
['He', (0.,-1., 0.)]]
l, orig, axes = geom.detect_symm(atoms)
self.assertEqual(l, 'C2v')
atoms = geom.shift_atom(atoms, orig, axes)
self.assertTrue(geom.check_given_symm('C2v', atoms))
self.assertEqual(geom.symm_identical_atoms(l,atoms), [[0,2],[1,3]])
def test_detect_symm_d2h_a(self):
atoms = [['He', (0., 1., 0.)],
['H' , (1., 0., 0.)],
['H' , (-1.,0., 0.)],
['He', (0.,-1., 0.)]]
l, orig, axes = geom.detect_symm(atoms)
self.assertEqual(l, 'D2h')
atoms = geom.shift_atom(atoms, orig, axes)
self.assertTrue(geom.check_given_symm('D2h', atoms))
self.assertEqual(geom.symm_identical_atoms(l,atoms),
[[0, 3], [1, 2]])
def test_detect_symm_d2h_b(self):
atoms = [['H' , (1., 0., 2.)],
['He', (0., 1., 0.)],
['H' , (-1.,0.,-2.)],
['He', (0.,-1., 0.)]]
l, orig, axes = geom.detect_symm(atoms)
self.assertEqual(l, 'D2h')
atoms = geom.shift_atom(atoms, orig, axes)
self.assertTrue(geom.check_given_symm('D2h', atoms))
self.assertEqual(geom.symm_identical_atoms(l,atoms), [[0,2],[1,3]])
def test_detect_symm_c2h_a(self):
atoms = [['H' , (1., 0., 2.)],
['He', (0., 1.,-1.)],
['H' , (-1.,0.,-2.)],
['He', (0.,-1., 1.)]]
l, orig, axes = geom.detect_symm(atoms)
atoms = geom.shift_atom(atoms, orig, axes)
self.assertEqual(l, 'C2h')
self.assertEqual(geom.symm_identical_atoms(l,atoms), [[0,2],[1,3]])
self.assertTrue(geom.check_given_symm('C2h', atoms))
def test_detect_symm_c2h(self):
atoms = [['H' , (1., 0., 2.)],
['He', (0., 1., 0.)],
['H' , (1., 0., 0.)],
['H' , (-1.,0., 0.)],
['H' , (-1.,0.,-2.)],
['He', (0.,-1., 0.)]]
l, orig, axes = geom.detect_symm(atoms)
atoms = geom.shift_atom(atoms, orig, axes)
self.assertEqual(l, 'C2h')
self.assertEqual(geom.symm_identical_atoms(l,atoms), [[0,4],[1,5],[2,3]])
self.assertTrue(geom.check_given_symm('C2h', atoms))
atoms = [['H' , (1., 0., 1.)],
['H' , (1., 0.,-1.)],
['He', (0., 0., 2.)],
['He', (2., 0.,-2.)],
['Li', (1., 1., 0.)],
['Li', (1.,-1., 0.)]]
l, orig, axes = geom.detect_symm(atoms)
atoms = geom.shift_atom(atoms, orig, axes)
self.assertEqual(l, 'C2h')
self.assertEqual(geom.symm_identical_atoms(l,atoms),
[[0, 1], [2, 3], [4, 5]])
self.assertTrue(geom.check_given_symm('C2h', atoms))
def test_detect_symm_d2_a(self):
atoms = [['H' , (1., 0., 1.)],
['H' , (1., 0.,-1.)],
['He', (0., 0., 2.)],
['He', (2., 0., 2.)],
['He', (1., 1.,-2.)],
['He', (1.,-1.,-2.)]]
l, orig, axes = geom.detect_symm(atoms)
self.assertEqual(l, 'D2d')
atoms = geom.shift_atom(atoms, orig, axes)
self.assertTrue(geom.check_given_symm('D2', atoms))
self.assertEqual(geom.symm_identical_atoms('D2', atoms),
[[0, 1], [2, 3, 4, 5]])
def test_detect_symm_d2_b(self):
s2 = numpy.sqrt(.5)
atoms = [['C', (0., 0., 1.)],
['C', (0., 0.,-1.)],
['H', ( 1, 0., 2.)],
['H', (-1, 0., 2.)],
['H', ( s2, s2,-2.)],
['H', (-s2,-s2,-2.)]]
l, orig, axes = geom.detect_symm(atoms)
self.assertEqual(l, 'D2')
atoms = geom.shift_atom(atoms, orig, axes)
self.assertTrue(geom.check_given_symm('D2', atoms))
self.assertEqual(geom.symm_identical_atoms(l,atoms),
[[0, 1], [2, 3, 4, 5]])
def test_detect_symm_s4(self):
atoms = [['H', (-1,-1.,-2.)],
['H', ( 1, 1.,-2.)],
['C', (-.9,-1.,-2.)],
['C', (.9, 1.,-2.)],
['H', ( 1,-1., 2.)],
['H', (-1, 1., 2.)],
['C', ( 1,-.9, 2.)],
['C', (-1, .9, 2.)],]
l, orig, axes = geom.detect_symm(atoms)
self.assertEqual(l, 'S4')
atoms = geom.shift_atom(atoms, orig, axes)
self.assertTrue(geom.check_given_symm('C2', atoms))
self.assertEqual(geom.symm_identical_atoms('C2',atoms),
[[0, 1], [2, 3], [4, 5], [6, 7]])
def test_detect_symm_ci(self):
atoms = [['H' , ( 1., 0., 0.)],
['He', ( 0., 1., 0.)],
['Li', ( 0., 0., 1.)],
['Be', ( .5, .5, .5)],
['H' , (-1., 0., 0.)],
['He', ( 0.,-1., 0.)],
['Li', ( 0., 0.,-1.)],
['Be', (-.5,-.5,-.5)]]
l, orig, axes = geom.detect_symm(atoms)
atoms = geom.shift_atom(atoms, orig, axes)
self.assertEqual(l, 'Ci')
self.assertTrue(geom.check_given_symm('Ci', atoms))
self.assertEqual(geom.symm_identical_atoms(l,atoms),
[[0, 4], [1, 5], [2, 6], [3, 7]])
def test_detect_symm_cs1(self):
atoms = [['H' , (1., 2., 0.)],
['He', (1., 0., 0.)],
['Li', (2.,-1., 0.)],
['Be', (0., 1., 0.)]]
l, orig, axes = geom.detect_symm(atoms)
atoms = geom.shift_atom(atoms, orig, axes)
self.assertEqual(l, 'Cs')
self.assertTrue(geom.check_given_symm('Cs', atoms))
self.assertEqual(geom.symm_identical_atoms(l,atoms),
[[0], [1], [2], [3]])
def test_detect_symm_cs2(self):
atoms = [['H' , (0., 1., 2.)],
['He', (0., 1., 0.)],
['Li', (0., 2.,-1.)],
['Be', (0., 0., 1.)],
['S' , (-3, 1., .5)],
['S' , ( 3, 1., .5)]]
coord = numpy.dot([a[1] for a in atoms], u)
atoms = [[atoms[i][0], c] for i,c in enumerate(coord)]
l, orig, axes = geom.detect_symm(atoms)
atoms = geom.shift_atom(atoms, orig, axes)
self.assertEqual(l, 'Cs')
self.assertTrue(geom.check_given_symm('Cs', atoms))
self.assertEqual(geom.symm_identical_atoms(l,atoms),
[[0], [1], [2], [3], [4, 5]])
def test_detect_symm_cs3(self):
atoms = [['H' , ( 2.,1., 0.)],
['He', ( 0.,1., 0.)],
['Li', (-1.,2., 0.)],
['Be', ( 1.,0., 0.)],
['S' , ( .5,1., -3)],
['S' , ( .5,1., 3)]]
coord = numpy.dot([a[1] for a in atoms], u)
atoms = [[atoms[i][0], c] for i,c in enumerate(coord)]
l, orig, axes = geom.detect_symm(atoms)
atoms = geom.shift_atom(atoms, orig, axes)
self.assertEqual(l, 'Cs')
self.assertTrue(geom.check_given_symm('Cs', atoms))
self.assertEqual(geom.symm_identical_atoms(l,atoms),
[[0], [1], [2], [3], [4, 5]])
def test_detect_symm_c1(self):
atoms = [['H' , ( 1., 0., 0.)],
['He', ( 0., 1., 0.)],
['Li', ( 0., 0., 1.)],
['Be', ( .5, .5, .5)]]
l, orig, axes = geom.detect_symm(atoms)
atoms = geom.shift_atom(atoms, orig, axes)
self.assertEqual(l, 'C1')
self.assertTrue(geom.check_given_symm('C1', atoms))
self.assertEqual(geom.symm_identical_atoms(l,atoms),
[[0], [1], [2], [3]])
def test_detect_symm_c2(self):
atoms = [['H' , ( 1., 0., 1.)],
['H' , ( 1., 0.,-1.)],
['He', ( 0.,-3., 2.)],
['He', ( 0., 3.,-2.)]]
l, orig, axes = geom.detect_symm(atoms)
atoms = geom.shift_atom(atoms, orig, axes)
self.assertEqual(l, 'C2')
self.assertTrue(geom.check_given_symm('C2', atoms))
self.assertEqual(geom.symm_identical_atoms(l,atoms), [[0,1],[2,3]])
def test_detect_symm_d3d(self):
atoms = [
['C', ( 1.25740, -0.72596, -0.25666)],
['C', ( 1.25740, 0.72596, 0.25666)],
['C', ( 0.00000, 1.45192, -0.25666)],
['C', (-1.25740, 0.72596, 0.25666)],
['C', (-1.25740, -0.72596, -0.25666)],
['C', ( 0.00000, -1.45192, 0.25666)],
['H', ( 2.04168, -1.17876, 0.05942)],
['H', ( 1.24249, -0.71735, -1.20798)],
['H', ( 2.04168, 1.17876, -0.05942)],
['H', ( 1.24249, 0.71735, 1.20798)],
['H', ( 0.00000, 1.43470, -1.20798)],
['H', ( 0.00000, 2.35753, 0.05942)],
['H', (-2.04168, 1.17876, -0.05942)],
['H', (-1.24249, 0.71735, 1.20798)],
['H', (-1.24249, -0.71735, -1.20798)],
['H', (-2.04168, -1.17876, 0.05942)],
['H', ( 0.00000, -1.43470, 1.20798)],
['H', ( 0.00000, -2.35753, -0.05942)], ]
with lib.temporary_env(geom, TOLERANCE=1e-4):
l, orig, axes = geom.detect_symm(atoms)
self.assertEqual(l, 'D3d')
def test_quasi_c2v(self):
atoms = [
['Fe', ( 0.0000000000, 0.0055197721, 0.0055197721)],
['O' , (-1.3265475500, 0.0000000000, -0.9445024777)],
['O' , ( 1.3265475500, 0.0000000000, -0.9445024777)],
['O' , ( 0.0000000000, -1.3265374484, 0.9444796669)],
['O' , ( 0.0000000000, 1.3265374484, 0.9444796669)],]
l, orig, axes = geom.detect_symm(atoms)
self.assertEqual(l, 'Cs')
with lib.temporary_env(geom, TOLERANCE=1e-2):
l, orig, axes = geom.detect_symm(atoms)
self.assertEqual(l, 'C2v')
with lib.temporary_env(geom, TOLERANCE=1e-1):
l, orig, axes = geom.detect_symm(atoms)
self.assertEqual(l, 'Td')
def test_as_subgroup(self):
axes = numpy.eye(3)
g, ax = symm.as_subgroup('D2d', axes)
self.assertEqual(g, 'D2')
self.assertAlmostEqual(abs(ax-numpy.eye(3)).max(), 0, 12)
g, ax = symm.as_subgroup('D2d', axes, 'D2')
self.assertEqual(g, 'D2')
self.assertAlmostEqual(abs(ax-numpy.eye(3)).max(), 0, 12)
g, ax = symm.as_subgroup('D2d', axes, 'C2v')
self.assertEqual(g, 'C2v')
self.assertAlmostEqual(ax[0,1], numpy.sqrt(.5), 9)
self.assertAlmostEqual(ax[1,0],-numpy.sqrt(.5), 9)
g, ax = symm.as_subgroup('C2v', axes, 'Cs')
self.assertEqual(g, 'Cs')
self.assertAlmostEqual(ax[2,0], 1, 9)
g, ax = symm.as_subgroup('D6', axes)
self.assertEqual(g, 'D2')
g, ax = symm.as_subgroup('C6h', axes)
self.assertEqual(g, 'C2h')
g, ax = symm.as_subgroup('C6v', axes)
self.assertEqual(g, 'C2v')
g, ax = symm.as_subgroup('C6', axes)
self.assertEqual(g, 'C2')
def test_ghost(self):
atoms = [
['Fe' , ( 0.0, 0.0, 0.0)],
['O' , (-1.3, 0.0, 0.0)],
['GHOST-O' , ( 1.3, 0.0, 0.0)],
['GHOST-O' , ( 0.0, -1.3, 0.0)],
['O' , ( 0.0, 1.3, 0.0)],]
l, orig, axes = geom.detect_symm(atoms)
self.assertEqual(l, 'C2v')
self.assertAlmostEqual(axes[2,0]*axes[2,1], -.5)
atoms = [
['Fe' , ( 0.0, 0.0, 0.0)],
['O' , (-1.3, 0.0, 0.0)],
['XO' , ( 1.3, 0.0, 0.0)],
['GHOSTO' , ( 0.0, -1.3, 0.0)],
['O' , ( 0.0, 1.3, 0.0)],]
l, orig, axes = geom.detect_symm(atoms)
self.assertEqual(l, 'C2v')
self.assertAlmostEqual(axes[2,0]*axes[2,1], -.5)
atoms = [
['Fe' , ( 0.0, 0.0, 0.0)],
['O' , (-1.3, 0.0, 0.0)],
['X' , ( 1.3, 0.0, 0.0)],
['X' , ( 0.0, -1.3, 0.0)],
['O' , ( 0.0, 1.3, 0.0)],]
l, orig, axes = geom.detect_symm(atoms)
self.assertEqual(l, 'C2v')
self.assertAlmostEqual(axes[2,0]*axes[2,1], -.5)
def test_sort_coords(self):
c = numpy.random.random((5,3))
c0 = symm.sort_coords(c)
idx = symm.argsort_coords(c)
self.assertAlmostEqual(abs(c[idx] - c0).max(), 0, 9)
def ring(n, start=0):
r = 1. / numpy.sin(numpy.pi/n)
coord = []
for i in range(n):
theta = i * (2*numpy.pi/n)
coord.append([r*numpy.cos(theta+start), r*numpy.sin(theta+start), 0])
return numpy.array(coord)
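# Geometry sketch: with r = 1/sin(pi/n) the chord between adjacent ring
# points is 2*r*sin(pi/n), i.e. exactly 2.
_ring_chk = ring(5)
assert abs(numpy.linalg.norm(_ring_chk[0] - _ring_chk[1]) - 2.0) < 1e-12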
def ringhat(n, u):
atoms = [['H', c] for c in ring(n)] \
+ [['C', c] for c in ring(n, .1)] \
+ [['N', [0,0, 1.3]],
['N', [0,0,-1.3]]]
c = numpy.dot([a[1] for a in atoms], u)
return [[atoms[i][0], c[i]] for i in range(len(atoms))]
def rotmatz(ang):
c = numpy.cos(ang)
s = numpy.sin(ang)
return numpy.array((( c, s, 0),
(-s, c, 0),
( 0, 0, 1),))
def rotmaty(ang):
c = numpy.cos(ang)
s = numpy.sin(ang)
return numpy.array((( c, 0, s),
( 0, 1, 0),
(-s, 0, c),))
def r2edge(ang, r):
return 2*r*numpy.sin(ang/2)
def make60(b5, b6):
theta1 = numpy.arccos(1/numpy.sqrt(5))
theta2 = (numpy.pi - theta1) * .5
r = (b5*2+b6)/2/numpy.sin(theta1/2)
rot72 = rotmatz(numpy.pi*2/5)
s1 = numpy.sin(theta1)
c1 = numpy.cos(theta1)
s2 = numpy.sin(theta2)
c2 = numpy.cos(theta2)
p1 = numpy.array(( s2*b5, 0, r-c2*b5))
p9 = numpy.array((-s2*b5, 0,-r+c2*b5))
p2 = numpy.array(( s2*(b5+b6), 0, r-c2*(b5+b6)))
rot1 = reduce(numpy.dot, (rotmaty(theta1), rot72, rotmaty(-theta1)))
p2s = []
for i in range(5):
p2s.append(p2)
p2 = numpy.dot(p2, rot1)
coord = []
for i in range(5):
coord.append(p1)
p1 = numpy.dot(p1, rot72)
for pj in p2s:
pi = pj
for i in range(5):
coord.append(pi)
pi = numpy.dot(pi, rot72)
for pj in p2s:
pi = pj
for i in range(5):
coord.append(-pi)
pi = numpy.dot(pi, rot72)
for i in range(5):
coord.append(p9)
p9 = numpy.dot(p9, rot72)
return numpy.array(coord)
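# Sanity sketch: make60 assembles the 60 cage vertices in four batches
# (5 + 25 + 25 + 5 points).
assert make60(1.5, 1.0).shape == (60, 3)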
def make12(b):
theta1 = numpy.arccos(1/numpy.sqrt(5))
theta2 = (numpy.pi - theta1) * .5
r = b/2./numpy.sin(theta1/2)
rot72 = rotmatz(numpy.pi*2/5)
s1 = numpy.sin(theta1)
c1 = numpy.cos(theta1)
p1 = numpy.array(( s1*r, 0, c1*r))
p2 = numpy.array((-s1*r, 0, -c1*r))
coord = [( 0, 0, r)]
for i in range(5):
coord.append(p1)
p1 = numpy.dot(p1, rot72)
for i in range(5):
coord.append(p2)
p2 = numpy.dot(p2, rot72)
coord.append(( 0, 0, -r))
return numpy.array(coord)
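# Sanity sketch: make12 places its 12 icosahedron vertices on a common
# sphere, so all coordinate norms agree.
_v12_chk = make12(1.5)
assert numpy.allclose(numpy.linalg.norm(_v12_chk, axis=1),
                      numpy.linalg.norm(_v12_chk[0]))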
def make20(b):
theta1 = numpy.arccos(numpy.sqrt(5)/3)
theta2 = numpy.arcsin(r2edge(theta1,1)/2/numpy.sin(numpy.pi/5))
r = b/2./numpy.sin(theta1/2)
rot72 = rotmatz(numpy.pi*2/5)
s2 = numpy.sin(theta2)
c2 = numpy.cos(theta2)
s3 = numpy.sin(theta1+theta2)
c3 = numpy.cos(theta1+theta2)
p1 = numpy.array(( s2*r, 0, c2*r))
p2 = numpy.array(( s3*r, 0, c3*r))
p3 = numpy.array((-s3*r, 0, -c3*r))
p4 = numpy.array((-s2*r, 0, -c2*r))
coord = []
for i in range(5):
coord.append(p1)
p1 = numpy.dot(p1, rot72)
for i in range(5):
coord.append(p2)
p2 =
|
numpy.dot(p2, rot72)
|
numpy.dot
|
import numpy as np
import math
import h5py
import re
import cv2
import random
from sklearn.linear_model import LinearRegression
import time
start_time = time.time()
def img_border(img_gray):
img_bordered = img_gray.copy()
img_w, img_h = img_bordered.shape[1], img_bordered.shape[0]
border_horizontal = (768 - img_w) / 2
border_vertical = (768 - img_h) / 2
# horizontal border: pad to width 768; when the total padding is odd, the extra column goes on the right
if border_horizontal%1 == 0.5:
border_horizontal = border_horizontal - 0.5
border_horizontal = int(border_horizontal)
img_bordered = cv2.hconcat((img_bordered, np.zeros((np.shape(img_bordered)[0], border_horizontal + 1, 1), dtype=np.uint8) ))
img_bordered = cv2.hconcat((np.zeros((np.shape(img_bordered)[0], border_horizontal, 1), dtype=np.uint8), img_bordered ))
else:
border_horizontal = int(border_horizontal)
img_bordered = cv2.hconcat((img_bordered, np.zeros((np.shape(img_bordered)[0], border_horizontal, 1), dtype=np.uint8) ))
img_bordered = cv2.hconcat((np.zeros((
|
np.shape(img_bordered)
|
numpy.shape
|
# Lint as: python3
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the Python extension-based XLA client."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import itertools
import threading
import unittest
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from tensorflow.compiler.xla.python import custom_call_for_test
from tensorflow.compiler.xla.python import xla_client
# pylint: disable=g-import-not-at-top
try:
import portpicker
except ImportError:
portpicker = None
# pylint: enable=g-import-not-at-top
bfloat16 = xla_client.bfloat16
class ComputationTest(absltest.TestCase):
"""Base class for running an XLA Computation through the local client."""
def _NewComputation(self, name=None):
if name is None:
name = self.id()
return xla_client.ComputationBuilder(name)
def _Execute(self, c, arguments):
compiled_c = c.Build().Compile()
return xla_client.execute_with_python_values(compiled_c, arguments)
def _ExecuteAndAssertWith(self, assert_func, c, arguments, expected):
assert expected is not None
results = self._Execute(c, arguments)
self.assertLen(results, len(expected))
for result, e in zip(results, expected):
# Numpy's comparison methods are a bit too lenient by treating inputs as
# "array-like", meaning that scalar 4 will be happily compared equal to
# [[4]]. We'd like to be more strict so assert shapes as well.
self.assertEqual(np.asanyarray(result).shape, np.asanyarray(e).shape)
assert_func(result, e)
def _ExecuteAndCompareExact(self, c, arguments=(), expected=None):
self._ExecuteAndAssertWith(np.testing.assert_equal, c, arguments, expected)
def _ExecuteAndCompareClose(self,
c,
arguments=(),
expected=None,
rtol=1e-7,
atol=0):
self._ExecuteAndAssertWith(
functools.partial(np.testing.assert_allclose, rtol=rtol, atol=atol), c,
arguments, expected)
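# Usage sketch (kept as comments: compiling requires an XLA backend at
# import time). A tiny end-to-end run through these helpers would be:
#   c = xla_client.ComputationBuilder("tiny")
#   c.Add(c.ConstantF32Scalar(1.0), c.ConstantF32Scalar(2.0))
#   xla_client.execute_with_python_values(c.Build().Compile(), ())  # -> [3.0]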
def NumpyArrayF32(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.float32 dtype."""
return np.array(*args, dtype=np.float32, **kwargs)
def NumpyArrayF64(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.float64 dtype."""
return np.array(*args, dtype=np.float64, **kwargs)
def NumpyArrayS32(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.int32 dtype."""
return np.array(*args, dtype=np.int32, **kwargs)
def NumpyArrayS64(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.int64 dtype."""
return np.array(*args, dtype=np.int64, **kwargs)
def NumpyArrayBool(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.bool dtype."""
return np.array(*args, dtype=np.bool, **kwargs)
class ComputationPrinting(absltest.TestCase):
def ExampleComputation(self):
builder = xla_client.ComputationBuilder("acomputation")
p0 = builder.ParameterFromNumpy(np.float32(0))
p1 = builder.ParameterFromNumpy(np.zeros((4,), np.float32))
x = builder.Mul(p0, p1)
builder.Add(x, x)
return builder.Build()
def testComputationToHloText(self):
computation = self.ExampleComputation()
hlo_text = computation.GetHloText()
self.assertTrue(hlo_text.startswith("HloModule acomputation"))
def testComputationToHloGraph(self):
computation = self.ExampleComputation()
hlo_dot_graph = computation.GetHloDotGraph()
self.assertTrue(hlo_dot_graph.startswith("digraph "))
def testHloModuleToHloText(self):
computation = self.ExampleComputation()
hlo_text = computation.computation.get_hlo_module().to_string()
self.assertTrue(hlo_text.startswith("HloModule acomputation"))
def testHloModuleToHloGraph(self):
computation = self.ExampleComputation()
hlo_dot_graph = xla_client._xla.hlo_module_to_dot_graph(
computation.computation.get_hlo_module())
self.assertTrue(hlo_dot_graph.startswith("digraph "))
def testCompiledHloModuleToHloText(self):
computation = self.ExampleComputation()
executable = computation.Compile()
hlo_modules = executable.get_hlo_modules()
self.assertLen(hlo_modules, 1)
hlo_text = hlo_modules[0].to_string()
self.assertTrue(hlo_text.startswith("HloModule acomputation"))
self.assertIn("fusion", hlo_text)
class ComputationHashTest(absltest.TestCase):
def testHash(self):
builder0 = xla_client.ComputationBuilder("computation0")
p0 = builder0.ParameterFromNumpy(np.float32(0))
p1 = builder0.ParameterFromNumpy(np.zeros((4,), np.float32))
builder0.Mul(p0, p1)
computation0 = builder0.Build()
builder1 = xla_client.ComputationBuilder("computation1")
p0 = builder1.ParameterFromNumpy(np.float32(0))
p1 = builder1.ParameterFromNumpy(np.zeros((4,), np.float32))
builder1.Mul(p0, p1)
computation1 = builder1.Build()
self.assertEqual(computation0.Hash(), computation1.Hash())
class ComputationsWithConstantsTest(ComputationTest):
"""Tests focusing on Constant ops."""
def testConstantScalarSumS8(self):
c = self._NewComputation()
c.Add(c.Constant(np.int8(1)), c.Constant(np.int8(2)))
self._ExecuteAndCompareExact(c, expected=[np.int8(3)])
def testConstantScalarSumBF16(self):
c = self._NewComputation()
c.Add(c.Constant(bfloat16(1.11)), c.Constant(bfloat16(3.14)))
self._ExecuteAndCompareClose(c, expected=[bfloat16(4.25)])
def testConstantScalarSumF32(self):
c = self._NewComputation()
c.Add(c.ConstantF32Scalar(1.11), c.ConstantF32Scalar(3.14))
self._ExecuteAndCompareClose(c, expected=[4.25])
def testConstantScalarSumF64(self):
c = self._NewComputation()
c.Add(c.ConstantF64Scalar(1.11), c.ConstantF64Scalar(3.14))
self._ExecuteAndCompareClose(c, expected=[4.25])
def testConstantScalarSumS32(self):
c = self._NewComputation()
c.Add(c.ConstantS32Scalar(1), c.ConstantS32Scalar(2))
self._ExecuteAndCompareClose(c, expected=[3])
def testConstantScalarSumS64(self):
c = self._NewComputation()
c.Add(c.ConstantS64Scalar(1), c.ConstantS64Scalar(2))
self._ExecuteAndCompareClose(c, expected=[3])
def testConstantVectorMulF16(self):
c = self._NewComputation()
c.Mul(
c.Constant(np.array([2.5, 3.3, -1.2, 0.7], np.float16)),
c.Constant(np.array([-1.2, 2, -2, -3], np.float16)))
self._ExecuteAndCompareClose(
c, expected=[np.array([-3, 6.6, 2.4, -2.1], np.float16)], rtol=2e-3)
def testConstantVectorMulF32(self):
c = self._NewComputation()
c.Mul(
c.Constant(NumpyArrayF32([2.5, 3.3, -1.2, 0.7])),
c.Constant(NumpyArrayF32([-1.2, 2, -2, -3])))
self._ExecuteAndCompareClose(c, expected=[[-3, 6.6, 2.4, -2.1]])
def testConstantVectorMulF64(self):
c = self._NewComputation()
c.Mul(
c.Constant(NumpyArrayF64([2.5, 3.3, -1.2, 0.7])),
c.Constant(NumpyArrayF64([-1.2, 2, -2, -3])))
self._ExecuteAndCompareClose(c, expected=[[-3, 6.6, 2.4, -2.1]])
def testConstantVectorScalarDivF32(self):
c = self._NewComputation()
c.Div(
c.Constant(NumpyArrayF32([1.5, 2.5, 3.0, -10.8])),
c.ConstantF32Scalar(2.0))
self._ExecuteAndCompareClose(c, expected=[[0.75, 1.25, 1.5, -5.4]])
def testConstantVectorScalarDivF64(self):
c = self._NewComputation()
c.Div(
c.Constant(NumpyArrayF64([1.5, 2.5, 3.0, -10.8])),
c.ConstantF64Scalar(2.0))
self._ExecuteAndCompareClose(c, expected=[[0.75, 1.25, 1.5, -5.4]])
def testConstantVectorScalarPowF32(self):
c = self._NewComputation()
c.Pow(c.Constant(NumpyArrayF32([1.5, 2.5, 3.0])), c.ConstantF32Scalar(2.))
self._ExecuteAndCompareClose(c, expected=[[2.25, 6.25, 9.]])
def testConstantVectorScalarPowF64(self):
c = self._NewComputation()
c.Pow(c.Constant(NumpyArrayF64([1.5, 2.5, 3.0])), c.ConstantF64Scalar(2.))
self._ExecuteAndCompareClose(c, expected=[[2.25, 6.25, 9.]])
def testIota(self):
c = self._NewComputation()
c.Iota(np.float32, 10)
self._ExecuteAndCompareExact(c, expected=[np.arange(10, dtype=np.float32)])
def testBroadcastedIota(self):
c = self._NewComputation()
c.BroadcastedIota(np.int64, (2, 3), 1)
expected = np.array([[0, 1, 2], [0, 1, 2]], dtype=np.int64)
self._ExecuteAndCompareExact(c, expected=[expected])
def testBooleanAnd(self):
c = self._NewComputation()
c.And(
c.Constant(NumpyArrayBool([True, False, True, False])),
c.Constant(NumpyArrayBool([True, True, False, False])))
self._ExecuteAndCompareExact(c, expected=[[True, False, False, False]])
def testBooleanOr(self):
c = self._NewComputation()
c.Or(
c.Constant(NumpyArrayBool([True, False, True, False])),
c.Constant(NumpyArrayBool([True, True, False, False])))
self._ExecuteAndCompareExact(c, expected=[[True, True, True, False]])
def testBooleanXor(self):
c = self._NewComputation()
c.Xor(
c.Constant(NumpyArrayBool([True, False, True, False])),
c.Constant(NumpyArrayBool([True, True, False, False])))
self._ExecuteAndCompareExact(c, expected=[[False, True, True, False]])
def testSum2DF32(self):
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6]])),
c.Constant(NumpyArrayF32([[1, -1, 1], [-1, 1, -1]])))
self._ExecuteAndCompareClose(c, expected=[[[2, 1, 4], [3, 6, 5]]])
def testShiftLeft(self):
c = self._NewComputation()
c.ShiftLeft(c.Constant(NumpyArrayS32([3])), c.Constant(NumpyArrayS32([2])))
self._ExecuteAndCompareClose(c, expected=[[12]])
def testShiftRightArithmetic(self):
c = self._NewComputation()
c.ShiftRightArithmetic(
c.Constant(NumpyArrayS32([-2])), c.Constant(NumpyArrayS32([1])))
self._ExecuteAndCompareClose(c, expected=[[-1]])
def testShiftRightLogical(self):
c = self._NewComputation()
c.ShiftRightLogical(
c.Constant(NumpyArrayS32([-1])), c.Constant(NumpyArrayS32([1])))
self._ExecuteAndCompareClose(c, expected=[[2**31 - 1]])
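# Note: a logical right shift fills the vacated bits with zeros, so int32 -1
# (0xFFFFFFFF) shifted right by 1 becomes 0x7FFFFFFF == 2**31 - 1, whereas the
# arithmetic shift above replicates the sign bit (-2 >> 1 == -1).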
def testSum2DF64(self):
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF64([[1, 2, 3], [4, 5, 6]])),
c.Constant(NumpyArrayF64([[1, -1, 1], [-1, 1, -1]])))
self._ExecuteAndCompareClose(c, expected=[[[2, 1, 4], [3, 6, 5]]])
def testSum2DWith1DBroadcastDim0F32(self):
# sum of a 2D array with a 1D array where the latter is replicated across
# dimension 0 to match the former's shape.
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF32([10, 20, 30])),
broadcast_dimensions=(0,))
self._ExecuteAndCompareClose(
c, expected=[[[11, 12, 13], [24, 25, 26], [37, 38, 39]]])
def testSum2DWith1DBroadcastDim0F64(self):
# sum of a 2D array with a 1D array where the latter is replicated across
# dimension 0 to match the former's shape.
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF64([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF64([10, 20, 30])),
broadcast_dimensions=(0,))
self._ExecuteAndCompareClose(
c, expected=[[[11, 12, 13], [24, 25, 26], [37, 38, 39]]])
def testSum2DWith1DBroadcastDim1F32(self):
# sum of a 2D array with a 1D array where the latter is replicated across
# dimension 1 to match the former's shape.
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF32([10, 20, 30])),
broadcast_dimensions=(1,))
self._ExecuteAndCompareClose(
c, expected=[[[11, 22, 33], [14, 25, 36], [17, 28, 39]]])
def testSum2DWith1DBroadcastDim1F64(self):
# sum of a 2D array with a 1D array where the latter is replicated across
# dimension 1 to match the former's shape.
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF64([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF64([10, 20, 30])),
broadcast_dimensions=(1,))
self._ExecuteAndCompareClose(
c, expected=[[[11, 22, 33], [14, 25, 36], [17, 28, 39]]])
def testConstantAxpyF32(self):
c = self._NewComputation()
c.Add(
c.Mul(
c.ConstantF32Scalar(2),
c.Constant(NumpyArrayF32([2.2, 3.3, 4.4, 5.5]))),
c.Constant(NumpyArrayF32([100, -100, 200, -200])))
self._ExecuteAndCompareClose(c, expected=[[104.4, -93.4, 208.8, -189]])
def testConstantAxpyF64(self):
c = self._NewComputation()
c.Add(
c.Mul(
c.ConstantF64Scalar(2),
c.Constant(NumpyArrayF64([2.2, 3.3, 4.4, 5.5]))),
c.Constant(NumpyArrayF64([100, -100, 200, -200])))
self._ExecuteAndCompareClose(c, expected=[[104.4, -93.4, 208.8, -189]])
def testCustomCall(self):
c = self._NewComputation()
for name, fn in custom_call_for_test.cpu_custom_call_targets.items():
xla_client.register_custom_call_target(name, fn, platform="cpu")
c.CustomCall(
b"test_subtract_f32",
operands=(c.ConstantF32Scalar(1.25), c.ConstantF32Scalar(0.5)),
shape=xla_client.Shape.array_shape(np.dtype(np.float32), (), ()),
operand_shapes_with_layout=(
xla_client.Shape.array_shape(np.dtype(np.float32), (), ()),
xla_client.Shape.array_shape(np.dtype(np.float32), (), ()),
))
self._ExecuteAndCompareClose(c, expected=[0.75])
class ComputationFromProtoTest(absltest.TestCase):
"""Test computation execution from HLO proto."""
def testExecuteFromProto(self):
# Build the HLO proto
b = xla_client.ComputationBuilder("computation")
b.Add(b.Constant(np.int8(1)), b.Constant(np.int8(2)))
serialized_proto = b.Build().GetSerializedProto()
# Load and execute the proto
c = xla_client.Computation(xla_client._xla.XlaComputation(serialized_proto))
ans, = xla_client.execute_with_python_values(c.Compile())
np.testing.assert_equal(ans, np.int8(3))
class ParametersTest(ComputationTest):
"""Tests focusing on Parameter ops and argument-passing."""
def setUp(self):
self.f32_scalar_2 = NumpyArrayF32(2.0)
self.f32_4vector = NumpyArrayF32([-2.3, 3.3, -4.3, 5.3])
self.f64_scalar_2 = NumpyArrayF64(2.0)
self.f64_4vector = NumpyArrayF64([-2.3, 3.3, -4.3, 5.3])
self.s32_scalar_3 = NumpyArrayS32(3)
self.s32_4vector = NumpyArrayS32([10, 15, -2, 7])
self.s64_scalar_3 = NumpyArrayS64(3)
self.s64_4vector = NumpyArrayS64([10, 15, -2, 7])
def testScalarTimesVectorAutonumberF32(self):
c = self._NewComputation()
p0 = c.ParameterFromNumpy(self.f32_scalar_2)
p1 = c.ParameterFromNumpy(self.f32_4vector)
c.Mul(p0, p1)
self._ExecuteAndCompareClose(
c,
arguments=[self.f32_scalar_2, self.f32_4vector],
expected=[[-4.6, 6.6, -8.6, 10.6]])
def testScalarTimesVectorAutonumberF64(self):
c = self._NewComputation()
p0 = c.ParameterFromNumpy(self.f64_scalar_2)
p1 = c.ParameterFromNumpy(self.f64_4vector)
c.Mul(p0, p1)
self._ExecuteAndCompareClose(
c,
arguments=[self.f64_scalar_2, self.f64_4vector],
expected=[[-4.6, 6.6, -8.6, 10.6]])
def testScalarTimesVectorS32(self):
c = self._NewComputation()
p0 = c.ParameterFromNumpy(self.s32_scalar_3)
p1 = c.ParameterFromNumpy(self.s32_4vector)
c.Mul(p0, p1)
self._ExecuteAndCompareExact(
c,
arguments=[self.s32_scalar_3, self.s32_4vector],
expected=[[30, 45, -6, 21]])
def testScalarTimesVectorS64(self):
c = self._NewComputation()
p0 = c.ParameterFromNumpy(self.s64_scalar_3)
p1 = c.ParameterFromNumpy(self.s64_4vector)
c.Mul(p0, p1)
self._ExecuteAndCompareExact(
c,
arguments=[self.s64_scalar_3, self.s64_4vector],
expected=[[30, 45, -6, 21]])
def testScalarMinusVectorExplicitNumberingF32(self):
# Use explicit numbering and pass parameter_num first. Sub is used since
# it's not commutative and can help catch parameter reversal within the
# computation.
c = self._NewComputation()
p1 = c.ParameterFromNumpy(self.f32_4vector, parameter_num=1)
p0 = c.ParameterFromNumpy(self.f32_scalar_2, parameter_num=0)
c.Sub(p1, p0)
self._ExecuteAndCompareClose(
c,
arguments=[self.f32_scalar_2, self.f32_4vector],
expected=[[-4.3, 1.3, -6.3, 3.3]])
def testScalarMinusVectorExplicitNumberingF64(self):
# Use explicit numbering and pass parameter_num first. Sub is used since
# it's not commutative and can help catch parameter reversal within the
# computation.
c = self._NewComputation()
p1 = c.ParameterFromNumpy(self.f64_4vector, parameter_num=1)
p0 = c.ParameterFromNumpy(self.f64_scalar_2, parameter_num=0)
c.Sub(p1, p0)
self._ExecuteAndCompareClose(
c,
arguments=[self.f64_scalar_2, self.f64_4vector],
expected=[[-4.3, 1.3, -6.3, 3.3]])
class BufferTest(ComputationTest):
"""Tests focusing on execution with Buffers."""
def testConstantSum(self):
c = self._NewComputation()
c.Add(c.ConstantF32Scalar(1.11), c.ConstantF32Scalar(3.14))
self._ExecuteAndCompareClose(c, expected=[4.25])
def testOneParameterSum(self):
c = self._NewComputation()
c.Add(c.ParameterFromNumpy(NumpyArrayF32(0.)), c.ConstantF32Scalar(3.14))
self._ExecuteAndCompareClose(
c, arguments=[NumpyArrayF32(1.11)], expected=[4.25])
def testTwoParameterSum(self):
c = self._NewComputation()
c.Add(
c.ParameterFromNumpy(NumpyArrayF32(0.)),
c.ParameterFromNumpy(NumpyArrayF32(0.)))
self._ExecuteAndCompareClose(
c,
arguments=[NumpyArrayF32(1.11),
NumpyArrayF32(3.14)],
expected=[4.25])
def testCannotCallWithDeletedBuffers(self):
c = self._NewComputation()
c.Add(c.ParameterFromNumpy(NumpyArrayF32(0.)), c.ConstantF32Scalar(3.14))
arg = NumpyArrayF32(1.11)
compiled_c = c.Build().Compile()
arg_buffer = xla_client.Buffer.from_pyval(arg)
arg_buffer.delete()
with self.assertRaises(RuntimeError):
compiled_c.Execute([arg_buffer])
def testShape(self):
pyval = np.array([[1., 2.]], np.float32)
local_buffer = xla_client.Buffer.from_pyval(pyval)
xla_shape = local_buffer.shape()
self.assertEqual(xla_shape.dimensions(), (1, 2))
self.assertEqual(np.dtype(xla_shape.element_type()), np.dtype(np.float32))
def testBlockHostUntilReadyWorks(self):
arg = np.array([[1., 2.]], np.float32)
arg_buffer = xla_client.Buffer.from_pyval(arg)
arg_buffer.block_host_until_ready()
# This test merely checks that nothing goes awry when we call
# block_host_until_ready(); it's difficult to test anything else.
def testCopyToHost(self):
arg0 = np.array([[1., 2.]], np.float32)
arg1 = np.array([[3., 4.]], np.float32)
arg0_buffer = xla_client.Buffer.from_pyval(arg0)
arg1_buffer = xla_client.Buffer.from_pyval(arg1)
# Prefetch two buffers using copy_to_host_async, and then retrieve their
# values using to_py.
arg0_buffer.copy_to_host_async()
arg0_buffer.copy_to_host_async() # Duplicate calls don't do anything.
arg1_buffer.copy_to_host_async()
np.testing.assert_equal(arg0, arg0_buffer.to_py())
np.testing.assert_equal(arg1, arg1_buffer.to_py())
# copy_to_host_async does nothing after to_py is called.
arg0_buffer.copy_to_host_async()
np.testing.assert_equal(arg0, arg0_buffer.to_py())
def testDevice(self):
x = np.arange(8)
for device in xla_client.get_local_backend().local_devices():
buf = xla_client.Buffer.from_pyval(x, device=device)
self.assertEqual(buf.device(), device)
np.testing.assert_equal(x, buf.to_py())
class SingleOpTest(ComputationTest):
"""Tests for single ops.
The goal here is smoke testing - to exercise the most basic functionality of
single XLA ops. As few additional ops as possible are added around the op
being tested.
"""
def testConcatenateF32(self):
c = self._NewComputation()
args = (
c.Constant(NumpyArrayF32([1.0, 2.0, 3.0])),
c.Constant(NumpyArrayF32([4.0, 5.0, 6.0])),
)
c.Concatenate(args, dimension=0)
self._ExecuteAndCompareClose(c, expected=[[1.0, 2.0, 3.0, 4.0, 5.0, 6.0]])
def testConcatenateF64(self):
c = self._NewComputation()
args = (
c.Constant(NumpyArrayF64([1.0, 2.0, 3.0])),
c.Constant(NumpyArrayF64([4.0, 5.0, 6.0])),
)
c.Concatenate(args, dimension=0)
self._ExecuteAndCompareClose(c, expected=[[1.0, 2.0, 3.0, 4.0, 5.0, 6.0]])
def testConvertElementType(self):
xla_types = {
np.bool: xla_client.PrimitiveType.PRED,
np.int32: xla_client.PrimitiveType.S32,
np.int64: xla_client.PrimitiveType.S64,
np.float32: xla_client.PrimitiveType.F32,
np.float64: xla_client.PrimitiveType.F64,
}
def _ConvertAndTest(template, src_dtype, dst_dtype):
c = self._NewComputation()
x = c.Constant(np.array(template, dtype=src_dtype))
c.ConvertElementType(x, xla_types[dst_dtype])
result = xla_client.execute_with_python_values(c.Build().Compile())
self.assertLen(result, 1)
expected = np.array(template, dtype=dst_dtype)
self.assertEqual(result[0].shape, expected.shape)
self.assertEqual(result[0].dtype, expected.dtype)
np.testing.assert_equal(result[0], expected)
x = [0, 1, 0, 0, 1]
for src_dtype, dst_dtype in itertools.product(xla_types, xla_types):
_ConvertAndTest(x, src_dtype, dst_dtype)
def testBitcastConvertType(self):
xla_x32_types = {
np.int32: xla_client.PrimitiveType.S32,
np.float32: xla_client.PrimitiveType.F32,
}
xla_x64_types = {
np.int64: xla_client.PrimitiveType.S64,
np.float64: xla_client.PrimitiveType.F64,
}
def _ConvertAndTest(template, src_dtype, dst_dtype, dst_etype):
c = self._NewComputation()
x = c.Constant(np.array(template, dtype=src_dtype))
c.BitcastConvertType(x, dst_etype)
result = xla_client.execute_with_python_values(c.Build().Compile())
self.assertLen(result, 1)
expected = np.array(template, src_dtype).view(dst_dtype)
self.assertEqual(result[0].shape, expected.shape)
self.assertEqual(result[0].dtype, expected.dtype)
np.testing.assert_equal(result[0], expected)
x = [0, 1, 0, 0, 1]
for xla_types in [xla_x32_types, xla_x64_types]:
for src_dtype, dst_dtype in itertools.product(xla_types, xla_types):
_ConvertAndTest(x, src_dtype, dst_dtype, xla_types[dst_dtype])
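# BitcastConvertType reinterprets the underlying bits rather than converting the
# value; the NumPy analogue is .view(), e.g. np.array([1], np.int32).view(np.float32)
# yields a denormal of roughly 1.4e-45 rather than 1.0.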
# TODO(b/123523486) implement AllToAll on CPU
def DISABLED_testAllToAllOneReplica(self):
samples = [
NumpyArrayF32([97.0]),
NumpyArrayF32([64.0, 117.0]),
NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]]),
]
for lhs in samples[:1]:
c = self._NewComputation()
c.AllToAll(c.Constant(lhs), 0, 0)
self._ExecuteAndCompareExact(c, expected=[lhs])
def testCrossReplicaSumOneReplica(self):
samples = [
NumpyArrayF32(42.0),
NumpyArrayF32([97.0]),
NumpyArrayF32([64.0, 117.0]),
NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]]),
]
for lhs in samples:
c = self._NewComputation()
c.CrossReplicaSum(c.Constant(lhs))
self._ExecuteAndCompareExact(c, expected=[lhs])
def testReplicaId(self):
c = self._NewComputation()
_ = c.ReplicaId()
self._ExecuteAndCompareExact(c, expected=[0])
def testCrossReplicaSumOneReplicaWithSingletonGroup(self):
samples = [
NumpyArrayF32(42.0),
NumpyArrayF32([97.0]),
NumpyArrayF32([64.0, 117.0]),
NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]]),
]
for lhs in samples:
c = self._NewComputation()
c.CrossReplicaSum(c.Constant(lhs), [[0]])
self._ExecuteAndCompareExact(c, expected=[lhs])
def testDotMatrixVectorF32(self):
c = self._NewComputation()
lhs = NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]])
rhs = NumpyArrayF32([[10.0], [20.0]])
c.Dot(c.Constant(lhs), c.Constant(rhs))
self._ExecuteAndCompareClose(c, expected=[np.dot(lhs, rhs)])
def testDotMatrixVectorF64(self):
c = self._NewComputation()
lhs = NumpyArrayF64([[2.0, 3.0], [4.0, 5.0]])
rhs = NumpyArrayF64([[10.0], [20.0]])
c.Dot(c.Constant(lhs), c.Constant(rhs))
self._ExecuteAndCompareClose(c, expected=[np.dot(lhs, rhs)])
def testDotMatrixMatrixF32(self):
c = self._NewComputation()
lhs = NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]])
rhs = NumpyArrayF32([[10.0, 20.0], [100.0, 200.0]])
c.Dot(c.Constant(lhs), c.Constant(rhs))
self._ExecuteAndCompareClose(c, expected=[np.dot(lhs, rhs)])
def testDotMatrixMatrixF64(self):
c = self._NewComputation()
lhs = NumpyArrayF64([[2.0, 3.0], [4.0, 5.0]])
rhs = NumpyArrayF64([[10.0, 20.0], [100.0, 200.0]])
c.Dot(c.Constant(lhs), c.Constant(rhs))
self._ExecuteAndCompareClose(c, expected=[np.dot(lhs, rhs)])
def testDotGeneral(self):
c = self._NewComputation()
rng = np.random.RandomState(0)
lhs = NumpyArrayF32(rng.randn(10, 3, 4))
rhs = NumpyArrayF32(rng.randn(10, 4, 5))
dimension_numbers = (([2], [1]), ([0], [0]))
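# dimension_numbers has the form ((lhs_contracting, rhs_contracting),
# (lhs_batch, rhs_batch)): contract lhs dim 2 against rhs dim 1 while batching
# over dim 0 of both operands, which matches the batched np.matmul below.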
c.DotGeneral(c.Constant(lhs), c.Constant(rhs), dimension_numbers)
self._ExecuteAndCompareClose(c, expected=[np.matmul(lhs, rhs)], rtol=1e-6)
def testDotGeneralWithDotDimensionNumbersProto(self):
c = self._NewComputation()
rng = np.random.RandomState(0)
lhs = NumpyArrayF32(rng.randn(10, 3, 4))
rhs = NumpyArrayF32(rng.randn(10, 4, 5))
dimension_numbers = xla_client.DotDimensionNumbers()
dimension_numbers.lhs_contracting_dimensions.append(2)
dimension_numbers.rhs_contracting_dimensions.append(1)
dimension_numbers.lhs_batch_dimensions.append(0)
dimension_numbers.rhs_batch_dimensions.append(0)
c.DotGeneral(c.Constant(lhs), c.Constant(rhs), dimension_numbers)
self._ExecuteAndCompareClose(c, expected=[np.matmul(lhs, rhs)], rtol=1e-6)
def testDotGeneralWithPrecisionConfig(self):
c = self._NewComputation()
rng = np.random.RandomState(0)
lhs = NumpyArrayF32(rng.randn(10, 3, 4))
rhs = NumpyArrayF32(rng.randn(10, 4, 5))
dimension_numbers = (([2], [1]), ([0], [0]))
config = xla_client.PrecisionConfig()
config.operand_precision.append(config.Precision.HIGH)
config.operand_precision.append(config.Precision.HIGHEST)
c.DotGeneral(
c.Constant(lhs),
c.Constant(rhs),
dimension_numbers,
precision_config=config)
self._ExecuteAndCompareClose(c, expected=[np.matmul(lhs, rhs)], rtol=1e-6)
def testConvF32Same(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 2, 3, 4)
rhs = a(1, 2, 1, 2) * 10
c.Conv(
c.Constant(lhs), c.Constant(rhs), [1, 1], xla_client.PaddingType.SAME)
result = np.array([[[
[640., 700., 760., 300.],
[880., 940., 1000., 380.],
[1120., 1180., 1240., 460.],
]]])
self._ExecuteAndCompareClose(c, expected=[result])
def testConvF32Valid(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 2, 3, 4)
rhs = a(1, 2, 1, 2) * 10
c.Conv(
c.Constant(lhs), c.Constant(rhs), [2, 1], xla_client.PaddingType.VALID)
result = np.array([[[
[640., 700., 760.],
[1120., 1180., 1240.],
]]])
self._ExecuteAndCompareClose(c, expected=[result])
def testConvWithGeneralPaddingF32(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 1, 2, 3)
rhs = a(1, 1, 1, 2) * 10
strides = [1, 1]
pads = [(1, 0), (0, 1)]
lhs_dilation = (2, 1)
rhs_dilation = (1, 1)
c.ConvWithGeneralPadding(
c.Constant(lhs), c.Constant(rhs), strides, pads, lhs_dilation,
rhs_dilation)
result = np.array([[[
[0., 0., 0.],
[10., 20., 0.],
[0., 0., 0.],
[40., 50., 0.],
]]])
self._ExecuteAndCompareClose(c, expected=[result])
def testConvGeneralDilatedF32(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 1, 2, 3)
rhs = a(1, 1, 1, 2) * 10
strides = [1, 1]
pads = [(1, 0), (0, 1)]
lhs_dilation = (2, 1)
rhs_dilation = (1, 1)
dimension_numbers = ("NCHW", "OIHW", "NCHW")
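# (input, kernel, output) layout strings: N=batch, C=feature, H/W=spatial;
# O and I are the kernel's output and input feature dimensions.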
c.ConvGeneralDilated(
c.Constant(lhs), c.Constant(rhs), strides, pads, lhs_dilation,
rhs_dilation, dimension_numbers)
result = np.array([[[
[0., 0., 0.],
[10., 20., 0.],
[0., 0., 0.],
[40., 50., 0.],
]]])
self._ExecuteAndCompareClose(c, expected=[result])
def testConvGeneralDilatedF32WithPrecisionConfig(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 1, 2, 3)
rhs = a(1, 1, 1, 2) * 10
strides = [1, 1]
pads = [(1, 0), (0, 1)]
lhs_dilation = (2, 1)
rhs_dilation = (1, 1)
dimension_numbers = ("NCHW", "OIHW", "NCHW")
config = xla_client.PrecisionConfig()
config.operand_precision.append(config.Precision.HIGHEST)
config.operand_precision.append(config.Precision.DEFAULT)
c.ConvGeneralDilated(
c.Constant(lhs),
c.Constant(rhs),
strides,
pads,
lhs_dilation,
rhs_dilation,
dimension_numbers,
precision_config=config)
result = np.array([[[
[0., 0., 0.],
[10., 20., 0.],
[0., 0., 0.],
[40., 50., 0.],
]]])
self._ExecuteAndCompareClose(c, expected=[result])
def testConvGeneralDilatedPermutedF32(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 1, 2, 3)
rhs = a(1, 1, 1, 2) * 10
strides = [1, 1]
pads = [(1, 0), (0, 1)]
lhs_dilation = (2, 1)
rhs_dilation = (1, 1)
dimension_numbers = ("NHWC", "OIHW", "CWNH")
c.ConvGeneralDilated(
c.Constant(np.transpose(lhs, (0, 2, 3, 1))), c.Constant(rhs), strides,
pads, lhs_dilation, rhs_dilation, dimension_numbers)
result = np.array([[[[0., 0., 0.], [10., 20., 0.], [0., 0., 0.],
[40., 50., 0.]]]])
self._ExecuteAndCompareClose(
c, expected=[np.transpose(result, (1, 3, 0, 2))])
def testConvGeneralDilatedGroupedConvolutionF32(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 2, 2, 3)
rhs = a(2, 1, 1, 2) * 10
strides = [1, 1]
pads = [(1, 0), (0, 1)]
lhs_dilation = (2, 1)
rhs_dilation = (1, 1)
dimension_numbers = ("NCHW", "OIHW", "NCHW")
feature_group_count = 2
c.ConvGeneralDilated(
c.Constant(lhs), c.Constant(rhs), strides, pads, lhs_dilation,
rhs_dilation, dimension_numbers, feature_group_count)
result = np.array([[[
[0., 0., 0.],
[10., 20., 0.],
[0., 0., 0.],
[40., 50., 0.],
], [
[0., 0., 0.],
[330., 380., 160.],
[0., 0., 0.],
[480., 530., 220.],
]]])
self._ExecuteAndCompareClose(c, expected=[result])
def testBooleanNot(self):
c = self._NewComputation()
arr = NumpyArrayBool([True, False, True])
c.Not(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=[~arr])
def testPopulationCount(self):
c = self._NewComputation()
arr = NumpyArrayS32([3, 0, 1])
c.PopulationCount(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=[np.array([2, 0, 1])])
def testCountLeadingZeros(self):
c = self._NewComputation()
arr = NumpyArrayS32([0x7FFF, 0x12345678])
c.Clz(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=[[17, 3]])
def testExp(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Exp(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=[np.exp(arr)])
def testExpm1(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Expm1(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=[np.expm1(arr)])
def testRound(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Round(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=[np.round(arr)])
def testLog(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Log(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=[np.log(arr)])
def testLog1p(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Log1p(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=[
|
np.log1p(arr)
|
numpy.log1p
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Training script for the permuted MNIST experiment.
"""
from __future__ import print_function
import argparse
import os
import sys
import math
import time
import datetime
import numpy as np
import tensorflow as tf
from copy import deepcopy
from six.moves import cPickle as pickle
from utils.data_utils import construct_permute_mnist
from utils.er_utils import update_reservior, update_fifo_buffer, er_mem_update_hindsight, update_avg_image_vectors
from utils.utils import get_sample_weights, sample_from_dataset, update_episodic_memory, concatenate_datasets, samples_for_each_class, sample_from_dataset_icarl, average_acc_stats_across_runs, average_fgt_stats_across_runs
from utils.vis_utils import plot_acc_multiple_runs, plot_histogram, snapshot_experiment_meta_data, snapshot_experiment_eval
from model import Model
###############################################################
################ Some definitions #############################
### These will be edited by the command line options ##########
###############################################################
## Training Options
NUM_RUNS = 10 # Number of experiments to average over
TRAIN_ITERS = 5000 # Number of training iterations per task
BATCH_SIZE = 16
LEARNING_RATE = 1e-3
RANDOM_SEED = 1234
VALID_OPTIMS = ['SGD', 'MOMENTUM', 'ADAM']
OPTIM = 'SGD'
OPT_POWER = 0.9
OPT_MOMENTUM = 0.9
VALID_ARCHS = ['FC-S', 'FC-B']
ARCH = 'FC-S'
## Model options
MODELS = ['VAN', 'PI', 'EWC', 'MAS', 'RWALK', 'A-GEM', 'S-GEM', 'FTR_EXT', 'MER', 'ER-Reservoir', 'ER-Ring', 'ER-Hindsight-Anchors'] #List of valid models
IMP_METHOD = 'EWC'
SYNAP_STGTH = 75000
FISHER_EMA_DECAY = 0.9 # Exponential moving average decay factor for Fisher computation (online Fisher)
FISHER_UPDATE_AFTER = 10 # Number of training iterations for which the F_{\theta}^t is computed (see Eq. 10 in RWalk paper)
SAMPLES_PER_CLASS = 25 # Number of samples per class from previous tasks
INPUT_FEATURE_SIZE = 784
IMG_HEIGHT = 28
IMG_WIDTH = 28
IMG_CHANNELS = 1
TOTAL_CLASSES = 10 # Total number of classes in the dataset
EPS_MEM_BATCH_SIZE = 256
DEBUG_EPISODIC_MEMORY = False
USE_GPU = True
K_FOR_CROSS_VAL = 3
TIME_MY_METHOD = False
COUNT_VIOLATIONS = False
MEASURE_PERF_ON_EPS_MEMORY = False
# MER Specific options
MER_S = 10
MER_BETA = 0.1
MER_GAMMA = 0.1
## Logging, saving and testing options
LOG_DIR = './permute_mnist_results'
## Evaluation options
## Num Tasks
NUM_TASKS = 23
MULTI_TASK = False
def normalize_tensors(a):
num_tensors = a.shape[0]
for i in range(num_tensors):
a[i] = (a[i] - a[i].min())/ (a[i].max() - a[i].min())
return a
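# A worked example: each tensor is min-max rescaled to [0, 1] independently, so
# normalize_tensors(np.array([[2., 4.], [0., 10.]])) returns [[0., 1.], [0., 1.]].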
def get_arguments():
"""Parse all the arguments provided from the CLI.
Returns:
A list of parsed arguments.
"""
parser = argparse.ArgumentParser(description="Script for the permuted MNIST experiment.")
parser.add_argument("--cross-validate-mode", action="store_true",
help="If option is chosen then snapshotting after each batch is disabled")
parser.add_argument("--online-cross-val", action="store_true",
help="If option is chosen then enable the online cross validation of the learning rate")
parser.add_argument("--train-single-epoch", action="store_true",
help="If option is chosen then train for single epoch")
parser.add_argument("--eval-single-head", action="store_true",
help="If option is chosen then evaluate on a single head setting.")
parser.add_argument("--arch", type=str, default=ARCH, help="Network Architecture for the experiment.\
\n \nSupported values: %s"%(VALID_ARCHS))
parser.add_argument("--num-runs", type=int, default=NUM_RUNS,
help="Total runs/ experiments over which accuracy is averaged.")
parser.add_argument("--train-iters", type=int, default=TRAIN_ITERS,
help="Number of training iterations for each task.")
parser.add_argument("--batch-size", type=int, default=BATCH_SIZE,
help="Mini-batch size for each task.")
parser.add_argument("--random-seed", type=int, default=RANDOM_SEED,
help="Random Seed.")
parser.add_argument("--learning-rate", type=float, default=LEARNING_RATE,
help="Starting Learning rate for each task.")
parser.add_argument("--optim", type=str, default=OPTIM,
help="Optimizer for the experiment. \
\n \nSupported values: %s"%(VALID_OPTIMS))
parser.add_argument("--imp-method", type=str, default=IMP_METHOD,
help="Model to be used for LLL. \
\n \nSupported values: %s"%(MODELS))
parser.add_argument("--synap-stgth", type=float, default=SYNAP_STGTH,
help="Synaptic strength for the regularization.")
parser.add_argument("--fisher-ema-decay", type=float, default=FISHER_EMA_DECAY,
help="Exponential moving average decay for Fisher calculation at each step.")
parser.add_argument("--fisher-update-after", type=int, default=FISHER_UPDATE_AFTER,
help="Number of training iterations after which the Fisher will be updated.")
parser.add_argument("--mem-size", type=int, default=SAMPLES_PER_CLASS,
help="Number of samples per class from previous tasks.")
parser.add_argument("--eps-mem-batch", type=int, default=EPS_MEM_BATCH_SIZE,
help="Batch size used when sampling from the episodic memory.")
parser.add_argument("--examples-per-task", type=int, default=1000,
help="Number of examples per task.")
parser.add_argument("--anchor-eta", type=float, default=0.1,
help="Neural mean embedding strength for anchor generation.")
parser.add_argument("--log-dir", type=str, default=LOG_DIR,
help="Directory where the plots and model accuracies will be stored.")
return parser.parse_args()
def train_task_sequence(model, sess, args):
"""
Train and evaluate the LLL system such that we only see an example once
Args:
Returns:
dict: A dictionary containing means and stds for the experiment
"""
# List to store accuracy for each run
runs = []
batch_size = args.batch_size
if model.imp_method == 'A-GEM' or model.imp_method == 'MER' or 'ER-' in model.imp_method:
use_episodic_memory = True
else:
use_episodic_memory = False
# Loop over number of runs to average over
for runid in range(args.num_runs):
print('\t\tRun %d:'%(runid))
# Initialize the random seeds
np.random.seed(args.random_seed+runid)
time_start = time.time()
# Load the permute mnist dataset
datasets = construct_permute_mnist(model.num_tasks)
time_end = time.time()
time_spent = time_end - time_start
print('Data loading time: {}'.format(time_spent))
episodic_mem_size = args.mem_size*model.num_tasks*TOTAL_CLASSES
# Initialize all the variables in the model
sess.run(tf.global_variables_initializer())
# Run the init ops
model.init_updates(sess)
# List to store accuracies for a run
evals = []
# List to store the classes that we have so far - used at test time
test_labels = np.arange(TOTAL_CLASSES)
if use_episodic_memory:
# Reserve a space for episodic memory
episodic_images = np.zeros([episodic_mem_size, INPUT_FEATURE_SIZE])
episodic_labels = np.zeros([episodic_mem_size, TOTAL_CLASSES])
count_cls = np.zeros(TOTAL_CLASSES, dtype=np.int32)
episodic_filled_counter = 0
examples_seen_so_far = 0
if model.imp_method == 'ER-Hindsight-Anchors':
avg_img_vectors = np.zeros([TOTAL_CLASSES, INPUT_FEATURE_SIZE])
anchor_images = np.zeros([model.num_tasks*TOTAL_CLASSES, INPUT_FEATURE_SIZE])
anchor_labels = np.zeros([model.num_tasks*TOTAL_CLASSES, TOTAL_CLASSES])
anchor_count_cls = np.zeros(TOTAL_CLASSES, dtype=np.int32)
# Mask for softmax
# Since all the classes are present in all the tasks, there is nothing to mask
logit_mask = np.ones(TOTAL_CLASSES)
if COUNT_VIOLATIONS:
violation_count = np.zeros(model.num_tasks)
vc = 0
# Training loop for all the tasks
for task in range(len(datasets)):
print('\t\tTask %d:'%(task))
anchors_counter = task*TOTAL_CLASSES # 1 per class per task
# If not the first task then restore weights from previous task
if(task > 0):
model.restore(sess)
if MULTI_TASK:
if task == 0:
# Extract training images and labels for the current task
task_train_images = datasets[task]['train']['images']
task_train_labels = datasets[task]['train']['labels']
sample_weights = np.ones([task_train_labels.shape[0]], dtype=np.float32)
total_train_examples = task_train_images.shape[0]
# Randomly shuffle the training examples
perm = np.arange(total_train_examples)
np.random.shuffle(perm)
train_x = task_train_images[perm][:args.examples_per_task]
train_y = task_train_labels[perm][:args.examples_per_task]
task_sample_weights = sample_weights[perm][:args.examples_per_task]
for t_ in range(1, len(datasets)):
task_train_images = datasets[t_]['train']['images']
task_train_labels = datasets[t_]['train']['labels']
sample_weights = np.ones([task_train_labels.shape[0]], dtype=np.float32)
total_train_examples = task_train_images.shape[0]
# Randomly shuffle the training examples
perm = np.arange(total_train_examples)
np.random.shuffle(perm)
train_x = np.concatenate((train_x, task_train_images[perm][:args.examples_per_task]), axis=0)
train_y = np.concatenate((train_y, task_train_labels[perm][:args.examples_per_task]), axis=0)
task_sample_weights = np.concatenate((task_sample_weights, sample_weights[perm][:args.examples_per_task]), axis=0)
perm = np.arange(train_x.shape[0])
np.random.shuffle(perm)
train_x = train_x[perm]
train_y = train_y[perm]
task_sample_weights = task_sample_weights[perm]
else:
# Skip training for this task
continue
else:
# Extract training images and labels for the current task
task_train_images = datasets[task]['train']['images']
task_train_labels = datasets[task]['train']['labels']
# Assign equal weights to all the examples
task_sample_weights = np.ones([task_train_labels.shape[0]], dtype=np.float32)
total_train_examples = task_train_images.shape[0]
# Randomly shuffle the training examples
perm = np.arange(total_train_examples)
|
np.random.shuffle(perm)
|
numpy.random.shuffle
|
from multiprocessing import Manager, Pool, Process
from matplotlib import pyplot as plt
from numba import njit
import numpy as np
import pandas as pd
import random
from scipy.optimize import minimize
import time
class LPPLS(object):
def __init__(self, observations):
"""
Args:
observations (np.array,pd.DataFrame): 2xM matrix with timestamp and observed value.
"""
assert isinstance(observations, (np.ndarray, pd.DataFrame)), \
f'Expected observations to be <pd.DataFrame> or <np.ndarray>, got :{type(observations)}'
self.observations = observations
self.coef_ = {}
self.indicator_result_list = None
self.indicator_confs_medians = None
@staticmethod
@njit
def lppls(t, tc, m, w, a, b, c1, c2):
return a + np.power(tc - t, m) * (b + ((c1 * np.cos(w * np.log(tc - t))) + (c2 * np.sin(w * np.log(tc - t)))))
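# LPPLS functional form: y(t) = a + (tc - t)^m * [b + c1*cos(w*ln(tc - t)) + c2*sin(w*ln(tc - t))],
# where tc is the critical time, m the power-law exponent and w the log-periodic angular frequency.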
def func_restricted(self, x, *args):
"""
Finds the least square difference.
See https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html
Args:
x(np.ndarray): 1-D array with shape (n,).
args: Tuple of the fixed parameters needed to completely specify the function.
Returns:
(float)
"""
tc = x[0]
m = x[1]
w = x[2]
obs = args[0]
a, b, c1, c2 = self.matrix_equation(obs, tc, m, w)
delta = [self.lppls(t, tc, m, w, a, b, c1, c2) for t in obs[0, :]]
delta = np.subtract(delta, obs[1, :])
delta =
|
np.power(delta, 2)
|
numpy.power
|
# -*- coding:utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
def degree(*args):
v =
|
np.subtract(args[1], args[0])
|
numpy.subtract
|
import numpy as np
import warnings
from collections import OrderedDict, namedtuple
from scipy.integrate import odeint, solve_ivp
from scipy.optimize import minimize
from numba import jit
FitResult = namedtuple('FitResult', ['C', 'y0', 'R2', 'MRPD', 'success'])
@jit(nopython=True)
def _gillespie(tmax, y0, C, i1, i2, i3, i4, sN, tblock=100):
nC = len(C)
indsC = range(nC)
traj = np.zeros((sN, tblock, len(y0)+1))
for i in range(sN):
y = y0.copy()
yext = np.concatenate((y, np.ones((1,))))
t_s = 0
t_i = 0
traj[i, 0, 1:] = y
while t_s < tmax:
w = C*yext[i1]*yext[i2]
Wtot = np.sum(w)
if Wtot == 0:
# Then it's finished
break
dt = -np.log(np.random.random())/Wtot
t_s += dt
t_i += 1
C_i = np.where(np.random.random() < np.cumsum(w)/Wtot)[0][0]
yext[i3[C_i]] -= 1.
yext[i4[C_i]] += 1.
yext[-1] = 1.
if t_i >= traj.shape[1]:
traj = np.concatenate((traj,
np.zeros((sN, tblock, len(y0)+1))),
axis=1)
traj[i, t_i, 0] = t_s
traj[i, t_i, 1:] = yext[:-1]
return traj
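# Each Gillespie step draws an exponential waiting time dt = -ln(U)/Wtot from the
# total propensity Wtot, then selects reaction C_i with probability w_i/Wtot via
# the cumulative sum of the individual propensities.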
class CModel(object):
"""CModel
A compartment model. Has a number of states and evolves in time with three
types of terms:
* Constant rate
d y_i / d_t = D^(0)_i
* Linear rate
d y_i / d_t = D^(1)_ij y_j
* Quadratic rate
d y_i / d_t = D^(2)_ijk y_j y_k
"""
def __init__(self, states=''):
"""Initialise a CModel
Initialise a compartment model to have the given states.
Keyword Arguments:
states {str or list} -- States of the model. Can be a string,
in which case each state will be named by
one letter, or a list of strings. State
names must be distinct (default: {''})
"""
# Initialise states
self._N = len(states)
self._states = list(states)
if len(set(states)) != self._N:
raise ValueError('State list has repeated entries')
# Now, the couplings
self._D0 = np.zeros((self._N))
self._D0m = self._D0.copy().astype(bool) # Mask
self._D1 = np.zeros((self._N, self._N))
self._D1m = self._D1.copy().astype(bool) # Mask
self._D2 = np.zeros((self._N, self._N, self._N))
self._D2m = self._D2.copy().astype(bool) # Mask
self._couplings = OrderedDict()
self._cdata = {
'C': np.zeros(0),
'i': np.zeros((0, 4)).astype(int)
}
@property
def size(self):
return self._N
@property
def states(self):
return list(self._states)
@property
def couplings(self):
return OrderedDict(self._couplings)
def set_coupling_rate(self, descr, C=0, name=None):
"""Set a coupling between states
Set a coupling between two states of the model. The coupling can be
of three types:
* CONSTANT
A constant rate of growth for a certain state of the
model.
* LINEAR
A rate of growth that is linearly proportional to the population
of the same or another state.
* QUADRATIC
A rate of growth that is proportional to the product of the
populations of two states.
The coupling is defined by a descriptor string. This can have up to
four states:
"S1*S2:S3=>S4"
defines a quadratic coupling, controlled by the S1*S2 product, from S3
to S4. The argument C controls the coupling. This means having:
dS3/dt = -C*S1*S2
dS4/dt = C*S1*S2
If S3 and S4 are both missing, S3=S1 and S4=S2 is assumed. If one of
S1 or S2 is missing, the coupling is taken to be linear, proportional
to the one that's present. If both are missing, the growth is taken
to be constant. Sign is controlled by the arrow; by convention, S4
sees a term proportional to C, and S3 to -C. Either term can be
missing, in which case we have a source or sink.
Examples:
"S1:=>S4"
results in
dS4/dt = C*S1
"S3=>S4"
results in
dS3/dt = -C
dS4/dt = C
and so on.
Arguments:
descr {str} -- Descriptor string for the coupling.
Keyword Arguments:
C {number} -- Coupling constant (default: {0})
name {str} -- Name of coupling (default: {None})
"""
# Parse the description
if ':' in descr:
s12, s34 = descr.split(':')
elif '=>' in descr:
s12 = None
s34 = descr
else:
s12 = descr
s34 = None
if s12 is None or s12 == '':
s1 = None
s2 = None
else:
if '*' in s12:
s1, s2 = s12.split('*')
s1 = None if s1 == '' else s1
s2 = None if s2 == '' else s2
if s1 is None:
s1, s2 = s2, s1
else:
s1 = s12
s2 = None
if s34 is None or s34 == '':
s3 = None
s4 = None
else:
if '=>' in s34:
s3, s4 = s34.split('=>')
s3 = None if s3 == '' else s3
s4 = None if s4 == '' else s4
else:
s3 = None
s4 = s34
if not all(s in self._states for s in (s1, s2, s3, s4)
if s is not None):
raise(ValueError('Invalid state names used in coupling '
'definition'))
# What kind of coupling is it?
i1 = self._states.index(s1)
i2 = self._states.index(s2) if s2 is not None else self._N
i3 = self._states.index(s3) if s3 is not None else self._N
i4 = self._states.index(s4) if s4 is not None else self._N
if i3+i4 == 2*self._N:
if i2 == self._N:
i4 = i1
else:
i3 = i1
i4 = i2
if name is None:
descr = "{0}*{1}:{2}=>{3}".format(*[s if s is not None else ''
for s in (s1, s2, s3, s4)])
name = descr
if name in self._couplings:
raise ValueError('Coupling {0} already exists '
'for model'.format(name))
self._couplings[name] = (descr, C)
self._cdata['C'] = np.concatenate([self._cdata['C'], [C]])
self._cdata['i'] = np.concatenate(
[self._cdata['i'], [[i1, i2, i3, i4]]], axis=0)
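# A minimal usage sketch (hypothetical SIR-style states and rates, not part of
# this file):
#   model = CModel('SIR')
#   model.set_coupling_rate('S*I:S=>I', C=0.3)  # quadratic infection term
#   model.set_coupling_rate('I:I=>R', C=0.1)    # linear recovery term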
def edit_coupling_rate(self, name, C):
"""Change the coupling rate for an existing coupling
Change the coupling rate for an existing coupling
Arguments:
name {str} -- Name of the coupling
C {number} -- New value
"""
names = list(self._couplings.keys())
try:
i = names.index(name)
except ValueError:
raise ValueError('No coupling with name {0} exists'.format(name))
descr, _ = self._couplings[name]
self._couplings[name] = (descr, C)
self._cdata['C'][i] = C
def dy_dt(self, y):
"""Time derivative from a given state
Compute the time derivative of the model for a given state vector.
Arguments:
y {np.ndarray} -- State vector
Returns:
[np.ndarray] -- Time derivative of y
"""
yext = np.concatenate([y, [1]])
dydt = yext*0.
C = self._cdata['C']
i1, i2, i3, i4 = self._cdata['i'].T
cy = C*yext[i1]*yext[i2]
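# np.add.at performs an unbuffered scatter-add, so repeated indices in i3/i4
# accumulate correctly; a plain dydt[i3] -= cy would apply each duplicate only once.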
np.add.at(dydt, i3, -cy)
np.add.at(dydt, i4, cy)
return dydt[:-1]
def d2y_dtdC(self, y, dydC):
"""Time derivative of the model's gradient
Time derivative of the gradient of the model's state with respect
to all its parameters.
Arguments:
y {np.ndarray} -- State vector
dydC {np.ndarray} -- Gradient of y w.r.t. coupling parameters
"""
C = self._cdata['C']
nC = len(C)
i1, i2, i3, i4 = self._cdata['i'].T
yext = np.concatenate([y, [1]])
dydCext = dydC.reshape(-1, self._N)
if dydCext.shape[0] != nC:
raise ValueError('Invalid size gradient passed to d2y_dtdC')
dydCext = np.concatenate(
[dydCext, np.zeros((nC, 1))], axis=1)
d2ydtdC = dydCext*0.
dcy1 = yext[i1]*yext[i2]
np.add.at(d2ydtdC, (range(nC), i3), -dcy1)
np.add.at(d2ydtdC, (range(nC), i4), dcy1)
iC = np.repeat(np.arange(nC), nC)
i1t = np.tile(i1, nC)
i2t = np.tile(i2, nC)
i3t = np.tile(i3, nC)
i4t = np.tile(i4, nC)
dcy2 = np.tile(C, nC)*(dydCext[iC, i1t]*yext[i2t] +
dydCext[iC, i2t]*yext[i1t])
np.add.at(d2ydtdC, (iC, i3t), -dcy2)
np.add.at(d2ydtdC, (iC, i4t), dcy2)
d2ydtdC = d2ydtdC[:, :-1].reshape(-1)
return d2ydtdC
def d2y_dtdy0(self, y, dydy0):
"""Time derivative of the model's gradient w.r.t initial conditions
Time derivative of the gradient of the model's state with respect
to its initial conditions.
Arguments:
y {np.ndarray} -- State vector
dydy0 {np.ndarray} -- Gradient of y w.r.t. initial conditions
"""
C = self._cdata['C']
nC = len(C)
i1, i2, i3, i4 = self._cdata['i'].T
yext = np.concatenate([y, [1]])
dydy0ext = dydy0.reshape(-1, self._N)
if dydy0ext.shape[0] != self._N:
raise ValueError('Invalid size gradient passed to d2y_dtdy0')
dydy0ext = np.concatenate(
[dydy0ext, np.zeros((self._N, 1))], axis=1)
d2ydtdy0 = dydy0ext*0.
iy0 = np.repeat(np.arange(self._N), nC)
i1t = np.tile(i1, self._N)
i2t = np.tile(i2, self._N)
i3t = np.tile(i3, self._N)
i4t = np.tile(i4, self._N)
dcy = np.tile(C, self._N)*(dydy0ext[iy0, i1t]*yext[i2t] +
dydy0ext[iy0, i2t]*yext[i1t])
np.add.at(d2ydtdy0, (iy0, i3t), -dcy)
np.add.at(d2ydtdy0, (iy0, i4t), dcy)
d2ydtdy0 = d2ydtdy0[:, :-1].reshape(-1)
return d2ydtdy0
def integrate(self, t, y0, use_gradient=False, events=None, max_step=np.inf):
"""Integrate the ODEs of the model
Carry out an integration of the system of ODEs representing
the model using the scipy.integrate.solve_ivp method.
Arguments:
t {np.ndarray} -- Array of times for the integration
y0 {np.ndarray} -- Starting state
Keyword Arguments:
use_gradient {bool} -- If True, also compute the
gradient of the curves w.r.t parameters and initial conditions
(default: {False})
events {[fun(t, y)]} -- List of functions of t, y (where y is the
array of states, or states + gradients if use_gradient is True)
at which special events happen and are recorded, or can terminate
integration. See scipy's documentation for solve_ivp for further
details (default: {None})
Returns:
OrderedDict -- Dictionary of the computed trajectories, with the
following keys:
- y: trajectory of the main populations
- dy/d([coupling name]): derivatives wrt coupling constants
- dy/d([state name]0): derivatives wrt starting conditions
- t: times of integration. Usually the entire model.t, but can
be less in presence of terminating events.
- t_events: times of events.
The derivatives are present only if use_gradient = True. The event
times are present only if events is not None.
"""
if not use_gradient:
def ode(t, y):
return self.dy_dt(y)
sol = solve_ivp(ode, [t[0], t[-1]], y0, t_eval=t,
events=events, max_step=max_step)
traj = sol.y.T
ans = OrderedDict({'y': traj})
else:
N2 = self._N**2
def ode(t, y):
dydt = y*0.
dydt[:self._N] = self.dy_dt(y[:self._N])
dydt[self._N:-N2] = self.d2y_dtdC(y[:self._N],
y[self._N:-N2])
dydt[-N2:] = self.d2y_dtdy0(y[:self._N], y[-N2:])
return dydt
nC = len(self._couplings)
y0 = np.concatenate([y0, np.zeros(nC*self._N),
np.eye(self._N).reshape((-1,))])
sol = solve_ivp(ode, [t[0], t[-1]], y0, t_eval=t,
events=events, max_step=max_step)
traj = sol.y.T
ans = OrderedDict({'y': traj[:, :self._N]})
for i, name in enumerate(self._couplings.keys()):
ans['dy/d({0})'.format(name)] = traj[:,
(i+1)*self._N:(i+2) *
self._N]
i0 = self._N*(nC+1)
for i, name in enumerate(self._states):
ans['dy/d({0}0)'.format(name)] = traj[:,
i*self._N+i0:(i+1) *
self._N+i0]
ans['t'] = sol.t
if events is not None:
ans['t_events'] = sol.t_events
return ans
def gillespie(self, tmax, y0, samples=1000):
"""Perform a Gillespie stochastic simulation
Simulate the system stochastically with Gillespie's algorithm.
Arguments:
tmax {float} -- Maximum time for the simulation
y0 {np.ndarray} -- Starting state
Keyword Arguments:
samples {number} -- Number of trajectories to sample
(default: {1000})
Returns:
OrderedDict -- Dictionary of the computed trajectories, with the
following keys:
- t: times at which the state is computed, for each
trajectory
- y: trajectories of the main populations
"""
# Grab coupling data
C = self._cdata['C']
i1, i2, i3, i4 = self._cdata['i'].T
traj = _gillespie(tmax, y0, C, i1, i2, i3, i4, samples)
tmax_i = np.argmax(traj[:, :, 0], axis=1)
# Fill in the rest
for i, tmi in enumerate(tmax_i):
traj[i, tmi:, 0] = tmax
traj[i, tmi:, 1:] = traj[i, tmi, None, 1:]
ans = OrderedDict({'t': traj[:, :, 0]})
ans['y'] = traj[:, :, 1:]
return ans
def fit(self, data, steps=1000, constraints={}):
"""Fit the model to data
Fit the model's parameters to a given data set, minimising the
Root Sum Square of the error. This modifies the couplings of the model
instance, setting them to the fitted values.
Arguments:
data {list} -- A list of the data points available. Can be a list
of dictionaries or arrays. If it's dictionaries, they must all
have a member `t' (for the time of the point) and then can have
members with the names of various compartments of the model. If
it's arrays, then they must have N+1 elements for N compartments;
the first is time, and the latter are the compartments, in order.
Missing values can be set to NaN.
Keyword Arguments:
steps {number} -- Number of integration steps to split the time
interval into (default: {1000})
constraints {dict} -- Dictionary whose keys must be valid names of
coupling constants or X0 with X a valid name of a state, and whose
values must be numbers.
These constants, or the initial conditions for those states,
will be fixed to those values and not allowed to change.
Returns:
FitResult - Named tuple containing the following members:
- C: fitted constants for each coupling
- y0: fitted optimal starting state
- R2: array of R squared values for goodness of fit, one
for each curve (can be R2 > 1 due to this)
- MRPD: Mean Relative Percent Difference, total
- success: if True, the fitting has converged to a final value.
Raises:
ValueError -- Thrown if data is empty or invalid in format.
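Example (hypothetical coupling and state names):
fit(data, constraints={'S*I:S=>I': 0.3, 'R0': 0.0})
fixes that coupling constant and the initial value of state R
during the fit.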
"""
# Start by putting data in the right format
dN = len(data)
if dN == 0:
raise ValueError('No data passed to fit')
data_raw = data
data = []
for d in data_raw:
if type(d) is dict:
if 't' not in d:
raise ValueError('Data in dictionary format must have a '
't label')
dv = np.zeros(self._N+1)
dv[0] = d['t']
for i, s in enumerate(self._states):
dv[i+1] = d.get(s, np.nan)
else:
dv = np.array(d)
dv = np.where(dv == None, np.nan, dv)
data.append(dv)
data = np.array(data).astype(float)
data = data[np.argsort(data, axis=0)[:, 0]] # Sorting by time
# Define the time range
t = np.linspace(data[0, 0], data[-1, 0], steps)
t = np.concatenate([t, data[:, 0]])
# Now sort, and identify the data points
tsort = np.argsort(t)
t = t[tsort]
# Make it unique
t, uniq_inv = np.unique(t, return_inverse=True)
data_i = np.where(tsort - steps >= 0)[0]
C = self._cdata['C']
nC = len(C)
# Get constraints mask
mask = np.ones(nC+self._N)
y0 = np.ones(self._N)*np.nan
for i, name in enumerate(self._couplings.keys()):
if name in constraints:
self.edit_coupling_rate(name, constraints[name])
mask[i] = 0
for i, name in enumerate(self._states):
if name + '0' in constraints:
y0[i] = constraints[name + '0']
mask[nC+i] = 0
# Cost function
def costf(x):
self._cdata['C'] = x[:nC]
y0 = x[nC:]
traj = self.integrate(t, y0, use_gradient=True)
traj = np.array(list(traj.values())[:-1])
# Rebuild original array
traj = traj[:, uniq_inv, :]
# Gradient?
yref = traj[:, data_i, :]
err = (data[:, 1:]-yref[0])
err = np.where(np.isnan(data[:, 1:]), 0, err)
cost = np.sum(err**2)
return cost
# Gradient of cost function
def costfgrad(x):
self._cdata['C'] = x[:nC]
y0 = x[nC:]
traj = self.integrate(t, y0, use_gradient=True)
traj = np.array(list(traj.values())[:-1])
# Rebuild original array
traj = traj[:, uniq_inv, :]
# Gradient?
yref = traj[:, data_i, :]
err = (yref[0]-data[:, 1:])
err = np.where(np.isnan(data[:, 1:]), 0, err)
return np.sum(2*err[None]*yref[1:, :, :]*mask[:, None, None],
axis=(1, 2))
x0 = data[0, 1:]
x0 = np.where(np.isnan(x0), 0, x0)
x0 = np.where(np.isnan(y0), x0, y0) # Apply constraints
x0 = np.concatenate([self._cdata['C'], x0])
sol = minimize(costf, x0, jac=costfgrad)
x = sol.x
for i, name in enumerate(self._couplings.keys()):
self.edit_coupling_rate(name, x[i])
y0 = x[nC:]
traj = self.integrate(t, y0)['y']
traj = traj[uniq_inv, :]
# R2?
yref = traj[data_i, :]
err = (yref-data[:, 1:])
dataMask = 1.-np.isnan(data[:, 1:])
dataN =
|
np.sum(dataMask, axis=0)
|
numpy.sum
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import random
import numpy as np
import pytest
import quaternion
import habitat
from habitat.config.default import get_config
from habitat.tasks.nav.nav import (
MoveForwardAction,
NavigationEpisode,
NavigationGoal,
)
from habitat.tasks.utils import quaternion_rotate_vector
from habitat.utils.geometry_utils import angle_between_quaternions
from habitat.utils.test_utils import sample_non_stop_action
from habitat.utils.visualizations.utils import (
images_to_video,
observations_to_image,
)
def _random_episode(env, config):
random_location = env._sim.sample_navigable_point()
random_heading = np.random.uniform(-np.pi, np.pi)
random_rotation = [
0,
np.sin(random_heading / 2),
0,
np.cos(random_heading / 2),
]
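# [x, y, z, w] quaternion for a rotation of random_heading about the Y (up) axis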
env.episode_iterator = iter(
[
NavigationEpisode(
episode_id="0",
scene_id=config.SIMULATOR.SCENE,
start_position=random_location,
start_rotation=random_rotation,
goals=[],
)
]
)
def test_state_sensors():
config = get_config()
if not os.path.exists(config.SIMULATOR.SCENE):
pytest.skip("Please download Habitat test data to data folder.")
config.defrost()
config.TASK.SENSORS = ["HEADING_SENSOR", "COMPASS_SENSOR", "GPS_SENSOR"]
config.freeze()
env = habitat.Env(config=config, dataset=None)
env.reset()
random.seed(123)
np.random.seed(123)
for _ in range(100):
random_heading = np.random.uniform(-np.pi, np.pi)
random_rotation = [
0,
np.sin(random_heading / 2),
0,
np.cos(random_heading / 2),
]
env.episode_iterator = iter(
[
NavigationEpisode(
episode_id="0",
scene_id=config.SIMULATOR.SCENE,
start_position=[03.00611, 0.072_447, -2.67867],
start_rotation=random_rotation,
goals=[],
)
]
)
obs = env.reset()
heading = obs["heading"]
assert np.allclose(heading, [random_heading])
assert np.allclose(obs["compass"], [0.0], atol=1e-5)
assert
|
np.allclose(obs["gps"], [0.0, 0.0], atol=1e-5)
|
numpy.allclose
|
import os
import sys
sys.path.append(os.path.join('..', 'codes'))
def confusion_all_matrix(epoch=20, saved=True,
weight_path='../datas/vis_down/outputs/learned/',
outputs_path='../datas/vis_down/outputs/check/'):
'''
Create confusion matrices for the positive, unknown, and negative examples.
'''
# -------------------------------------------------------------------------
# Setup
import numpy as np
import os
import torch
from mmm import DataHandler as DH
from mmm import DatasetFlickr
from mmm import VisGCN
from mmm import VisUtils as VU
from torchvision import transforms
from tqdm import tqdm
os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
# Locations of the input data
input_path = '../datas/vis_down/inputs/'
category = DH.loadJson('category.json', input_path)
rep_category = DH.loadJson('upper_category.json', input_path)
vis_down_train = VU.down_anno(category, rep_category, 'train')
vis_down_validate = VU.down_anno(category, rep_category, 'validate')
transform = transforms.Compose(
[
transforms.RandomResizedCrop(
224, scale=(1.0, 1.0), ratio=(1.0, 1.0)
),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]
)
]
)
kwargs_DF = {
'train': {
'category': category,
'annotations': vis_down_train,
'transform': transform,
'image_path': input_path + 'images/train/'
},
'validate': {
'category': category,
'annotations': vis_down_validate,
'transform': transform,
'image_path': input_path + 'images/validate/'
}
}
train_dataset = DatasetFlickr(**kwargs_DF['train'])
val_dataset = DatasetFlickr(**kwargs_DF['validate'])
num_class = train_dataset.num_category()
# Load the mask
mask = VU.down_mask(rep_category, category, sim_thr=0.4, saved=False)
# Settings and load locations for the data used in training
gcn_settings = {
'category': category,
'rep_category': rep_category,
'relationship': DH.loadPickle('relationship.pickle', input_path),
'rep_weight': torch.load(input_path + 'rep_weight.pth'),
'feature_dimension': 2048
}
# Model setup
model = VisGCN(
class_num=num_class,
weight_decay=1e-4,
fix_mask=mask,
network_setting=gcn_settings,
)
model.loadmodel('{0:0=3}weight'.format(epoch), weight_path)
def _update_backprop_weight(labels, fmask):
'''
Specify the weights used when backpropagating the error. Entries where the error is not propagated are 0.
'''
labels = labels.data.cpu().numpy()
labels_y, labels_x = np.where(labels == 1)
labels_y = np.append(labels_y, labels_y[-1] + 1)
labels_x = np.append(labels_x, 0)
weight = np.zeros((labels.shape[0] + 1, labels.shape[1]), int)
row_p, columns_p = labels_y[0], [labels_x[0]]
weight[row_p] = fmask[labels_x[0]]
for row, column in zip(labels_y[1:], labels_x[1:]):
if row == row_p:
columns_p.append(column)
else:
if len(columns_p) > 1:
for y in columns_p:
weight[row_p][y] = 0
row_p, columns_p = row, [column]
weight[row] = weight[row] | fmask[column]
weight = weight[:-1]
weight = np.ones(labels.shape, int) - weight
return weight
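# The matrix returned above is 1 where the error should be backpropagated and 0
# where it should not.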
# ---Sorting based on the tags of the input images----------------------------
# 0: precision, 1: recall, 2: positive_1, 3: positive_all,
# 4: unknown_1, 5: unknown_all, 6: negative_1, 7: negative_all
def count_result(dataset):
from mmm import MakeBPWeight
loader = torch.utils.data.DataLoader(
dataset,
shuffle=True,
batch_size=1,
num_workers=4
)
bp_weight = MakeBPWeight(dataset, len(category), mask)
allnum = 0
counts = np.zeros((len(category), 8))
for locate, label, _ in tqdm(loader):
allnum += 1
fix_mask = _update_backprop_weight(label, mask)
predicts = model.predict(locate, labeling=True)
for idx, flg in enumerate(fix_mask[0]):
# For a given class category[idx]
if flg == 0:
# When the ground truth is unknown
if predicts[0][idx] == 1:
# If the prediction is 1
counts[idx][4] += 1
continue
if label[0][idx] == 0:
# When the ground truth is 0
if predicts[0][idx] == 1:
# If the prediction is 1
counts[idx][6] += 1
else:
# When the ground truth is 1
if predicts[0][idx] == 1:
# If the prediction is 1
counts[idx][2] += 1
for idx, (zero, one) in enumerate(bp_weight):
counts[idx][3] = one
counts[idx][5] = allnum - one - zero
counts[idx][7] = zero
if counts[idx][2] + counts[idx][6] != 0:
counts[idx][0] = counts[idx][2] / (counts[idx][2] + counts[idx][6])
if counts[idx][3] != 0:
counts[idx][1] = counts[idx][2] / counts[idx][3]
return counts
train_counts = count_result(train_dataset)
validate_counts = count_result(val_dataset)
if saved:
DH.saveNpy(
np.array(train_counts),
'cm_train_{0:0=3}'.format(epoch),
outputs_path
)
DH.saveNpy(
np.array(validate_counts),
'cm_validate_{0:0=3}'.format(epoch),
outputs_path
)
return np.array(train_counts), np.array(validate_counts)
def rep_confmat(saved=False, output_path='../datas/vis_down/outputs/check/'):
'''
Create a list of the change in accuracy before and after training,
each class, and each class's upper (representative) categories.
'''
import numpy as np
from mmm import DataHandler as DH
from tqdm import tqdm
epoch00 = DH.loadNpy(
'cm_train_000.npy',
'../datas/vis_down/outputs/check/learned_lmask/'
)
epoch20 = DH.loadNpy(
'cm_train_020.npy',
'../datas/vis_down/outputs/check/learned_lmask/'
)
category = DH.loadJson('../datas/vis_down/inputs/category.json')
rep_category = DH.loadJson('../datas/vis_down/inputs/upper_category.json')
ldf = DH.loadPickle('../datas/bases/local_df_area16_wocoth_new.pickle')
# -------------------------------------------------------------------------
# 0: label, 1: rep
# 2 ~ 9: confusion_all_matrix of epoch 0
# 10 ~ 17: confusion_all matrix of epoch 20
compare_list = []
for idx, cat in tqdm(enumerate(category)):
if cat in rep_category:
continue
row = [cat, ldf.loc[cat]['representative']]
row.extend(epoch00[idx])
row.extend(epoch20[idx])
compare_list.append(row)
compare_list = np.array(compare_list, dtype=object)
if saved:
DH.saveNpy(compare_list, 'compare_pr', output_path)
return compare_list
def hist_change(phase='train', barnum=3, binnum=5, saved=False):
import matplotlib.pyplot as plt
import numpy as np
import os
from mmm import DataHandler as DH
plt.rcParams['font.family'] = 'IPAexGothic'
# -------------------------------------------------------------------------
data = DH.loadNpy('../datas/vis_down/outputs/check/compare_pr.npy')
before = [[] for _ in range(barnum)]
after = [[] for _ in range(barnum)]
ml = 0
for item in data:
idx = len(item[1]) - 1
ml = max(idx, ml)
idx = min(idx, barnum - 1)
before[idx].append(item[3])
after[idx].append(item[11])
before = [item for item in before if item]
after = [item for item in after if item]
barnum = len(before)
before_heights = np.zeros((barnum, binnum))
after_heights = np.zeros((barnum, binnum))
thresholds = np.linspace(0.0, 1.0, binnum + 1)
for idx, item in enumerate(before):
bin_heights =
|
np.histogram(item, bins=binnum, range=(0.0, 1.0))
|
numpy.histogram
|
import h5py, code
import numpy as np
import pandas as pd
from pysnptools.snpreader import Bed
from scipy.stats import norm
from pandas import DataFrame
from tqdm import tqdm
import statsmodels.api as sm
from scipy.stats import linregress
def imputation_test(chromosomes,
imputed_prefix = '',
expected_prefix = "../UKBioRDE_revision/data/tmp/filtered_ukb_chr",
start = None,
end = None,
backup_threshold=0.01,
genotyping_error_threshold=0.01,
outname_prefix = "",
):
#Data files for chromosome i should be named in this fashion: "prefix{i}"
import matplotlib.pyplot as plt
chromosomes_expected_genes_o = []
chromosomes_expected_genes_pm = []
chromosomes_imputed_genes_o = []
chromosomes_imputed_genes_pm = []
chrom_sizes = []
parent_ratio_backups = []
sib_ratio_backups = []
estimated_genotyping_errors = []
for chromosome in tqdm(chromosomes):
with h5py.File(imputed_prefix+str(chromosome)+".hdf5",'r') as f:
gts = np.array(f["imputed_par_gts"]).astype(float)
fids = np.array(f["families"]).astype(str)
ped_array = np.array(f["pedigree"]).astype(str)
ped = pd.DataFrame(ped_array[1:], columns = ped_array[0])
non_duplicates = np.array(f["non_duplicates"])
parent_ratio_backup = np.array(f["parent_ratio_backup"])
sib_ratio_backup = np.array(f["sib_ratio_backup"])
estimated_genotyping_error = np.array(f["estimated_genotyping_error"])
rsids = np.array(f["bim_values"])[:,1]
if "maf_x" in f:
maf_x = np.array(f["maf_x"])
maf_coefs = np.array(f["maf_coefs"])
maf_TSS = np.array(f["maf_TSS"])
maf_RSS1 = np.array(f["maf_RSS1"])
maf_RSS2 = np.array(f["maf_RSS2"])
maf_R2_1 = np.array(f["maf_R2_1"])
maf_R2_2 = np.array(f["maf_R2_2"])
maf_larger1 = np.array(f["maf_larger1"])
maf_less0 = np.array(f["maf_less0"])
#TODO concat these across chroms
#Fix backup ratio start end
if start is not None:
non_duplicates = non_duplicates+start
parent_ratio_backups.append(parent_ratio_backup)
sib_ratio_backups.append(sib_ratio_backup)
estimated_genotyping_errors.append(estimated_genotyping_error)
expected = Bed(expected_prefix+str(chromosome)+".bed", count_A1 = True)
expected_gts = expected[:, non_duplicates].read().val.astype(float)
chrom_sizes.append(gts.shape[1])
expected_ids = expected.iid
iid_to_bed_index = {i:index for index, i in enumerate(expected_ids[:,1])}
#fids of control families start with _
    #this has the prefix _*_
index_of_families_in_imputation = {fid:index for index,fid in enumerate(fids)}
# no parent control starts with _o_
# only has father control starts with _p_
    # only has mother control starts with _m_
control_o_families = list({row["FID"][3:] for index, row in ped.iterrows() if row["FID"].startswith("_o_")})
#for each family select id of the parents
parent_ids = ped.groupby("FID").agg({
'FATHER_ID':lambda x: ([a for a in list(x) if a in ped["IID"].tolist()]+[None])[0],
'MOTHER_ID':lambda x: ([a for a in list(x) if a in ped["IID"].tolist()]+[None])[0],
})
parents_of_control_o_families = parent_ids.loc[control_o_families]
mother_indexes_control_o = [iid_to_bed_index[parents_of_control_o_families.loc[i, "MOTHER_ID"]] for i in control_o_families]
father_indexes_control_o = [iid_to_bed_index[parents_of_control_o_families.loc[i, "FATHER_ID"]] for i in control_o_families]
expected_parent_gts_control_o = (expected_gts[mother_indexes_control_o,:]+expected_gts[father_indexes_control_o,:])/2
expected_genes_o = expected_parent_gts_control_o
index_of_control_families_in_imputation_o = [index_of_families_in_imputation["_o_"+i] for i in control_o_families]
imputed_genes_o = gts[index_of_control_families_in_imputation_o,:]
control_p = list({row["FID"][3:] for index, row in ped.iterrows() if row["FID"].startswith("_p_")})
control_m = list({row["FID"][3:] for index, row in ped.iterrows() if row["FID"].startswith("_m_")})
control_pm_families = control_p + control_m
parent_of_control_m = parent_ids.loc[control_m]
parent_of_control_p = parent_ids.loc[control_p]
father_indexes_control_m = [iid_to_bed_index[parent_of_control_m.loc[i, "FATHER_ID"]] for i in control_m]
mother_indexes_control_p = [iid_to_bed_index[parent_of_control_p.loc[i, "MOTHER_ID"]] for i in control_p]
expected_parent_gts_control_pm = expected_gts[mother_indexes_control_p + father_indexes_control_m, :]
expected_genes_pm = expected_parent_gts_control_pm
index_of_control_families_in_imputation_pm = [index_of_families_in_imputation["_p_" + i] for i in control_p] + [index_of_families_in_imputation["_m_" + i] for i in control_m]
imputed_genes_pm = gts[index_of_control_families_in_imputation_pm,:]
chromosomes_expected_genes_o.append(expected_genes_o)
chromosomes_expected_genes_pm.append(expected_genes_pm)
chromosomes_imputed_genes_o.append(imputed_genes_o)
chromosomes_imputed_genes_pm.append(imputed_genes_pm)
whole_expected_genes_o = np.hstack(chromosomes_expected_genes_o)
whole_imputed_genes_o = np.hstack(chromosomes_imputed_genes_o)
whole_expected_genes_pm = np.hstack(chromosomes_expected_genes_pm)
whole_imputed_genes_pm = np.hstack(chromosomes_imputed_genes_pm)
parent_ratio_backup = np.hstack(parent_ratio_backups)
    sib_ratio_backup = np.hstack(sib_ratio_backups)
#!/usr/bin/env python3
import re
import os
import itertools
import pandas as pd
import numpy as np
import scipy.special as spc
import collections as cl
import scipy.stats as sta
import PCprophet.io_ as io
import PCprophet.stats_ as st
import PCprophet.parse_go as go_parser
# datatype which we use for mapping protein ids to a corresponding
# feature and label representation.
DataRec = cl.namedtuple("DataRec", "X y")
class BayesMANOVA:
"""
BayesMANOVA provides Bayesian calculations for differential
regulation for sets of variables. This may be used for identifying
differentially regulated gene expression time courses or
differentially regulated proteins based on protein characterising
feature vectors. The method extends also to biological entities
(pathways, biological processes or similar). It is in particular
also useful for assessing differential regulation of protein
complexes. To avoid excessive model parameters the model
assumptions are simple (no correlation among the multivariate
response dimensions). Improvements are possible though time
consuming.
"""
    @staticmethod
    def yok(y):
        # test whether we have replicates in all levels of y
        lvs = list(set(y))
        ok = len(y) >= 1 and len(lvs) >= 1
        for lv in lvs:
            ok = ok and np.sum(y == lv) >= 1
        return ok
def __init__(self, modeltype="naive", g=0.8, h=1.5, gam=0.025):
"""
modeltype: type of combination can be 'naive' for a
conditional independence type combination of
evidence accross variables in subsets or 'full'
for multivariate input vectors.
g,h: Gamma prior over noise precision. In multivariate
settings g and h specify the diagonal noise level.
               Defaults to 0.8, 1.5
gam: A g-prior like multiplicative factor which specifies the
diagonal precision of the parameter prior.
               Defaults to 0.025.
"""
self.modeltype = modeltype
self.g = g
self.h = h
self.gam = gam
def mrgllh(self, lbls, vals, m=None):
"""
calculate the marginal log likelihood of a MANOVA type
model. under a g-prior like setting. The type of model can
be adjusted by specifying lbls. If lbls contains only one
label, there is only one group of samples. This constitites
a suitable 'NULL model' which can be compared agains
general groupings of the samples.
args
lbls: a discrete vector of groups
vals: a [nsmpl x ndim] matrix of observations.
m: mean in Gaussian prior over Manova parameters
OUT
mrgllh: a scalar marginal likelihood.
"""
grps = list(set(lbls))
ngroups = len(grps)
nsampls, nin = vals.shape
if nsampls != len(lbls):
raise Exception("Mismatching dimensions!")
if m is None:
# we have no mean and use the sample mean as m.
m = np.mean(vals, axis=0)
if len(m) != nin:
raise Exception("Mismatching dimensions!")
# calculate the xi vectors.
allxi = []
alln = []
for cg in grps:
# working on group cg
ntau = sum(lbls == cg)
alln.append(ntau)
cxi = (self.gam * m + np.sum(vals[lbls == cg, :], axis=0)) / np.sqrt(
self.gam + ntau
) # correction!!
allxi.append(cxi.tolist())
# prepare calculation of the log marginal likelihood
allxi = np.array(allxi)
alln = np.array(alln)
g_ht = self.g + 0.5 * nsampls
# initialise h_hat with h
h_ht = np.array([self.h] * nin)
for d in range(nin):
# calculate h_ht
h_ht[d] = h_ht[d] + 0.5 * (
ngroups * self.gam * m[d] ** 2
+ sum(vals[:, d] ** 2)
- sum(allxi[:, d] ** 2)
)
# we may now express the log marginal likelihood:
lgmrgllh = nin * (self.g * np.log(self.h) - spc.gammaln(self.g))
lgmrgllh = lgmrgllh + 0.5 * (
nin * ngroups * np.log(self.gam) - nin * nsampls * np.log(2 * np.pi)
)
        lgmrgllh = (
            lgmrgllh
            # the per-group normalisers (gam + n_g) enter on the log scale
            + np.sum(-0.5 * nin * np.log(self.gam + alln))
            + np.sum(spc.gammaln(g_ht) - g_ht * np.log(h_ht))
        )
        return lgmrgllh
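# Hedged usage sketch (illustrative, not part of the original module): the
# difference between the grouped and the one-group ("NULL") marginal log
# likelihoods is a log Bayes factor; positive values favour the grouping.
def _mrgllh_demo():
    rng = np.random.default_rng(0)
    vals = np.vstack([rng.normal(0.0, 1.0, (10, 3)),
                      rng.normal(1.0, 1.0, (10, 3))])
    lbls = np.array([0] * 10 + [1] * 10)
    bm = BayesMANOVA()
    return bm.mrgllh(lbls, vals) - bm.mrgllh(np.zeros(20, dtype=int), vals)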
"""Climate data and mass-balance computations"""
# Built ins
import logging
import os
import datetime
import warnings
# External libs
import numpy as np
import netCDF4
import pandas as pd
import xarray as xr
from scipy import stats
from scipy import optimize as optimization
# Optional libs
try:
import salem
except ImportError:
pass
# Locals
from oggm import cfg
from oggm import utils
from oggm.core import centerlines
from oggm import entity_task, global_task
from oggm.exceptions import MassBalanceCalibrationError, InvalidParamsError
# Module logger
log = logging.getLogger(__name__)
@entity_task(log, writes=['climate_monthly', 'climate_info'])
def process_custom_climate_data(gdir):
"""Processes and writes the climate data from a user-defined climate file.
The input file must have a specific format (see
https://github.com/OGGM/oggm-sample-data test-files/histalp_merged_hef.nc
for an example).
This is the way OGGM used to do it for HISTALP before it got automatised.
Parameters
----------
gdir : :py:class:`oggm.GlacierDirectory`
the glacier directory to process
"""
if not (('climate_file' in cfg.PATHS) and
os.path.exists(cfg.PATHS['climate_file'])):
raise InvalidParamsError('Custom climate file not found')
if cfg.PARAMS['baseline_climate'] not in ['', 'CUSTOM']:
raise InvalidParamsError("When using custom climate data please set "
"PARAMS['baseline_climate'] to an empty "
"string or `CUSTOM`. Note also that you can "
"now use the `process_histalp_data` task for "
"automated HISTALP data processing.")
# read the file
fpath = cfg.PATHS['climate_file']
nc_ts = salem.GeoNetcdf(fpath)
# set temporal subset for the ts data (hydro years)
sm = cfg.PARAMS['hydro_month_' + gdir.hemisphere]
em = sm - 1 if (sm > 1) else 12
yrs = nc_ts.time.year
y0, y1 = yrs[0], yrs[-1]
if cfg.PARAMS['baseline_y0'] != 0:
y0 = cfg.PARAMS['baseline_y0']
if cfg.PARAMS['baseline_y1'] != 0:
y1 = cfg.PARAMS['baseline_y1']
nc_ts.set_period(t0='{}-{:02d}-01'.format(y0, sm),
t1='{}-{:02d}-01'.format(y1, em))
time = nc_ts.time
ny, r = divmod(len(time), 12)
if r != 0:
raise InvalidParamsError('Climate data should be full years')
# Units
assert nc_ts._nc.variables['hgt'].units.lower() in ['m', 'meters', 'meter',
'metres', 'metre']
assert nc_ts._nc.variables['temp'].units.lower() in ['degc', 'degrees',
'degree', 'c']
assert nc_ts._nc.variables['prcp'].units.lower() in ['kg m-2', 'l m-2',
'mm', 'millimeters',
'millimeter']
# geoloc
lon = nc_ts._nc.variables['lon'][:]
lat = nc_ts._nc.variables['lat'][:]
ilon = np.argmin(np.abs(lon - gdir.cenlon))
ilat = np.argmin(np.abs(lat - gdir.cenlat))
ref_pix_lon = lon[ilon]
ref_pix_lat = lat[ilat]
# read the data
temp = nc_ts.get_vardata('temp')
prcp = nc_ts.get_vardata('prcp')
hgt = nc_ts.get_vardata('hgt')
ttemp = temp[:, ilat-1:ilat+2, ilon-1:ilon+2]
itemp = ttemp[:, 1, 1]
thgt = hgt[ilat-1:ilat+2, ilon-1:ilon+2]
ihgt = thgt[1, 1]
thgt = thgt.flatten()
iprcp = prcp[:, ilat, ilon]
nc_ts.close()
# Should we compute the gradient?
use_grad = cfg.PARAMS['temp_use_local_gradient']
igrad = None
if use_grad:
igrad = np.zeros(len(time)) * np.NaN
for t, loct in enumerate(ttemp):
slope, _, _, p_val, _ = stats.linregress(thgt,
loct.flatten())
igrad[t] = slope if (p_val < 0.01) else np.NaN
gdir.write_monthly_climate_file(time, iprcp, itemp, ihgt,
ref_pix_lon, ref_pix_lat,
gradient=igrad)
# metadata
out = {'baseline_climate_source': fpath,
'baseline_hydro_yr_0': y0+1,
'baseline_hydro_yr_1': y1}
gdir.write_json(out, 'climate_info')
@entity_task(log, writes=['climate_monthly', 'climate_info'])
def process_cru_data(gdir):
"""Processes and writes the CRU baseline climate data for this glacier.
Interpolates the CRU TS data to the high-resolution CL2 climatologies
(provided with OGGM) and writes everything to a NetCDF file.
Parameters
----------
gdir : :py:class:`oggm.GlacierDirectory`
the glacier directory to process
"""
if cfg.PATHS.get('climate_file', None):
warnings.warn("You seem to have set a custom climate file for this "
"run, but are using the default CRU climate "
"file instead.")
if cfg.PARAMS['baseline_climate'] != 'CRU':
raise InvalidParamsError("cfg.PARAMS['baseline_climate'] should be "
"set to CRU")
# read the climatology
clfile = utils.get_cru_cl_file()
ncclim = salem.GeoNetcdf(clfile)
# and the TS data
nc_ts_tmp = salem.GeoNetcdf(utils.get_cru_file('tmp'), monthbegin=True)
nc_ts_pre = salem.GeoNetcdf(utils.get_cru_file('pre'), monthbegin=True)
# set temporal subset for the ts data (hydro years)
sm = cfg.PARAMS['hydro_month_' + gdir.hemisphere]
em = sm - 1 if (sm > 1) else 12
yrs = nc_ts_pre.time.year
y0, y1 = yrs[0], yrs[-1]
if cfg.PARAMS['baseline_y0'] != 0:
y0 = cfg.PARAMS['baseline_y0']
if cfg.PARAMS['baseline_y1'] != 0:
y1 = cfg.PARAMS['baseline_y1']
nc_ts_tmp.set_period(t0='{}-{:02d}-01'.format(y0, sm),
t1='{}-{:02d}-01'.format(y1, em))
nc_ts_pre.set_period(t0='{}-{:02d}-01'.format(y0, sm),
t1='{}-{:02d}-01'.format(y1, em))
time = nc_ts_pre.time
ny, r = divmod(len(time), 12)
assert r == 0
lon = gdir.cenlon
lat = gdir.cenlat
# This is guaranteed to work because I prepared the file (I hope)
ncclim.set_subset(corners=((lon, lat), (lon, lat)), margin=1)
# get climatology data
loc_hgt = ncclim.get_vardata('elev')
loc_tmp = ncclim.get_vardata('temp')
loc_pre = ncclim.get_vardata('prcp')
loc_lon = ncclim.get_vardata('lon')
loc_lat = ncclim.get_vardata('lat')
# see if the center is ok
if not np.isfinite(loc_hgt[1, 1]):
# take another candidate where finite
isok = np.isfinite(loc_hgt)
# wait: some areas are entirely NaNs, make the subset larger
_margin = 1
while not np.any(isok):
_margin += 1
ncclim.set_subset(corners=((lon, lat), (lon, lat)), margin=_margin)
loc_hgt = ncclim.get_vardata('elev')
isok = np.isfinite(loc_hgt)
if _margin > 1:
log.debug('(%s) I had to look up for far climate pixels: %s',
gdir.rgi_id, _margin)
# Take the first candidate (doesn't matter which)
lon, lat = ncclim.grid.ll_coordinates
lon = lon[isok][0]
lat = lat[isok][0]
# Resubset
ncclim.set_subset()
ncclim.set_subset(corners=((lon, lat), (lon, lat)), margin=1)
loc_hgt = ncclim.get_vardata('elev')
loc_tmp = ncclim.get_vardata('temp')
loc_pre = ncclim.get_vardata('prcp')
loc_lon = ncclim.get_vardata('lon')
loc_lat = ncclim.get_vardata('lat')
assert np.isfinite(loc_hgt[1, 1])
isok = np.isfinite(loc_hgt)
hgt_f = loc_hgt[isok].flatten()
assert len(hgt_f) > 0.
# Should we compute the gradient?
use_grad = cfg.PARAMS['temp_use_local_gradient']
ts_grad = None
if use_grad and len(hgt_f) >= 5:
ts_grad = np.zeros(12) * np.NaN
for i in range(12):
loc_tmp_mth = loc_tmp[i, ...][isok].flatten()
slope, _, _, p_val, _ = stats.linregress(hgt_f, loc_tmp_mth)
ts_grad[i] = slope if (p_val < 0.01) else np.NaN
# convert to a timeseries and hydrological years
ts_grad = ts_grad.tolist()
ts_grad = ts_grad[em:] + ts_grad[0:em]
ts_grad = np.asarray(ts_grad * ny)
# maybe this will throw out of bounds warnings
nc_ts_tmp.set_subset(corners=((lon, lat), (lon, lat)), margin=1)
nc_ts_pre.set_subset(corners=((lon, lat), (lon, lat)), margin=1)
# compute monthly anomalies
# of temp
ts_tmp = nc_ts_tmp.get_vardata('tmp', as_xarray=True)
ts_tmp_avg = ts_tmp.sel(time=slice('1961-01-01', '1990-12-01'))
ts_tmp_avg = ts_tmp_avg.groupby('time.month').mean(dim='time')
ts_tmp = ts_tmp.groupby('time.month') - ts_tmp_avg
# of precip
ts_pre = nc_ts_pre.get_vardata('pre', as_xarray=True)
ts_pre_avg = ts_pre.sel(time=slice('1961-01-01', '1990-12-01'))
ts_pre_avg = ts_pre_avg.groupby('time.month').mean(dim='time')
ts_pre_ano = ts_pre.groupby('time.month') - ts_pre_avg
    # Scaled anomalies are the default; the standard anomalies above
    # are used later where ts_pre_avg == 0
ts_pre = ts_pre.groupby('time.month') / ts_pre_avg
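    # e.g. a month with a 1961-1990 mean of 60 mm and a CRU value of 75 mm
    # gets a scaled anomaly of 1.25 (multiplied by the CL2 climatology
    # further below) and a standard anomaly of +15 mm (the fallback where
    # the climatology is 0)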
# interpolate to HR grid
if np.any(~np.isfinite(ts_tmp[:, 1, 1])):
# Extreme case, middle pix is not valid
# take any valid pix from the 3*3 (and hope there's one)
found_it = False
for idi in range(2):
for idj in range(2):
if np.all(np.isfinite(ts_tmp[:, idj, idi])):
ts_tmp[:, 1, 1] = ts_tmp[:, idj, idi]
ts_pre[:, 1, 1] = ts_pre[:, idj, idi]
ts_pre_ano[:, 1, 1] = ts_pre_ano[:, idj, idi]
found_it = True
if not found_it:
msg = '({}) there is no climate data'.format(gdir.rgi_id)
raise MassBalanceCalibrationError(msg)
elif np.any(~np.isfinite(ts_tmp)):
# maybe the side is nan, but we can do nearest
ts_tmp = ncclim.grid.map_gridded_data(ts_tmp.values, nc_ts_tmp.grid,
interp='nearest')
ts_pre = ncclim.grid.map_gridded_data(ts_pre.values, nc_ts_pre.grid,
interp='nearest')
ts_pre_ano = ncclim.grid.map_gridded_data(ts_pre_ano.values,
nc_ts_pre.grid,
interp='nearest')
else:
# We can do bilinear
ts_tmp = ncclim.grid.map_gridded_data(ts_tmp.values, nc_ts_tmp.grid,
interp='linear')
ts_pre = ncclim.grid.map_gridded_data(ts_pre.values, nc_ts_pre.grid,
interp='linear')
ts_pre_ano = ncclim.grid.map_gridded_data(ts_pre_ano.values,
nc_ts_pre.grid,
interp='linear')
# take the center pixel and add it to the CRU CL clim
# for temp
loc_tmp = xr.DataArray(loc_tmp[:, 1, 1], dims=['month'],
coords={'month': ts_tmp_avg.month})
ts_tmp = xr.DataArray(ts_tmp[:, 1, 1], dims=['time'],
coords={'time': time})
ts_tmp = ts_tmp.groupby('time.month') + loc_tmp
# for prcp
loc_pre = xr.DataArray(loc_pre[:, 1, 1], dims=['month'],
coords={'month': ts_pre_avg.month})
ts_pre = xr.DataArray(ts_pre[:, 1, 1], dims=['time'],
coords={'time': time})
ts_pre_ano = xr.DataArray(ts_pre_ano[:, 1, 1], dims=['time'],
coords={'time': time})
# scaled anomalies
ts_pre = ts_pre.groupby('time.month') * loc_pre
# standard anomalies
ts_pre_ano = ts_pre_ano.groupby('time.month') + loc_pre
# Correct infinite values with standard anomalies
ts_pre.values = np.where(np.isfinite(ts_pre.values),
ts_pre.values,
ts_pre_ano.values)
# The last step might create negative values (unlikely). Clip them
ts_pre.values = utils.clip_min(ts_pre.values, 0)
# done
loc_hgt = loc_hgt[1, 1]
loc_lon = loc_lon[1]
loc_lat = loc_lat[1]
assert np.isfinite(loc_hgt)
assert np.all(np.isfinite(ts_pre.values))
assert np.all(np.isfinite(ts_tmp.values))
gdir.write_monthly_climate_file(time, ts_pre.values, ts_tmp.values,
loc_hgt, loc_lon, loc_lat,
gradient=ts_grad)
source = nc_ts_tmp._nc.title[:10]
ncclim._nc.close()
nc_ts_tmp._nc.close()
nc_ts_pre._nc.close()
# metadata
out = {'baseline_climate_source': source,
'baseline_hydro_yr_0': y0+1,
'baseline_hydro_yr_1': y1}
gdir.write_json(out, 'climate_info')
@entity_task(log, writes=['climate_monthly', 'climate_info'])
def process_dummy_cru_file(gdir, sigma_temp=2, sigma_prcp=0.5, seed=None):
"""Create a simple baseline climate file for this glacier - for testing!
This simply reproduces the climatology with a little randomness in it.
TODO: extend the functionality by allowing a monthly varying sigma
Parameters
----------
gdir : GlacierDirectory
the glacier directory
sigma_temp : float
the standard deviation of the random timeseries (set to 0 for constant
ts)
sigma_prcp : float
the standard deviation of the random timeseries (set to 0 for constant
ts)
seed : int
the RandomState seed
"""
# read the climatology
clfile = utils.get_cru_cl_file()
ncclim = salem.GeoNetcdf(clfile)
# set temporal subset for the ts data (hydro years)
sm = cfg.PARAMS['hydro_month_' + gdir.hemisphere]
em = sm - 1 if (sm > 1) else 12
y0, y1 = 1901, 2018
if cfg.PARAMS['baseline_y0'] != 0:
y0 = cfg.PARAMS['baseline_y0']
if cfg.PARAMS['baseline_y1'] != 0:
y1 = cfg.PARAMS['baseline_y1']
time = pd.date_range(start='{}-{:02d}-01'.format(y0, sm),
end='{}-{:02d}-01'.format(y1, em),
freq='MS')
ny, r = divmod(len(time), 12)
assert r == 0
lon = gdir.cenlon
lat = gdir.cenlat
# This is guaranteed to work because I prepared the file (I hope)
ncclim.set_subset(corners=((lon, lat), (lon, lat)), margin=1)
# get climatology data
loc_hgt = ncclim.get_vardata('elev')
loc_tmp = ncclim.get_vardata('temp')
loc_pre = ncclim.get_vardata('prcp')
loc_lon = ncclim.get_vardata('lon')
loc_lat = ncclim.get_vardata('lat')
# see if the center is ok
if not np.isfinite(loc_hgt[1, 1]):
# take another candidate where finite
isok = np.isfinite(loc_hgt)
# wait: some areas are entirely NaNs, make the subset larger
_margin = 1
while not np.any(isok):
_margin += 1
ncclim.set_subset(corners=((lon, lat), (lon, lat)), margin=_margin)
loc_hgt = ncclim.get_vardata('elev')
isok = np.isfinite(loc_hgt)
if _margin > 1:
log.debug('(%s) I had to look up for far climate pixels: %s',
gdir.rgi_id, _margin)
# Take the first candidate (doesn't matter which)
lon, lat = ncclim.grid.ll_coordinates
lon = lon[isok][0]
lat = lat[isok][0]
# Resubset
ncclim.set_subset()
ncclim.set_subset(corners=((lon, lat), (lon, lat)), margin=1)
loc_hgt = ncclim.get_vardata('elev')
loc_tmp = ncclim.get_vardata('temp')
loc_pre = ncclim.get_vardata('prcp')
loc_lon = ncclim.get_vardata('lon')
loc_lat = ncclim.get_vardata('lat')
assert np.isfinite(loc_hgt[1, 1])
isok = np.isfinite(loc_hgt)
hgt_f = loc_hgt[isok].flatten()
assert len(hgt_f) > 0.
# Should we compute the gradient?
use_grad = cfg.PARAMS['temp_use_local_gradient']
ts_grad = None
if use_grad and len(hgt_f) >= 5:
ts_grad = np.zeros(12) * np.NaN
for i in range(12):
loc_tmp_mth = loc_tmp[i, ...][isok].flatten()
slope, _, _, p_val, _ = stats.linregress(hgt_f, loc_tmp_mth)
ts_grad[i] = slope if (p_val < 0.01) else np.NaN
# convert to a timeseries and hydrological years
ts_grad = ts_grad.tolist()
ts_grad = ts_grad[em:] + ts_grad[0:em]
ts_grad = np.asarray(ts_grad * ny)
# Make DataArrays
rng = np.random.RandomState(seed)
loc_tmp = xr.DataArray(loc_tmp[:, 1, 1], dims=['month'],
coords={'month': np.arange(1, 13)})
ts_tmp = rng.randn(len(time)) * sigma_temp
ts_tmp = xr.DataArray(ts_tmp, dims=['time'],
coords={'time': time})
loc_pre = xr.DataArray(loc_pre[:, 1, 1], dims=['month'],
coords={'month': np.arange(1, 13)})
ts_pre = utils.clip_min(rng.randn(len(time)) * sigma_prcp + 1, 0)
ts_pre = xr.DataArray(ts_pre, dims=['time'],
coords={'time': time})
# Create the time series
ts_tmp = ts_tmp.groupby('time.month') + loc_tmp
ts_pre = ts_pre.groupby('time.month') * loc_pre
# done
loc_hgt = loc_hgt[1, 1]
loc_lon = loc_lon[1]
loc_lat = loc_lat[1]
assert np.isfinite(loc_hgt)
gdir.write_monthly_climate_file(time, ts_pre.values, ts_tmp.values,
loc_hgt, loc_lon, loc_lat,
gradient=ts_grad)
source = 'CRU CL2 and some randomness'
ncclim._nc.close()
# metadata
out = {'baseline_climate_source': source,
'baseline_hydro_yr_0': y0+1,
'baseline_hydro_yr_1': y1}
gdir.write_json(out, 'climate_info')
@entity_task(log, writes=['climate_monthly', 'climate_info'])
def process_histalp_data(gdir):
"""Processes and writes the HISTALP baseline climate data for this glacier.
Extracts the nearest timeseries and writes everything to a NetCDF file.
Parameters
----------
gdir : :py:class:`oggm.GlacierDirectory`
the glacier directory to process
"""
if cfg.PATHS.get('climate_file', None):
warnings.warn("You seem to have set a custom climate file for this "
"run, but are using the default HISTALP climate file "
"instead.")
if cfg.PARAMS['baseline_climate'] != 'HISTALP':
raise InvalidParamsError("cfg.PARAMS['baseline_climate'] should be "
"set to HISTALP.")
# read the time out of the pure netcdf file
ft = utils.get_histalp_file('tmp')
fp = utils.get_histalp_file('pre')
with utils.ncDataset(ft) as nc:
vt = nc.variables['time']
assert vt[0] == 0
assert vt[-1] == vt.shape[0] - 1
t0 = vt.units.split(' since ')[1][:7]
time_t = pd.date_range(start=t0, periods=vt.shape[0], freq='MS')
with utils.ncDataset(fp) as nc:
vt = nc.variables['time']
assert vt[0] == 0.5
assert vt[-1] == vt.shape[0] - .5
t0 = vt.units.split(' since ')[1][:7]
time_p = pd.date_range(start=t0, periods=vt.shape[0], freq='MS')
# Now open with salem
nc_ts_tmp = salem.GeoNetcdf(ft, time=time_t)
nc_ts_pre = salem.GeoNetcdf(fp, time=time_p)
# set temporal subset for the ts data (hydro years)
# the reference time is given by precip, which is shorter
sm = cfg.PARAMS['hydro_month_nh']
em = sm - 1 if (sm > 1) else 12
yrs = nc_ts_pre.time.year
y0, y1 = yrs[0], yrs[-1]
if cfg.PARAMS['baseline_y0'] != 0:
y0 = cfg.PARAMS['baseline_y0']
if cfg.PARAMS['baseline_y1'] != 0:
y1 = cfg.PARAMS['baseline_y1']
nc_ts_tmp.set_period(t0='{}-{:02d}-01'.format(y0, sm),
t1='{}-{:02d}-01'.format(y1, em))
nc_ts_pre.set_period(t0='{}-{:02d}-01'.format(y0, sm),
t1='{}-{:02d}-01'.format(y1, em))
time = nc_ts_pre.time
ny, r = divmod(len(time), 12)
assert r == 0
# Units
assert nc_ts_tmp._nc.variables['HSURF'].units.lower() in ['m', 'meters',
'meter',
'metres',
'metre']
assert nc_ts_tmp._nc.variables['T_2M'].units.lower() in ['degc', 'degrees',
'degrees celcius',
'degree', 'c']
assert nc_ts_pre._nc.variables['TOT_PREC'].units.lower() in ['kg m-2',
'l m-2', 'mm',
'millimeters',
'millimeter']
# geoloc
lon = gdir.cenlon
lat = gdir.cenlat
nc_ts_tmp.set_subset(corners=((lon, lat), (lon, lat)), margin=1)
nc_ts_pre.set_subset(corners=((lon, lat), (lon, lat)), margin=1)
# read the data
temp = nc_ts_tmp.get_vardata('T_2M')
prcp = nc_ts_pre.get_vardata('TOT_PREC')
hgt = nc_ts_tmp.get_vardata('HSURF')
ref_lon = nc_ts_tmp.get_vardata('lon')
ref_lat = nc_ts_tmp.get_vardata('lat')
source = nc_ts_tmp._nc.title[:7]
nc_ts_tmp._nc.close()
nc_ts_pre._nc.close()
# Should we compute the gradient?
use_grad = cfg.PARAMS['temp_use_local_gradient']
igrad = None
if use_grad:
igrad = np.zeros(len(time)) * np.NaN
for t, loct in enumerate(temp):
slope, _, _, p_val, _ = stats.linregress(hgt.flatten(),
loct.flatten())
igrad[t] = slope if (p_val < 0.01) else np.NaN
gdir.write_monthly_climate_file(time, prcp[:, 1, 1], temp[:, 1, 1],
hgt[1, 1], ref_lon[1], ref_lat[1],
gradient=igrad)
# metadata
out = {'baseline_climate_source': source,
'baseline_hydro_yr_0': y0 + 1,
'baseline_hydro_yr_1': y1}
gdir.write_json(out, 'climate_info')
def mb_climate_on_height(gdir, heights, *, time_range=None, year_range=None):
"""Mass-balance climate of the glacier at a specific height
Reads the glacier's monthly climate data file and computes the
temperature "energies" (temp above 0) and solid precipitation at the
required height.
All MB parameters are considered here! (i.e. melt temp, precip scaling
factor, etc.)
Parameters
----------
gdir : GlacierDirectory
the glacier directory
heights: ndarray
a 1D array of the heights (in meter) where you want the data
time_range : [datetime, datetime], optional
default is to read all data but with this you
can provide a [t0, t1] bounds (inclusive).
year_range : [int, int], optional
Provide a [y0, y1] year range to get the data for specific
(hydrological) years only. Easier to use than the time bounds above.
Returns
-------
(time, tempformelt, prcpsol)::
- time: array of shape (nt,)
- tempformelt: array of shape (len(heights), nt)
- prcpsol: array of shape (len(heights), nt)
"""
if year_range is not None:
sm = cfg.PARAMS['hydro_month_' + gdir.hemisphere]
em = sm - 1 if (sm > 1) else 12
t0 = datetime.datetime(year_range[0]-1, sm, 1)
t1 = datetime.datetime(year_range[1], em, 1)
return mb_climate_on_height(gdir, heights, time_range=[t0, t1])
# Parameters
temp_all_solid = cfg.PARAMS['temp_all_solid']
temp_all_liq = cfg.PARAMS['temp_all_liq']
temp_melt = cfg.PARAMS['temp_melt']
prcp_fac = cfg.PARAMS['prcp_scaling_factor']
default_grad = cfg.PARAMS['temp_default_gradient']
g_minmax = cfg.PARAMS['temp_local_gradient_bounds']
# Read file
igrad = None
with utils.ncDataset(gdir.get_filepath('climate_monthly'), mode='r') as nc:
# time
time = nc.variables['time']
time = netCDF4.num2date(time[:], time.units)
if time_range is not None:
p0 = np.where(time == time_range[0])[0]
try:
p0 = p0[0]
except IndexError:
raise MassBalanceCalibrationError('time_range[0] not found in '
'file')
p1 = np.where(time == time_range[1])[0]
try:
p1 = p1[0]
except IndexError:
raise MassBalanceCalibrationError('time_range[1] not found in '
'file')
else:
p0 = 0
p1 = len(time)-1
time = time[p0:p1+1]
# Read timeseries
itemp = nc.variables['temp'][p0:p1+1]
iprcp = nc.variables['prcp'][p0:p1+1]
if 'gradient' in nc.variables:
igrad = nc.variables['gradient'][p0:p1+1]
# Security for stuff that can happen with local gradients
igrad = np.where(~np.isfinite(igrad), default_grad, igrad)
igrad = utils.clip_array(igrad, g_minmax[0], g_minmax[1])
ref_hgt = nc.ref_hgt
# Default gradient?
if igrad is None:
igrad = itemp * 0 + default_grad
# Correct precipitation
iprcp *= prcp_fac
# For each height pixel:
# Compute temp and tempformelt (temperature above melting threshold)
npix = len(heights)
grad_temp = np.atleast_2d(igrad).repeat(npix, 0)
grad_temp *= (heights.repeat(len(time)).reshape(grad_temp.shape) - ref_hgt)
temp2d = np.atleast_2d(itemp).repeat(npix, 0) + grad_temp
temp2dformelt = temp2d - temp_melt
temp2dformelt = utils.clip_min(temp2dformelt, 0)
# Compute solid precipitation from total precipitation
prcpsol = np.atleast_2d(iprcp).repeat(npix, 0)
fac = 1 - (temp2d - temp_all_solid) / (temp_all_liq - temp_all_solid)
fac = utils.clip_array(fac, 0, 1)
prcpsol = prcpsol * fac
return time, temp2dformelt, prcpsol
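# Hedged numeric sketch of the height scaling above, assuming OGGM-like
# defaults (temp_melt=-1, temp_all_solid=0, temp_all_liq=2) and a
# -6.5 K/km gradient:
def _lapse_demo():
    ref_hgt, height, grad = 2000.0, 2500.0, -0.0065   # m, m, K per m
    temp = 2.0 + grad * (height - ref_hgt)            # 2 degC at ref -> -1.25
    tempformelt = max(temp - (-1.0), 0.0)             # no melt energy left
    solid_frac = min(max(1 - (temp - 0.0) / (2.0 - 0.0), 0.0), 1.0)
    return temp, tempformelt, solid_frac              # (-1.25, 0.0, 1.0)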
def mb_yearly_climate_on_height(gdir, heights, *,
year_range=None, flatten=False):
"""Yearly mass-balance climate of the glacier at a specific height
See also: mb_climate_on_height
Parameters
----------
gdir : GlacierDirectory
the glacier directory
heights: ndarray
a 1D array of the heights (in meter) where you want the data
year_range : [int, int], optional
Provide a [y0, y1] year range to get the data for specific
(hydrological) years only.
flatten : bool
for some applications (glacier average MB) it's ok to flatten the
data (average over height) prior to annual summing.
Returns
-------
(years, tempformelt, prcpsol)::
- years: array of shape (ny,)
- tempformelt: array of shape (len(heights), ny) (or ny if flatten
is set)
- prcpsol: array of shape (len(heights), ny) (or ny if flatten
is set)
"""
time, temp, prcp = mb_climate_on_height(gdir, heights,
year_range=year_range)
ny, r = divmod(len(time), 12)
if r != 0:
raise InvalidParamsError('Climate data should be N full years '
'exclusively')
# Last year gives the tone of the hydro year
years = np.arange(time[-1].year-ny+1, time[-1].year+1, 1)
if flatten:
# Spatial average
temp_yr = np.zeros(len(years))
prcp_yr = np.zeros(len(years))
temp = np.mean(temp, axis=0)
prcp = np.mean(prcp, axis=0)
for i, y in enumerate(years):
temp_yr[i] = np.sum(temp[i*12:(i+1)*12])
prcp_yr[i] = np.sum(prcp[i*12:(i+1)*12])
else:
# Annual prcp and temp for each point (no spatial average)
temp_yr = np.zeros((len(heights), len(years)))
prcp_yr = np.zeros((len(heights), len(years)))
for i, y in enumerate(years):
temp_yr[:, i] = np.sum(temp[:, i*12:(i+1)*12], axis=1)
prcp_yr[:, i] = np.sum(prcp[:, i*12:(i+1)*12], axis=1)
return years, temp_yr, prcp_yr
def mb_yearly_climate_on_glacier(gdir, *, year_range=None):
"""Yearly mass-balance climate at all glacier heights,
multiplied with the flowlines widths. (all in pix coords.)
See also: mb_climate_on_height
Parameters
----------
gdir : GlacierDirectory
the glacier directory
year_range : [int, int], optional
Provide a [y0, y1] year range to get the data for specific
(hydrological) years only.
Returns
-------
(years, tempformelt, prcpsol)::
- years: array of shape (ny)
- tempformelt: array of shape (ny)
- prcpsol: array of shape (ny)
"""
flowlines = gdir.read_pickle('inversion_flowlines')
heights = np.array([])
widths = np.array([])
for fl in flowlines:
heights = np.append(heights, fl.surface_h)
widths = np.append(widths, fl.widths)
years, temp, prcp = mb_yearly_climate_on_height(gdir, heights,
year_range=year_range,
flatten=False)
temp = np.average(temp, axis=0, weights=widths)
prcp = np.average(prcp, axis=0, weights=widths)
return years, temp, prcp
@entity_task(log, writes=['climate_info'])
def glacier_mu_candidates(gdir):
"""Computes the mu candidates, glacier wide.
    For each 31 year-period centered on the year of interest, mu is the
temperature sensitivity necessary for the glacier with its current shape
to be in equilibrium with its climate.
This task is just for documentation and testing! It is not used in
production anymore.
Parameters
----------
gdir : :py:class:`oggm.GlacierDirectory`
the glacier directory to process
"""
warnings.warn('The task `glacier_mu_candidates` is deprecated. It should '
'only be used for testing.', DeprecationWarning)
mu_hp = int(cfg.PARAMS['mu_star_halfperiod'])
    # Only get the years where we consider looking for tstar
y0, y1 = cfg.PARAMS['tstar_search_window']
ci = gdir.read_json('climate_info')
y0 = y0 or ci['baseline_hydro_yr_0']
y1 = y1 or ci['baseline_hydro_yr_1']
years, temp_yr, prcp_yr = mb_yearly_climate_on_glacier(gdir,
year_range=[y0, y1])
# Compute mu for each 31-yr climatological period
ny = len(years)
mu_yr_clim = np.zeros(ny) * np.NaN
for i, y in enumerate(years):
# Ignore begin and end
if ((i-mu_hp) < 0) or ((i+mu_hp) >= ny):
continue
t_avg = np.mean(temp_yr[i-mu_hp:i+mu_hp+1])
if t_avg > 1e-3: # if too cold no melt possible
prcp_ts = prcp_yr[i-mu_hp:i+mu_hp+1]
mu_yr_clim[i] = np.mean(prcp_ts) / t_avg
    # Check that we found at least one mustar
if np.sum(np.isfinite(mu_yr_clim)) < 1:
raise MassBalanceCalibrationError('({}) no mustar candidates found.'
.format(gdir.rgi_id))
# Write
return pd.Series(data=mu_yr_clim, index=years)
@entity_task(log, writes=['climate_info'])
def t_star_from_refmb(gdir, mbdf=None, glacierwide=None):
"""Computes the ref t* for the glacier, given a series of MB measurements.
Parameters
----------
gdir : oggm.GlacierDirectory
mbdf: a pd.Series containing the observed MB data indexed by year
if None, read automatically from the reference data
Returns
-------
A dict: {t_star:[], bias:[]}
"""
from oggm.core.massbalance import MultipleFlowlineMassBalance
if glacierwide is None:
glacierwide = cfg.PARAMS['tstar_search_glacierwide']
# Be sure we have no marine terminating glacier
assert not gdir.is_tidewater
# Reference time series
if mbdf is None:
mbdf = gdir.get_ref_mb_data()['ANNUAL_BALANCE']
# which years to look at
ref_years = mbdf.index.values
    # Average observed mass-balance
ref_mb = np.mean(mbdf)
# Compute one mu candidate per year and the associated statistics
    # Only get the years where we consider looking for tstar
y0, y1 = cfg.PARAMS['tstar_search_window']
ci = gdir.read_json('climate_info')
y0 = y0 or ci['baseline_hydro_yr_0']
y1 = y1 or ci['baseline_hydro_yr_1']
years = np.arange(y0, y1+1)
ny = len(years)
mu_hp = int(cfg.PARAMS['mu_star_halfperiod'])
mb_per_mu = pd.Series(index=years)
if glacierwide:
# The old (but fast) method to find t*
_, temp, prcp = mb_yearly_climate_on_glacier(gdir, year_range=[y0, y1])
# which years to look at
selind = np.searchsorted(years, mbdf.index)
sel_temp = temp[selind]
sel_prcp = prcp[selind]
sel_temp = np.mean(sel_temp)
sel_prcp = np.mean(sel_prcp)
for i, y in enumerate(years):
# Ignore begin and end
if ((i - mu_hp) < 0) or ((i + mu_hp) >= ny):
continue
# Compute the mu candidate
t_avg = np.mean(temp[i - mu_hp:i + mu_hp + 1])
if t_avg < 1e-3: # if too cold no melt possible
continue
mu = np.mean(prcp[i - mu_hp:i + mu_hp + 1]) / t_avg
# Apply it
            mb_per_mu[y] = np.mean(sel_prcp - mu * sel_temp)
import sys
# import os
from pprint import pprint
import numpy as np
import numpy.random as npr
import climin
# os.chdir(os.path.dirname(os.path.abspath(__file__)))
sys.path.append('..')
from likelihoods.gaussian import Gaussian
from likelihoods.categorical import Categorical
from hetmogp.het_likelihood import HetLikelihood
from hetmogp.svmogp import SVMOGP
# from hetmogp.util import vem_algorithm as VEM
from hetmogp.util import latent_functions_prior
import matplotlib.pyplot as plt
M = 20
Q = 3
likelihoods_list = [Gaussian(sigma=1), Categorical(4)]
likelihood = HetLikelihood(likelihoods_list)
Y_metadata = likelihood.generate_metadata()
T = len(Y_metadata['task_index'])
D = likelihood.num_output_functions(Y_metadata)
npr.seed(1)
pprint((Y_metadata, T, D))
X1 = np.sort(npr.rand(600))[:, None]
X2 = np.sort(npr.rand(500))[:, None]
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# flatbuffers needs importlib.util but fails to import it itself.
import importlib.util # noqa: F401
from typing import List
import jaxlib.mlir.ir as ir
import jaxlib.mlir.dialects.mhlo as mhlo
from . import _pocketfft
from . import pocketfft_flatbuffers_py_generated as pd
import numpy as np
import flatbuffers
from jaxlib import xla_client
for _name, _value in _pocketfft.registrations().items():
xla_client.register_custom_call_target(_name, _value, platform="cpu")
FftType = xla_client.FftType
flatbuffers_version_2 = hasattr(flatbuffers, "__version__")
def _pocketfft_descriptor(shape: List[int], dtype, fft_type: FftType,
fft_lengths: List[int]) -> bytes:
n = len(shape)
assert len(fft_lengths) >= 1
assert len(fft_lengths) <= n, (fft_lengths, n)
builder = flatbuffers.Builder(128)
forward = fft_type in (FftType.FFT, FftType.RFFT)
if fft_type == FftType.RFFT:
pocketfft_type = pd.PocketFftType.R2C
assert dtype in (np.float32, np.float64), dtype
out_dtype = np.dtype(np.complex64 if dtype == np.float32 else np.complex128)
pocketfft_dtype = (
pd.PocketFftDtype.COMPLEX64
if dtype == np.float32 else pd.PocketFftDtype.COMPLEX128)
assert shape[-len(fft_lengths):] == fft_lengths, (shape, fft_lengths)
out_shape = list(shape)
out_shape[-1] = out_shape[-1] // 2 + 1
elif fft_type == FftType.IRFFT:
pocketfft_type = pd.PocketFftType.C2R
assert np.issubdtype(dtype, np.complexfloating), dtype
out_dtype = np.dtype(np.float32 if dtype == np.complex64 else np.float64)
pocketfft_dtype = (
pd.PocketFftDtype.COMPLEX64
if dtype == np.complex64 else pd.PocketFftDtype.COMPLEX128)
assert shape[-len(fft_lengths):-1] == fft_lengths[:-1]
out_shape = list(shape)
out_shape[-1] = fft_lengths[-1]
assert (out_shape[-1] // 2 + 1) == shape[-1]
else:
pocketfft_type = pd.PocketFftType.C2C
assert np.issubdtype(dtype, np.complexfloating), dtype
out_dtype = dtype
pocketfft_dtype = (
pd.PocketFftDtype.COMPLEX64
if dtype == np.complex64 else pd.PocketFftDtype.COMPLEX128)
assert shape[-len(fft_lengths):] == fft_lengths, (shape, fft_lengths)
out_shape = shape
# PocketFft does not allow size 0 dimensions.
if 0 in shape or 0 in out_shape:
return b"", out_dtype, out_shape
# Builds a PocketFftDescriptor flatbuffer. This descriptor is passed to the
# C++ kernel to describe the FFT to perform.
pd.PocketFftDescriptorStartShapeVector(builder, n)
for d in reversed(shape if fft_type != FftType.IRFFT else out_shape):
builder.PrependUint64(d)
if flatbuffers_version_2:
pocketfft_shape = builder.EndVector()
else:
pocketfft_shape = builder.EndVector(n)
pd.PocketFftDescriptorStartStridesInVector(builder, n)
stride = dtype.itemsize
for d in reversed(shape):
builder.PrependUint64(stride)
stride *= d
if flatbuffers_version_2:
strides_in = builder.EndVector()
else:
strides_in = builder.EndVector(n)
pd.PocketFftDescriptorStartStridesOutVector(builder, n)
stride = out_dtype.itemsize
for d in reversed(out_shape):
builder.PrependUint64(stride)
stride *= d
if flatbuffers_version_2:
strides_out = builder.EndVector()
else:
strides_out = builder.EndVector(n)
pd.PocketFftDescriptorStartAxesVector(builder, len(fft_lengths))
for d in range(len(fft_lengths)):
builder.PrependUint32(n - d - 1)
if flatbuffers_version_2:
axes = builder.EndVector()
else:
axes = builder.EndVector(len(fft_lengths))
scale = 1. if forward else (1. / np.prod(fft_lengths))
pd.PocketFftDescriptorStart(builder)
pd.PocketFftDescriptorAddDtype(builder, pocketfft_dtype)
pd.PocketFftDescriptorAddFftType(builder, pocketfft_type)
pd.PocketFftDescriptorAddShape(builder, pocketfft_shape)
pd.PocketFftDescriptorAddStridesIn(builder, strides_in)
pd.PocketFftDescriptorAddStridesOut(builder, strides_out)
pd.PocketFftDescriptorAddAxes(builder, axes)
pd.PocketFftDescriptorAddForward(builder, forward)
pd.PocketFftDescriptorAddScale(builder, scale)
descriptor = pd.PocketFftDescriptorEnd(builder)
builder.Finish(descriptor)
return builder.Output(), out_dtype, out_shape
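# Illustrative helper (not part of jaxlib): the prepend loops above encode
# row-major byte strides, with the innermost stride equal to the itemsize.
def _row_major_byte_strides(shape, itemsize):
    strides, stride = [], itemsize
    for d in reversed(shape):
        strides.insert(0, stride)
        stride *= d
    return strides  # _row_major_byte_strides([2, 3], 8) -> [24, 8]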
def pocketfft_mhlo(a, dtype, *, fft_type: FftType, fft_lengths: List[int]):
"""PocketFFT kernel for CPU."""
a_type = ir.RankedTensorType(a.type)
n = len(a_type.shape)
fft_lengths = list(fft_lengths)
descriptor_bytes, out_dtype, out_shape = _pocketfft_descriptor(
list(a_type.shape), dtype, fft_type, fft_lengths)
if out_dtype == np.float32:
out_type = ir.F32Type.get()
elif out_dtype == np.float64:
out_type = ir.F64Type.get()
elif out_dtype == np.complex64:
out_type = ir.ComplexType.get(ir.F32Type.get())
elif out_dtype == np.complex128:
out_type = ir.ComplexType.get(ir.F64Type.get())
else:
raise ValueError(f"Unknown output type {out_dtype}")
if 0 in a_type.shape or 0 in out_shape:
zero = mhlo.ConstOp(ir.RankedTensorType.get([], out_type),
ir.DenseElementsAttr.get(np.array(0, dtype=out_dtype),
type=out_type))
return mhlo.BroadcastOp(
ir.RankedTensorType.get(out_shape, out_type),
zero,
ir.DenseElementsAttr.get(np.asarray(out_shape, np.int64))).result
u8_type = ir.IntegerType.get_unsigned(8)
descriptor = mhlo.ConstOp(
ir.RankedTensorType.get([len(descriptor_bytes)], u8_type),
      ir.DenseElementsAttr.get(
          np.frombuffer(descriptor_bytes, dtype=np.uint8)))
import numpy as np
import utils.matrix_utils as matrix_utils
from bayesian_framework.bayesian_filter_type import BayesianFilterType, sqrt_sigma_point_filter, linear_kalman_family, sigma_point_family
from bayesian_framework.inference.stochastic_models.covariance_type import CovarianceType, is_sqrt_like
def to_full_covariance(cov: np.ndarray, source_cov_type: CovarianceType) -> np.ndarray:
if source_cov_type == CovarianceType.diag:
return matrix_utils.ensure_diagonal_matrix(cov)
elif source_cov_type == CovarianceType.sqrt_diag:
cov_diag = matrix_utils.ensure_diagonal_matrix(cov)
return cov_diag @ cov_diag.T.conj()
elif source_cov_type == CovarianceType.full:
return cov
elif source_cov_type == CovarianceType.sqrt:
return cov @ cov.T.conj()
else:
raise Exception("Not supported cov_type: {0}".format(source_cov_type.name))
def to_mixture_full_covariance(cov: np.ndarray, source_cov_type: CovarianceType) -> np.ndarray:
is_mixture = cov.ndim == 3
cov_list = cov if is_mixture else [cov]
return np.asarray(
list(map(lambda x: to_full_covariance(x, source_cov_type), cov_list))
)
def to_sqrt_covariance(cov: np.ndarray, source_cov_type: CovarianceType) -> np.ndarray:
if source_cov_type == CovarianceType.diag:
return np.linalg.cholesky(
matrix_utils.ensure_diagonal_matrix(cov)
)
elif source_cov_type == CovarianceType.sqrt_diag:
return matrix_utils.ensure_diagonal_matrix(cov)
elif source_cov_type == CovarianceType.full:
return np.linalg.cholesky(cov)
elif source_cov_type == CovarianceType.sqrt:
return cov
else:
raise Exception("Not supported cov_type: {value}".format(value=source_cov_type.name))
def to_mixture_sqrt_covariance(cov: np.ndarray, source_cov_type: CovarianceType) -> np.ndarray:
is_mixture = cov.ndim == 3
cov_list = cov if is_mixture else [cov]
if is_mixture:
return np.atleast_3d(
list(map(lambda x: to_sqrt_covariance(x, source_cov_type), cov_list))
)
return np.atleast_2d(
list(map(lambda x: to_sqrt_covariance(x, source_cov_type), cov_list))
)
def to_covariance_with_type(cov: np.ndarray, source_cov_type: CovarianceType, target_cov_type: CovarianceType) -> np.ndarray:
target_is_sqrt_like = is_sqrt_like(target_cov_type)
res = to_mixture_sqrt_covariance(cov, source_cov_type) if target_is_sqrt_like else to_mixture_full_covariance(cov, source_cov_type)
    return np.atleast_2d(np.squeeze(res))
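# Hedged round-trip check (illustrative): converting a full covariance to
# its sqrt (Cholesky) factor and back should reproduce the PSD matrix.
def _cov_roundtrip_demo():
    full = np.array([[2.0, 0.5], [0.5, 1.0]])
    sqrt = to_sqrt_covariance(full, CovarianceType.full)
    return np.allclose(sqrt @ sqrt.T.conj(), full)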
'''
Route script used for running Control-based continuation.
'''
# import required packages
import numpy as np
import scipy.linalg as la
import math
import dtLib.nonlinearcbc.nonlinearcbc2 as cbc2
from flask import render_template, request, redirect, Response, url_for, flash, send_file
from dtApp import app
from dtApp import date
import plotly
import plotly.graph_objs as go
import json
from plotly.subplots import make_subplots
from scipy.integrate import odeint
from pathlib import Path
@app.route('/nonlinearcbc', methods=['GET', 'POST']) #@app.route('/bristolcbc', methods=['GET', 'POST'])
def bristolcbc():
# define input data
pars_file = Path('dtApp/dtData/bris_pars.npy')
if pars_file.is_file():
# file exists
pars = np.load('dtApp/dtData/bris_pars.npy')
m1, m2, m3 = pars[0], pars[1], pars[2]
b1, b2, b3 = pars[3], pars[4], pars[5]
s1, s2, s3 = pars[6], pars[7], pars[8]
s13 = pars[9]
else:
m1, m2, m3 = 0.2, 0.2, 0.2 #masses
s1, s2, s3, s13 = 200.0, 200.0, 200.0, 200.0 #stiffnesses
b1, b2, b3 = 0.1, 0.1, 0.1 #viscous damping
sweep_file = Path('dtApp/dtData/sweep_pars.npy')
if sweep_file.is_file():
# file exists
sweep_pars = np.load('dtApp/dtData/sweep_pars.npy')
else:
sweep_pars = np.array([1.0, 50.0, 1.0, 80.0, 0.945])
#Mass, damping and stiffness matrices
MM = np.array([[m1, 0.0, 0.0], [0.0, m2, 0.0], [0.0, 0.0, m3]])
BB = np.array([[b1+b2, -b2, 0.0], [-b2, b2+b3, -b3], [0.0, -b3, b3]])
SS = np.array([[s1+s2, -s2, 0.0], [-s2, s2+s3, -s3], [0.0, -s3, s3]])
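    # The tridiagonal pattern encodes a serial 3-DOF chain: each mass couples
    # only to its neighbours, e.g. the middle row of SS is [-s2, s2+s3, -s3]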
Minv = la.inv(MM)
IC = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
Minfreq = sweep_pars[0]
Maxfreq = sweep_pars[1]
    Freq_list_def = np.linspace(Minfreq, Maxfreq, 200)
#Data structures to hold domain information
from functools import reduce
import copy
import numba as nb
import numpy
import os
import pdb
import vtk
import evtk.hl as evtk
from evtk.vtk import VtkGroup
import accelerated_functions as af
import Boundaries.outer_2D_rectangular as ob
import Boundaries.inner_2D_rectangular as ib
import constants as c
import cylindrical_mesh_tools as cmt
from solver import location_indexes_inv
#Mesh (Abstract)(Association between Mesh and PIC):
#
#Definition = Defines the type of mesh.
#Attributes:
# +nPoints (int) = Number of points in the mesh.
# +boundaries ([Boundary]) = List of different boundaries that define the mesh.
# +volumes ([double]) = Volume of each node.
# +location ([int]) = indexes indicating all the nodes that are boundaries in the mesh. There might be different arrangement according to
# every type of mesh. (Repeated indices deprecated in 2020_11_20).
# +location_sat ([int]) = indexes indicating the nodes that are boundaries with the satellite.
# +direction_sat ([int]) = array of indexes of the size of 'location_sat', indicating for each node the direction towards the satellite.
# +area_sat ([double]) = array indicating the area that represents each satellite node.
#Methods:
# +setDomain() = This function, with the values provided by the boundary files, will create the mesh, by setting up volumes, nPoints, boundaries and any other subclass variable.
# +checkPositionInMesh([double, double] pos): [Boolean] = For each position it returns a bool indicating whether the position lies inside the mesh.
# +getPosition([int] i): [double x, double y] = For each index return its real position.
# +getIndex([double,double] pos): [double,double] = For each real position returns its index value. Important to remember that the number of columns may vary
# depending on the actual type of mesh subclass used.
# +arrayToIndex([ind] array): [int, int] = For the indexes in the 1D array, obtain the indexes used for the particular mesh.
# +indexToArray([ind, ind] index): [int] = For the indexes used for the particular mesh, obtain the 1D version for the array.
# +createDistributionInCell(int ind, int num): [double, ..., double] position = Produces 'num' random positions uniformly distributed in the cell 'ind'.
# +checkCellAssignment([int] ind): [Boolean] = For each index in 'ind' it checks whether that node corresponds to a cell in the mesh.
# +reverseVTKOrdering(array): array = The array received as argument is ordered in such a way it can be stored in a VTK file.
# The result is returned as a new array.
# +vtkOrdering(array): array = The array received as argument comes with vtk ordering and is reshaped to be stored properly in the code.
# +vtkReader(): Reader = Return the reader from module vtk that is able to read the vtk file.
# +saveVTK(string filename, dict dictionary) = It calls the appropriate method in 'vtk' module to print the information of the system in a '.vtk' file.
# +loadSpeciesVTK(self, species) = It creates particles around every node that can match the preloaded density and velocity of that node. This will depend on each type of mesh.
# +print() = Print a VTK file / Matplotlib visualization of the mesh (points and connections between nodes). Also print volumes.
class Mesh (object):
def __init__(self):
        self.setDomain()
def setDomain(self):
pass
def checkPositionInMesh(self, pos):
pass
def getPosition(self, ind):
pass
def getIndex(self, pos):
pass
def arrayToIndex(self, array):
pass
def indexToArray(self, ind):
pass
    def createDistributionInCell(self, ind, num):
pass
def vtkOrdering(self, array):
pass
def reverseVTKOrdering(self, array):
pass
def vtkReader(self):
pass
def saveVTK(self, filename, dictionary):
pass
def loadSpeciesVTK(self, species):
pass
def print(self):
pass
#Mesh_recursive (Abstract):
#
#Definition = Abstract class that works as an interface for classes meant to be recursive.
#Attributes:
# +root (Boolean) = Identifies whether the object is the root of the tree or not.
# +id (String) = String that uniquely identifies the mesh among the meshes of the domain. The identity is assigned as:
# ID is organized as a chain of numbers separated by dashes, everything stored as a string. The first number is "0", indicating root of the tree.
# The second level is a 0-based index describing the position of the child in the parent mesh.
# For the third and successive levels, the same approach is applied, until the mesh being identified is finally reached.
# +start_ind (int) = node index in the parent mesh where this mesh starts.
# +children ([Mesh]) = List of all the children of the mesh.
#Methods:
# +flatIndexing((String) ID, (int) index): int flat_ind = The function receives a unique description of a node in the domain by the index of the node in its mesh (index), and the ID of such mesh (ID).
# Then, the function returns the index that represents the node under the 'Flat indexation' rule, counting from the mesh that executes the function.
# +executeFunctionByIndex((String) ID, (int) index, (method) func, *args, **kwargs): This function searches for the mesh that contains the node denoted by 'index' (Counting from the mesh that executes
# 'executeFunctionByIndex' for the first time) and then executes 'self.func(*args, **kwargs)'.
# +orderedIndexing((int) index): (String) ID, (int) index = This functions operates inversely to 'flatIndexing(ID, index)'. It receives a node identification, counting from the mesh that
# executes the function, and returns its unique identification by giving the index of the node in its correspondent mesh as well as the 'ID' of such mesh.
# +sortArrayByMeshes((ndarray) array, seedList = []) [ndarray] = This function receives the values for the nodes of the mesh and its NGs, ordered by 'Flat indexation rule',
# and returns a list of arrays with the same values of the original array but now sorted by 'Ordered rule', each array containing the values of one mesh.
# +executeFunctionByMeshes(Function func, List seedList = [], *args, **kwargs): List = This method receives a function (func) to be executed in all the nodes of the mesh tree.
# The argument function can receive normal arguments as well as keyword arguments. The method receives all the arguments intended for func in the same positional order, but for each type of argument,
# it receives all the arguments for the execution of func throughout the nodes of the tree, stored in a list ordered by the 'Ordered rule'. For keyword arguments, if func receives key = value,
# the method must receive key = [value0, value1, ...]. The method stores the results of the executions of func in the optional key argument 'seedList', and the results are also ordered by the
# 'Ordered rule'. These results can be either obtained by passing a list object to seedList or by capturing the return of the method, which is only possible if the node that called the function
# is the root of the tree.
# +sortPositionsByMeshes([double, double] pos, List seedList): List = This function receives an array of positions and organize the positions in ndarrays stored in a list ordered by the 'Ordered rule'.
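# Example of the ID scheme above: "0-1-0" names the first child of the
# second child of the root; flatIndexing("0-1-0", k) then counts the root's
# own nodes, the whole subtree of child "0-0" and child "0-1"'s own nodes
# before adding k.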
class Mesh_recursive(object):
def __init__(self, n_children, n_root, n_id, n_start_ind, n_type):
n_type += " - Recursive"
self.root = n_root
self.id = n_id
self.start_ind = n_start_ind
self.children = n_children
self.accPoints = self.nPoints+reduce(lambda x, y: x+y.accPoints, self.children, 0)
#Unique features for the root
if self.root == True:
#Location sat
def func(mesh, acc = None):
if acc is None:
acc = [0]
local = mesh.location_sat + acc[0]
acc[0] += mesh.nPoints
for child in mesh.children:
local = numpy.append(local, func(child, acc = acc))
return local
self.overall_location_sat = func(self)
#Volumes
def func(mesh, attribute):
acc_list = numpy.copy(mesh.__getattribute__(attribute))
for child in mesh.children:
acc_list = numpy.append(acc_list, func(child, attribute), axis = 0)
return acc_list
self.volumes = func(self, "volumes")
self.overall_area_sat = func(self, "area_sat")
#Setting up potential and location_index_inv for use in the main program
if len(self.overall_location_sat) > 0:
test = location_indexes_inv([self.location_sat[0]], store = True, location = self.overall_location_sat)[0]
assert test == 0, "location_indexes_inv is not correctly set up"
    # This function works well with numpy arrays as long as the indices are all from the same mesh (which makes sense to have it only working like that)
# flatIndexing( String ID, [int] index): [int] = The function receives indices according to the index system of its parent mesh, identified by ID, and
# returns their flat indexation from the mesh that executed this function for the first time.
def flatIndexing(self, ID, index):
        assert ID.startswith(self.id), "The function is being used on a branch of the tree that does not contain the node"
if self.id == ID:
return index
else:
child_pos = int((ID.partition(self.id+"-")[2]).partition("-")[0])
acc = 0
for i in range(child_pos):
acc += self.children[i].accPoints
return self.nPoints + acc + self.children[child_pos].flatIndexing(ID, index)
def executeFunctionByIndex(self, index, func, *args, ind = None, **kwargs):
assert numpy.all(self.accPoints >= index), "The index passed as argument is invalid"
if numpy.all(index < self.nPoints):
return func(index, *args, **kwargs)
else:
c = self.nPoints+self.children[0].accPoints
i = 0
while i < len(self.children) and c <= index:
                # advance to the next child before adding its subtree size,
                # so c ends as nPoints plus all subtree sizes up to child i
                i += 1
                c += self.children[i].accPoints
return self.children[i].executeFunctionByIndex(index-c+self.children[i].accPoints, func, *args, ind = ind, **kwargs)
def orderedIndexing(self, index):
def func(self, index):
return self.id, index
return self.executeFunctionByIndex(index, func)
def sortArrayByMeshes(self, array, seedList = None):
if type(array) == numpy.ndarray:
array = list(array)
        if seedList is None:
            seedList = []
seedList.append(numpy.asarray(array[:self.nPoints]))
del array[:self.nPoints]
for child in self.children:
child.sortArrayByMeshes(array, seedList = seedList)
return seedList
def executeFunctionByMeshes(self, func, seedList = None, *args, **kwargs):
if self.root:
seedList = []
        seedList.append(func(*[a.pop(0) for a in args],
                             **{k: v.pop(0) for k, v in kwargs.items()}))
for child in self.children:
child.executeFunctionByMeshes(func, seedList = seedList, *args, **kwargs)
if self.root:
return seedList
def sortPositionsByMeshes(self, pos, seedList = None, return_ind = None, indexes = None, surface = False):
mask = numpy.zeros(numpy.shape(pos)[0])
for i in range(len(self.children)):
mask += (i+1)*self.children[i].checkPositionInMesh(pos, surface = surface).astype(numpy.int_)
if seedList is None:
seedList = []
seedList.append(pos[numpy.logical_not(mask.astype(numpy.bool_)),:])
if return_ind is not None:
if indexes is None:
indexes = numpy.arange(numpy.shape(pos)[0], dtype = 'uint32')
return_ind.append(indexes[numpy.logical_not(mask.astype(numpy.bool_))])
for i in range(len(self.children)):
if return_ind is None:
self.children[i].sortPositionsByMeshes(pos[mask == i+1], seedList = seedList, return_ind = None, indexes = None, surface = surface)
else:
self.children[i].sortPositionsByMeshes(pos[mask == i+1], seedList = seedList, return_ind = return_ind, indexes = indexes[mask == i+1], surface = surface)
        if return_ind is None:
return seedList
else:
return seedList, return_ind
# +sortIndexByMeshes(ind, List seedList, int acc_index): [[int]] = This function organizes an array of indexes in flat-ordering into a list of arrays, where the arrays are organized with the tree order.
# Each array contains the nodes that correspond to each mesh. The rest of arguments are used only for recursive purposes.
# The 'shift' keyword indicates whether the indexes are shifted from flat indexation rule to the index values of each mesh or not.
def sortIndexByMeshes(self, ind, shift = True, seedList = None, accIndex = None):
if seedList is None and accIndex is None:
seedList = []
accIndex = [0]
c1 = numpy.greater_equal(ind, accIndex[0])
temp = accIndex[0]
accIndex[0] += self.nPoints
c2 = numpy.less(ind, accIndex[0])
c3 = numpy.logical_and(c1, c2)
if shift:
seedList.append(ind[c3]-temp)
else:
seedList.append(ind[c3])
ind = ind[numpy.logical_not(c3)]
for child in self.children:
child.sortIndexByMeshes(ind, shift = shift, seedList = seedList, accIndex = accIndex)
return seedList
    # +groupIndexByPosition([int] ind, positions = False) = This function receives a list of indexes and groups them in a list of lists, where each list contains the nodes that
    # refer to the same physical position (less than 'err' apart). If 'positions = True', a numpy array is returned with the positions linked to the lists.
def groupIndexByPosition(self, ind, positions = False, seedList = None, posList = None, accIndex = None, err = 1e-4):
if self.root:
ind = self.sortIndexByMeshes(ind, shift = False)
ind_i = ind.pop(0)
posList = [super(Mesh_recursive, self).getPosition(ind_i)]
seedList = [[x] for x in ind_i]
accIndex = [self.nPoints]
else:
ind_i = ind.pop(0)-accIndex[0]
n_pos = super(Mesh_recursive, self).getPosition(ind_i)
limit = len(seedList)
for ind_ii, n_pos_i in zip(ind_i, n_pos):
ind_n = numpy.flatnonzero(numpy.linalg.norm(posList[0][:limit]-n_pos_i, axis = 1) < err)
if len(ind_n) > 0:
seedList[ind_n[0]].append(ind_ii+accIndex[0])
else:
seedList.append([ind_ii+accIndex[0]])
posList[0] = numpy.append(posList[0], n_pos_i.reshape((1,2)), axis = 0)
accIndex[0] += self.nPoints
for child in self.children:
child.groupIndexByPosition(ind, positions = positions, seedList = seedList, posList = posList, accIndex = accIndex)
if positions:
return seedList, posList[0]
else:
return seedList
    # +vtkOrdering(array): array = The array received as argument is ordered in such a way that it can be stored in a VTK file.
    # If called on the root, the array received includes the information of all the meshes. The function thus returns
    # a list of numpy arrays, each one prepared for storing in a VTK file.
    def vtkOrdering(self, array, accIndex = None):
if self.root:
array = self.sortArrayByMeshes(array)
accIndex = [0]
array[accIndex[0]] = super(Mesh_recursive,self).vtkOrdering(array[accIndex[0]])
accIndex[0] += 1
for child in self.children:
child.vtkOrdering(array, accIndex = accIndex)
return array
# +reverseVTKOrdering(array): array = The array received as argument is converted from VTK to numpy style.
def reverseVTKOrdering(self, array):
dims = numpy.shape(array)
if len(dims) == 1:
return array.reshape((self.nPoints), order = 'F')
else:
return array.reshape((self.nPoints, 3), order = 'F')[:,:2]
# def saveVTK(self, filename, dictionary, files = None, accIndex = None):
# if self.root:
# #Creating folder
# cwd, name = os.path.split(filename)
# path = os.path.join(cwd, 'mesh_components',name)
# try:
# os.makedirs(path)
# except FileExistsError:
# pass
# files = []
# accIndex = [0]
# #dictionary = {key: self.sortArrayByMeshes(value) for key, value in dictionary.items()}
# nmeshes = len(list(dictionary.values())[0])
# dicts = [dict() for i in range(nmeshes)]
# for key, value in dictionary.items():
# for i in range(nmeshes):
# dicts[i][key] = value[i]
# dictionary = dicts
# #Creation of individual files
# cwd, name = os.path.split(filename)
# filename_comp = os.path.join(cwd,'mesh_components',name,'{}_{}'.format(name, self.id))
# super(Mesh_recursive, self).saveVTK(filename_comp, dictionary[accIndex[0]])
# #TODO: Fix later: I need to change the extension of the file manually everytime I use a different file type
# files.append(filename_comp+'.vtr')
# accIndex[0] += 1
# #Recursion
# for child in self.children:
# child.saveVTK(filename, dictionary, files = files, accIndex = accIndex)
# if self.root:
# g = VtkGroup(filename)
# for comp in files:
# g.addFile(comp, sim_time = 0.0)
# g.save()
def saveVTK(self, filename, dictionary, files = None, accIndex = None):
if self.root:
##Creating folder
#cwd, name = os.path.split(filename)
#path = os.path.join(cwd, 'mesh_components',name)
#try:
# os.makedirs(path)
#except FileExistsError:
# pass
files = []
accIndex = [0]
#dictionary = {key: self.sortArrayByMeshes(value) for key, value in dictionary.items()}
nmeshes = len(list(dictionary.values())[0])
dicts = [dict() for i in range(nmeshes)]
for key, value in dictionary.items():
for i in range(nmeshes):
dicts[i][key] = value[i]
dictionary = dicts
#Creation of individual files
cwd, name = os.path.split(filename)
filename_comp = os.path.join(cwd, '{}_{}'.format(self.id, name))
super(Mesh_recursive, self).saveVTK(filename_comp, dictionary[accIndex[0]])
#TODO: Fix later: I need to change the extension of the file manually everytime I use a different file type
files.append(filename_comp+'.vtr')
accIndex[0] += 1
#Recursion
for child in self.children:
child.saveVTK(filename, dictionary, files = files, accIndex = accIndex)
#if self.root:
# g = VtkGroup(filename)
# for comp in files:
# g.addFile(comp, sim_time = 0.0)
# g.save()
#NOTE: Backup
# def orderedIndexing(self, index):
# assert self.accPoints <= index, "The index passed as argument is invalid"
# if index < self.nPoints:
# return self.id, index
# else:
# c = self.nPoints+self.children[0].accPoints
# i = 0
# while i < len(self.children) and c <= index:
# c += self.children[i].accPoints
# i += 1
# return self.children[i].orderedIndexing(index-c+self.children[i].accPoints)
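# Example (hypothetical, standalone sketch of the flat-indexing walk above): a global
# flat index is resolved to (mesh, local index) by subtracting the root's own nodes
# and then each child's accumulated count in tree order:
def _demo_flat_to_local(flat_index, n_root, children_counts):
    if flat_index < n_root:
        return 'root', flat_index
    offset = n_root
    for child, n_child in enumerate(children_counts):
        if flat_index < offset + n_child:
            return child, flat_index - offset
        offset += n_child
    raise IndexError("flat index out of range")
# _demo_flat_to_local(7, 5, [4, 6]) returns (0, 2): node 7 is the third node of child 0.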
#Mesh_2D_rm (Inherits from Mesh):
#
#Definition = Mesh class for a 2D rectangular mesh. The organization of the points will work as 0<=i<nx and 0<=j<ny. Also, for k parameter 0<=k<nPoints, k = nx*j+i.
#Attributes:
# +xmin (double) = Left limit of the domain (closest to the Sun).
# +xmax (double) = Right limit of the domain (farthest from the Sun).
# +ymin (double) = Bottom limit of the domain.
# +ymax (double) = Top limit of the domain.
# +depth (double) = Artificial thickness of the domain, to make it three-dimensional.
# +nx (int) = Number of nodes in the x direction.
# +ny (int) = Number of nodes in the y direction.
# +dx (float32) = Distance between adjacent horizontal nodes.
# +dy (float32) = Distance between adjacent vertical nodes.
# +boundaries ([Boundary]) = It is [Outer_2D_Rectangular].
# +Mesh class attributes.
#Methods:
# +Implementation of Mesh methods.
class Mesh_2D_rm (Mesh):
type = "2D_rm"
def __init__(self, xmin, xmax, ymin, ymax, dx, dy, depth, boundaries):
self.depth = depth
self.boundaries = boundaries
self.xmin = xmin
self.xmax = xmax
self.ymin = ymin
self.ymax = ymax
self.dx = dx
self.dy = dy
self.nx = numpy.rint((xmax-xmin)/dx+1).astype('uint32')
self.ny = numpy.rint((ymax-ymin)/dy+1).astype('uint32')
self.setDomain()
# +setDomain() = This function, with the values provided by the boundary files, will create the mesh, by setting up volumes, nPoints and any other subclass variable.
def setDomain(self):
self.nPoints = numpy.uint32(self.nx*self.ny)
self.volumes = (self.dx*self.dy*self.depth)*numpy.ones((self.nPoints), dtype = 'float32')
self.volumes[:self.nx] /= 2
self.volumes[self.nx*(self.ny-1):] /= 2
self.volumes[self.nx-1::self.nx] /= 2
self.volumes[:self.nx*self.ny:self.nx] /= 2
#Initializing the rest of the attributes of the outer boundary
self.boundaries[0].bottom = numpy.arange(0,self.nx, dtype = 'uint32')
b_areas = self.dx*self.depth*numpy.ones_like(self.boundaries[0].bottom)
b_directions = numpy.zeros_like(self.boundaries[0].bottom, dtype = numpy.uint8)
b_adjacent = numpy.append(self.boundaries[0].bottom[1:], self.boundaries[0].bottom[-1]+self.nx)
self.boundaries[0].left = numpy.arange(0, self.nx*self.ny, self.nx, dtype = 'uint32')
l_areas = self.dy*self.depth*numpy.ones_like(self.boundaries[0].left)
l_directions = 3*numpy.ones_like(self.boundaries[0].left, dtype = numpy.uint8)
l_adjacent = numpy.append(self.boundaries[0].left[1:], self.boundaries[0].left[-1]+1)
self.boundaries[0].right = numpy.arange(self.nx-1, self.nx*self.ny, self.nx, dtype = 'uint32')
r_areas = copy.copy(l_areas)
r_directions = numpy.ones_like(self.boundaries[0].right, dtype = numpy.uint8)
r_adjacent = self.boundaries[0].right+self.nx
r_adjacent[-1] = r_adjacent[-2]
self.boundaries[0].top = numpy.arange(self.nx*(self.ny-1), self.nx*self.ny, dtype = 'uint32')
t_areas = copy.copy(b_areas)
t_directions = 2*numpy.ones_like(self.boundaries[0].top, dtype = numpy.uint8)
t_adjacent = self.boundaries[0].top+1
t_adjacent[-1] = t_adjacent[-2]
self.boundaries[0].location, ind1 = numpy.unique(numpy.append(numpy.append(numpy.append(self.boundaries[0].bottom, \
self.boundaries[0].left),\
self.boundaries[0].right),\
self.boundaries[0].top), return_index = True)
ind2 = numpy.argsort(self.boundaries[0].location)
self.boundaries[0].location = self.boundaries[0].location[ind2]
self.boundaries[0].areas = numpy.append(numpy.append(numpy.append(b_areas,\
l_areas),\
r_areas),\
t_areas)[ind1][ind2]
self.boundaries[0].directions = numpy.append(numpy.append(numpy.append(b_directions,\
l_directions),\
r_directions),\
t_directions)[ind1][ind2]
adjacent_nodes = numpy.append(numpy.append(numpy.append(b_adjacent,\
l_adjacent),\
r_adjacent),\
t_adjacent)[ind1][ind2]
self.location = self.boundaries[0].location
self.location_sat = numpy.zeros((0), dtype = numpy.uint8) if self.boundaries[0].material == "space" else self.boundaries[0].location
self.area_sat = numpy.zeros((0)) if self.boundaries[0].material == "space" else self.boundaries[0].areas
self.direction_sat = numpy.zeros((0)) if self.boundaries[0].material == "space" else self.boundaries[0].directions
test = location_indexes_inv([self.location[0]], store = True, location = self.location)[0]
assert test == 0, "location_indexes_inv is not correctly set up"
adjacent_nodes = location_indexes_inv(adjacent_nodes, store = False)
self.boundaries[0].adjacent = [{self.boundaries[0].directions[i]:adjacent_nodes[i]} for i in range(len(adjacent_nodes))]
self.boundaries[0].adjacent[0].update({l_directions[0]: self.nx})
self.boundaries[0].adjacent[-1].update({2:self.boundaries[0].adjacent[-1][1]})
#Corners
corners = location_indexes_inv([self.boundaries[0].bottom[-1], self.boundaries[0].left[-1]], store = False)
self.boundaries[0].adjacent[corners[0]].update({1: self.boundaries[0].adjacent[corners[0]][0]})
self.boundaries[0].adjacent[corners[1]].update({2: self.boundaries[0].adjacent[corners[1]][3]})
self.adjacent_sat = [] if self.boundaries[0].material == "space" else self.boundaries[0].adjacent
#Setting up potential and location_index_inv for use in the main program
if len(self.location_sat) > 0:
test = location_indexes_inv([self.location_sat[0]], store = True, location = self.location_sat)[0]
assert test == 0, "location_indexes_inv is not correctly set up"
def checkPositionInMesh(self, pos, surface = False):
cumul = numpy.ones((pos.shape[0]), dtype = numpy.bool_)
for boundary in self.boundaries:
cumul = numpy.logical_and(cumul, boundary.checkPositionInBoundary(pos, surface = surface))
return cumul
# def checkPositionInMesh(self, pos, surface = False):
# xmin = self.xmin
# xmax = self.xmax
# ymin = self.ymin
# ymax = self.ymax
# if surface and self.boundaries[0].material != "space":
# return af.gleq_p(pos, xmin, xmax, ymin, ymax)
# else:
# return af.gl_p(pos, xmin, xmax, ymin, ymax)
    # +getPosition([int] i): [double x, double y] = For each index return its real position.
def getPosition(self, ind):
index2D = self.arrayToIndex(ind)
dx = self.dx
dy = self.dy
xmin = self.xmin
ymin = self.ymin
posx, posy = numpy.around(af.getPosition_p(index2D[:,0], index2D[:,1], dx, dy, xmin, ymin), decimals = c.INDEX_PREC)
return numpy.append(posx[:,None], posy[:,None], axis = 1)
# +getIndex([double,double] pos): [double,double] = For each real position returns its index value.
def getIndex(self, pos):
dx = self.dx
dy = self.dy
xmin = self.xmin
ymin = self.ymin
indx, indy = af.getIndex_p(pos[:,0], pos[:,1], dx, dy, xmin, ymin)
return numpy.around(numpy.append(indx[:,None], indy[:,None], axis = 1), decimals = c.INDEX_PREC)
# +arrayToIndex([ind] array): [int, int] = For the indexes in the 1D array, obtain the indexes used for the particular mesh.
def arrayToIndex(self, array):
j, i = numpy.divmod(array, self.nx)
return numpy.append(i[:,None], j[:,None], axis = 1)
# +indexToArray([ind, ind] index): [int] = For the indexes used for the particular mesh, obtain the 1D version for the array.
def indexToArray(self, ind):
nx = self.nx
ind = ind.astype('uint32')
return af.indexToArray_p(ind[:,0], ind[:,1], nx)
# +createDistributionInCell([int] ind, [int] num): [double, ..., double] position = Produces 'num' random positions uniformly distributed in every cell in 'ind'.
def createDistributionInCell(self, ind, num, prec = 10**(-c.INDEX_PREC)):
pos = numpy.random.rand(numpy.sum(num), 2)
pos0 = numpy.repeat(self.getPosition(ind), num, axis = 0)
check = numpy.logical_and(pos0[:,0] < self.xmax, pos0[:,1] < self.ymax)
if numpy.all(check):
pos[:,0] *= (self.dx-prec)
pos[:,1] *= (self.dy-prec)
return pos0+pos
else:
raise Exception('These indexes do not correspond to cells inside the mesh: '+ numpy.flatnonzero(numpy.logical_not(check)))
    # +checkCellAssignment([int] ind): [Boolean] = For each index in 'ind' it checks whether that node corresponds to a cell in the mesh.
def checkCellAssignment(self, ind):
bools = numpy.ones_like(ind, dtype = numpy.int8)
bools *= numpy.where(ind >= self.nx*(self.ny-1), 0, 1)
bools *= numpy.where(ind%self.nx == self.nx-1, 0, 1)
bools = bools.astype(numpy.bool_)
return bools
def particleReformNodes(self):
mask = numpy.ones((self.nPoints), dtype = numpy.bool_)
mask[self.location] = False
mask[self.nx*(self.ny-2):self.nx*(self.ny-1)-1] = False
mask[self.nx-2::self.nx] = False
return mask
    # +vtkOrdering(array): array = The array received as argument is ordered in such a way that it can be stored in a VTK file.
# The result is returned as a new array.
def vtkOrdering(self, array):
dims = numpy.shape(array)
if len(dims) == 1:
return array.reshape((self.nx, self.ny, 1), order = 'F')
else:
tpl = tuple(numpy.reshape(copy.copy(array[:,i]),(self.nx, self.ny, 1), order = 'F') for i in range(dims[1]))
if len(tpl) < 3:
for i in range (3-len(tpl)):
tpl += (numpy.zeros_like(array[:,0].reshape((self.nx,self.ny,1))),)
return tpl
    # +reverseVTKOrdering(array): array = The array received as argument is converted from VTK style to numpy.
def reverseVTKOrdering(self, array):
dims = numpy.shape(array)
if len(dims) == 1:
return array.reshape((self.nPoints), order = 'F')
else:
return array.reshape((self.nPoints, 3), order = 'F')[:,:2]
# +vtkReader(): Reader = Return the reader from module vtk that is able to read the vtk file.
def vtkReader(self):
return vtk.vtkXMLRectilinearGridReader()
    # +saveVTK(string filename, dict dictionary) = It calls the appropriate method in 'vtk' module to print the information of the system in a '.vtk' file.
def saveVTK(self, filename, dictionary):
i = numpy.arange(self.xmin, self.xmax+self.dx/2, self.dx)
j = numpy.arange(self.ymin, self.ymax+self.dy/2, self.dy)
temp = numpy.zeros((1), dtype = 'int32')
evtk.gridToVTK(filename, i, j, temp, pointData = dictionary)
    # +loadSpeciesVTK(self, species) = It creates particles around every node to match the preloaded density and velocity of that node. This will depend on each type of mesh.
def loadSpeciesVTK(self, species, pic):
##Preparing things for numpy functions use
#particles = (species.mesh_values.density*self.volumes/species.spwt).astype(int)
#ind = numpy.arange(self.nPoints)
#index = numpy.repeat(ind, particles)
##Setting up positions
#pos = self.getPosition(ind)[index]
#random = numpy.random.rand(*numpy.shape(pos))
#random += numpy.where(random == 0, 1e-3, 0)
#pos[:,0] += numpy.where(index%self.nx != 0, (random[:,0]-0.5)*self.dx, random[:,0]/2*self.dx)
#pos[:,0] -= numpy.where(index%self.nx == self.nx-1, random[:,0]/2*self.dx, 0)
#pos[:,1] += numpy.where(index>self.nx, (random[:,1]-0.5)*self.dy, random[:,1]/2*self.dy)
#pos[:,1] -= numpy.where(index>=self.nx*(self.ny-1), random[:,1]/2*self.dy, 0)
##Setting up velocities
#vel = self.boundaries[0].sampleIsotropicVelocity(self.boundaries[0].thermalVelocity(species.mesh_values.temperature, species.m), particles)
##Adding particles
#self.boundaries[0].addParticles(species, pos, vel)
#self.boundaries[0].updateTrackers(species, species.part_values.current_n)
#Preparing things for numpy functions use
#Volume
dv = self.dx*self.dy*self.depth
particles = (species.mesh_values.density[:self.nPoints]*dv/species.spwt).astype(int)
ind = numpy.arange(self.nPoints)
index = numpy.repeat(ind, particles)
#Setting up positions
pos = self.getPosition(index)
random = numpy.random.rand(*numpy.shape(pos))
random += numpy.where(random == 0, 1e-3, 0)
pos += (random-0.5)*numpy.asarray((self.dx,self.dy)).T
#Adding particles and thermal velocity
vel = self.boundaries[0].sampleIsotropicVelocity(self.boundaries[0].thermalVelocity(species.mesh_values.temperature[:self.nPoints], species.m), particles)
self.boundaries[0].addParticles(species, pos, vel)
#Clearing particles outside of boundaries
for boundary in self.boundaries:
boundary.applyParticleBoundary(species, 'open', old_position = None)
#Setting up shifted velocities
np = species.part_values.current_n
species.part_values.velocity[:np] += pic.gather(species.part_values.position[:np], species.mesh_values.velocity)
#Update trackers
self.boundaries[0].updateTrackers(species, species.part_values.current_n)
# +print() = Print a VTK file / Matplotlib visualization of the mesh (points and connections between nodes). Also print volumes.
def print(self):
cwd = os.path.split(os.getcwd())[0]
vtkstring = cwd+'/results/mesh'
self.saveVTK(vtkstring, {'volume' : self.vtkOrdering(self.volumes)})
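# Example (standalone sketch): the k = nx*j+i convention implemented by arrayToIndex
# and indexToArray is a plain divmod round trip, here for a 4x3 grid:
def _demo_flat_round_trip():
    nx = 4
    k = numpy.arange(12, dtype='uint32')  # flat node indexes for nx=4, ny=3
    j, i = numpy.divmod(k, nx)            # arrayToIndex: k -> (i, j)
    assert numpy.all(nx*j + i == k)       # indexToArray: (i, j) -> k
    return i, j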
#Mesh_2D_rm_sat (Inherits from Mesh):
#
#Definition = Mesh class for a 2D rectangular mesh with a rectangular satellite at its center.
# The organization of the points will work as 0<=i<nx and 0<=j<ny, but taking into account the hole for the satellite.
#Attributes:
# +xmin (double) = Left limit of the domain (closest to the Sun).
# +xmax (double) = Right limit of the domain (farthest from the Sun).
# +ymin (double) = Bottom limit of the domain.
# +ymax (double) = Top limit of the domain.
# +xminsat (double) = Left limit of the satellite (closest to the Sun).
# +xmaxsat (double) = Right limit of the satellite (farthest from the Sun).
# +yminsat (double) = Bottom limit of the satellite.
# +ymaxsat (double) = Top limit of the satellite.
# +depth (double) = Artificial thickness of the domain, to make it three-dimensional.
# +dx (float32) = Distance between adjacent horizontal nodes.
# +dy (float32) = Distance between adjacent vertical nodes.
# +nx (int) = Number of nodes in the x direction.
# +ny (int) = Number of nodes in the y direction.
# +nxsat (int) = Number of nodes of the satellite in the x direction.
# +nysat (int) = Number of nodes of the satellite in the y direction.
# +boundaries ([Boundary]) = It is [Outer_2D_Rectangular, Inner_2D_Rectangular].
# +Mesh class attributes.
#Methods:
# +Implementation of Mesh methods.
class Mesh_2D_rm_sat (Mesh_2D_rm):
type = "2D_rm_sat"
def __init__(self, xmin, xmax, ymin, ymax,\
xminsat, xmaxsat, yminsat, ymaxsat,\
dx, dy, depth, boundaries):
self.xmin = xmin
self.xmax = xmax
self.ymin = ymin
self.ymax = ymax
self.xminsat = xminsat
self.xmaxsat = xmaxsat
self.yminsat = yminsat
self.ymaxsat = ymaxsat
self.dx = dx
self.dy = dy
self.nx = numpy.rint((xmax-xmin)/dx+1).astype('uint32')
self.ny = numpy.rint((ymax-ymin)/dy+1).astype('uint32')
self.nxsat = numpy.rint((xmaxsat-xminsat)/dx+1).astype('uint32')
self.nysat = numpy.rint((ymaxsat-yminsat)/dy+1).astype('uint32')
self.sat_i = numpy.rint(((yminsat-ymin)/dy)*self.nx+(self.xminsat-self.xmin)/self.dx).astype('uint32')
self.depth = depth
self.boundaries = boundaries
self.setDomain()
# +setDomain() = This function, with the values provided by the boundary files, will create the mesh, by setting up volumes, nPoints and any other subclass variable.
def setDomain(self):
self.nPoints = numpy.uint32(self.nx*self.ny)
#Initializing the rest of the attributes of the outer boundary
self.boundaries[0].bottom = numpy.arange(0,self.nx, dtype = 'uint32')
b_areas = self.dx*self.depth*numpy.ones_like(self.boundaries[0].bottom)
b_directions = numpy.zeros_like(self.boundaries[0].bottom, dtype = numpy.uint8)
b_adjacent = numpy.append(self.boundaries[0].bottom[1:], self.boundaries[0].bottom[-1]+self.nx)
self.boundaries[0].left = numpy.arange(0, self.nx*self.ny, self.nx, dtype = 'uint32')
l_areas = self.dy*self.depth*numpy.ones_like(self.boundaries[0].left)
l_directions = 3*numpy.ones_like(self.boundaries[0].left, dtype = numpy.uint8)
l_adjacent = numpy.append(self.boundaries[0].left[1:], self.boundaries[0].left[-1]+1)
self.boundaries[0].right = numpy.arange(self.nx-1, self.nx*self.ny, self.nx, dtype = 'uint32')
r_areas = copy.copy(l_areas)
r_directions = numpy.ones_like(self.boundaries[0].right, dtype = numpy.uint8)
r_adjacent = self.boundaries[0].right+self.nx
r_adjacent[-1] = r_adjacent[-2]
self.boundaries[0].top = numpy.arange(self.nx*(self.ny-1), self.nx*self.ny, dtype = 'uint32')
t_areas = copy.copy(b_areas)
t_directions = 2*numpy.ones_like(self.boundaries[0].top, dtype = numpy.uint8)
t_adjacent = self.boundaries[0].top+1
t_adjacent[-1] = t_adjacent[-2]
self.boundaries[0].location, ind1 = numpy.unique(numpy.append(numpy.append(numpy.append(self.boundaries[0].bottom, \
self.boundaries[0].left),\
self.boundaries[0].right),\
self.boundaries[0].top), return_index = True)
ind2 = numpy.argsort(self.boundaries[0].location)
self.boundaries[0].location = self.boundaries[0].location[ind2]
self.boundaries[0].areas = numpy.append(numpy.append(numpy.append(b_areas,\
l_areas),\
r_areas),\
t_areas)[ind1][ind2]
self.boundaries[0].directions = numpy.append(numpy.append(numpy.append(b_directions,\
l_directions),\
r_directions),\
t_directions)[ind1][ind2]
adjacent_nodes = numpy.append(numpy.append(numpy.append(b_adjacent,\
l_adjacent),\
r_adjacent),\
t_adjacent)[ind1][ind2]
adjacent_nodes = location_indexes_inv(adjacent_nodes, store = True, location = self.boundaries[0].location)
self.boundaries[0].adjacent = [{self.boundaries[0].directions[i]:adjacent_nodes[i]} for i in range(len(adjacent_nodes))]
self.boundaries[0].adjacent[0].update({l_directions[0]: self.nx})
self.boundaries[0].adjacent[-1].update({2:self.boundaries[0].adjacent[-1][1]})
#Corners
corners = location_indexes_inv([self.boundaries[0].bottom[-1], self.boundaries[0].left[-1]], store = False)
self.boundaries[0].adjacent[corners[0]].update({1: self.boundaries[0].adjacent[corners[0]][0]})
self.boundaries[0].adjacent[corners[1]].update({2: self.boundaries[0].adjacent[corners[1]][3]})
#Satellite borders
topleft = self.sat_i+(self.nysat-1)*self.nx
self.boundaries[1].bottom = numpy.arange(self.sat_i, self.sat_i+self.nxsat, dtype = 'uint32')
b_areas = self.dx*self.depth*numpy.ones_like(self.boundaries[1].bottom)
b_directions = numpy.zeros_like(self.boundaries[1].bottom, dtype = numpy.uint8)
b_adjacent = numpy.append(self.boundaries[1].bottom[1:], self.boundaries[1].bottom[-1]+self.nx)
self.boundaries[1].left = numpy.arange(self.sat_i, topleft+self.nx, self.nx, dtype = 'uint32')
l_areas = self.dy*self.depth*numpy.ones_like(self.boundaries[1].left)
l_directions = 3*numpy.ones_like(self.boundaries[1].left, dtype = numpy.uint8)
l_adjacent = numpy.append(self.boundaries[1].left[1:], self.boundaries[1].left[-1]+1)
self.boundaries[1].right = numpy.arange(self.sat_i+self.nxsat-1, topleft+self.nxsat-1+self.nx, self.nx, dtype = 'uint32')
r_areas = copy.copy(l_areas)
r_directions = numpy.ones_like(self.boundaries[1].right, dtype = numpy.uint8)
r_adjacent = self.boundaries[1].right+self.nx
r_adjacent[-1] = r_adjacent[-2]
self.boundaries[1].top = numpy.arange(topleft, topleft+self.nxsat, dtype = 'uint32')
t_areas = copy.copy(b_areas)
t_directions = 2+b_directions
t_adjacent = self.boundaries[1].top+1
t_adjacent[-1] = t_adjacent[-2]
self.boundaries[1].location, ind1 = numpy.unique(numpy.append(numpy.append(numpy.append(self.boundaries[1].bottom, \
self.boundaries[1].left),\
self.boundaries[1].right),\
self.boundaries[1].top), return_index = True)
ind2 = numpy.argsort(self.boundaries[1].location)
self.boundaries[1].location = self.boundaries[1].location[ind2]
self.boundaries[1].areas = numpy.append(numpy.append(numpy.append(b_areas,\
l_areas),\
r_areas),\
t_areas)[ind1][ind2]
self.boundaries[1].directions = numpy.append(numpy.append(numpy.append(b_directions,\
l_directions),\
r_directions),\
t_directions)[ind1][ind2]
adjacent_nodes = numpy.append(numpy.append(numpy.append(b_adjacent,\
l_adjacent),\
r_adjacent),\
t_adjacent)[ind1][ind2]
adjacent_nodes = location_indexes_inv(adjacent_nodes, store = True, location = self.boundaries[1].location)
self.boundaries[1].adjacent = [{self.boundaries[1].directions[i]:adjacent_nodes[i]} for i in range(len(adjacent_nodes))]
#Corners
self.boundaries[1].adjacent[0].update({l_directions[0]: self.nxsat})
corners = location_indexes_inv([self.boundaries[1].bottom[-1], self.boundaries[1].left[-1]], store = False)
self.boundaries[1].adjacent[corners[0]].update({1: self.boundaries[1].adjacent[corners[0]][0]})
self.boundaries[1].adjacent[corners[1]].update({2: self.boundaries[1].adjacent[corners[1]][3]})
self.boundaries[1].adjacent[-1].update({2:self.boundaries[1].adjacent[-1][1]})
#Little correction to make the bottom-left node account as an impact in the left side of the spacecraft
self.boundaries[1].directions[0] = int(3)
self.boundaries[1].ind_inner = numpy.concatenate(tuple(numpy.arange(self.boundaries[1].left[i]+1, self.boundaries[1].right[i])\
for i in range(1, int(self.nysat-1))))
#Volume
self.volumes = (self.dx*self.dy*self.depth)*numpy.ones((self.nPoints), dtype = 'float32')
numpy.divide.at(self.volumes , self.boundaries[0].location, 2)
numpy.divide.at(self.volumes , self.boundaries[1].location, 2)
#Corners of the outer boundary
numpy.divide.at(self.volumes, [self.boundaries[0].bottom[0],\
self.boundaries[0].bottom[-1],\
self.boundaries[0].top[0],\
self.boundaries[0].top[-1]], 2)
#Corners of the satellite
numpy.multiply.at(self.volumes, [self.boundaries[1].bottom[0],\
self.boundaries[1].bottom[-1],\
self.boundaries[1].top[0],\
self.boundaries[1].top[-1]], 3/2)
#Locations of borders in the mesh
self.location = numpy.append(self.boundaries[0].location, self.boundaries[1].location)
indices = numpy.argsort(self.location)
self.location = self.location[indices]
#Location of the satellite
self.location_sat = numpy.zeros((0), dtype ='uint32')
self.area_sat = numpy.zeros((0), dtype ='float32')
self.direction_sat = numpy.zeros((0), dtype ='uint8')
self.adjacent_sat = []
if self.boundaries[0].material != "space":
self.location_sat = numpy.append(self.location_sat, self.boundaries[0].location)
self.area_sat = numpy.append(self.area_sat, self.boundaries[0].areas)
self.direction_sat = numpy.append(self.direction_sat, self.boundaries[0].directions)
self.adjacent_sat.extend(self.boundaries[0].adjacent)
if self.boundaries[1].material != "space":
self.location_sat = numpy.append(self.location_sat, self.boundaries[1].location)
self.area_sat = numpy.append(self.area_sat, self.boundaries[1].areas)
self.direction_sat = numpy.append(self.direction_sat, self.boundaries[1].directions)
self.adjacent_sat.extend(self.boundaries[1].adjacent)
        if self.boundaries[0].material != "space" and self.boundaries[1].material != "space":
self.location_sat = self.location_sat[indices]
self.area_sat = self.area_sat[indices]
self.direction_sat = self.direction_sat[indices]
#TODO: Needs to be fixed later. The values of the dictionaries are wrong.
self.adjacent_sat = [self.adjacent_sat[i] for i in range(len(indices))]
#Setting up potential and location_index_inv for use in the main program
if len(self.location_sat) > 0:
test = location_indexes_inv([self.sat_i], store = True, location = self.location_sat)[0]
assert test == 0, "location_indexes_inv is not correctly set up"
def particleReformNodes(self):
mask = super().particleReformNodes()
mask[self.boundaries[1].ind_inner] = False
mask[self.boundaries[1].location] = False
        bottom = self.sat_i > self.nx
        left = self.sat_i%self.nx != 0
        right = (self.sat_i+self.nxsat-1)%self.nx != self.nx-1
        top = self.sat_i+self.nysat*self.nx < self.nPoints
if bottom:
mask[self.sat_i-self.nx:self.sat_i-self.nx+self.nxsat] = False
if left:
mask[self.sat_i-1:self.sat_i-1+self.nx*self.nysat:self.nx] = False
if right:
mask[self.sat_i+self.nxsat:self.sat_i+self.nxsat+self.nx*self.nysat:self.nx] = False
if top:
mask[self.sat_i+self.nx*self.nysat:self.sat_i+self.nx*self.nysat+self.nxsat] = False
if bottom and left:
mask[self.sat_i-self.nx-1] = False
if bottom and right:
mask[self.sat_i+self.nxsat] = False
if top and left:
mask[self.sat_i-1+self.nx*self.nysat] = False
if top and right:
mask[self.sat_i+self.nxsat+self.nx*self.nysat] = False
return mask
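# Example (standalone sketch): setDomain above corrects boundary-node volumes with
# unbuffered in-place ops; numpy.divide.at applies once per listed index, so a node
# listed in two boundary arrays (a corner) is divided twice:
def _demo_volume_correction():
    volumes = numpy.ones(6, dtype='float32')
    numpy.divide.at(volumes, [0, 1, 2], 2)  # edge nodes -> 0.5
    numpy.divide.at(volumes, [2, 3], 2)     # node 2 acts as a corner -> 0.25
    return volumes                          # [0.5, 0.5, 0.25, 0.5, 1.0, 1.0]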
#Mesh_2D_rm_sat_HET (Inherits from Mesh_2D_rm_sat):
#
#Definition = Mesh class for a 2D rectangular mesh with a rectangular satellite at its center, which contains a Hall Effect Thruster.
# The organization of the points will work as 0<=i<nx and 0<=j<ny, but taking into account the hole for the satellite.
#Attributes:
# +boundaries ([Boundary]) = It is [Outer_2D_Rectangular, Inner_2D_Rectangular, Inner_1D_HET].
# +Mesh_2D_rm_sat attributes class attributes.
#Methods:
# +Implementation of Mesh methods.
class Mesh_2D_rm_sat_HET (Mesh_2D_rm_sat):
type = "2D_rm_sat_HET"
def __init__(self, xmin, xmax, ymin, ymax,\
xminsat, xmaxsat, yminsat, ymaxsat,\
dx, dy, depth, boundaries):
super().__init__(xmin, xmax, ymin, ymax, xminsat, xmaxsat, yminsat, ymaxsat, dx, dy, depth, boundaries)
self.setDomain()
# +setDomain() = This function, with the values provided by the boundary files, will create the mesh, by setting up volumes, nPoints and any other subclass variable.
def setDomain(self):
super().setDomain()
#Initializing the rest of the attributes of the HET boundary
center = self.boundaries[1].top[len(self.boundaries[1].top)//2]
boundary_nx = numpy.rint((self.boundaries[2].xmax-self.boundaries[2].xmin)/self.dx).astype('uint32')
half_nx = boundary_nx//2
self.boundaries[2].location = numpy.arange(center-half_nx, center+half_nx+1, dtype = 'uint32')
self.boundaries[2].directions = 2*numpy.ones_like(self.boundaries[2].location, dtype = 'uint8')
self.boundaries[2].exit_nodes = numpy.asarray([self.boundaries[2].location[0], self.boundaries[2].location[-1]])
self.boundaries[2].exit_pot_nodes = numpy.asarray([self.boundaries[2].location[0], self.boundaries[2].location[1],\
self.boundaries[2].location[-2], self.boundaries[2].location[-1]])
#Mesh_2D_rm_separateBorders (Inherits from Mesh_2D_rm):
#
#Definition = Mesh class for a 2D rectangular mesh. The organization of the points will work as 0<=i<nx and 0<=j<ny. Also, for k parameter 0<=k<nPoints, k = nx*j+i.
# It differs from 'Mesh_2D_rm' in that, instead of a single rectangular boundary, the boundaries are four 1D boundaries, organized as
# [bottom, right, top, left].
#Attributes:
# +xmin (double) = Left limit of the domain (closest to the Sun).
# +xmax (double) = Right limit of the domain (farthest from the Sun).
# +ymin (double) = Bottom limit of the domain.
# +ymax (double) = Top limit of the domain.
# +depth (double) = Artificial thickness of the domain, to make it three-dimensional.
# +nx (int) = Number of nodes in the x direction.
# +ny (int) = Number of nodes in the y direction.
# +dx (float32) = Distance between adjacent horizontal nodes.
# +dy (float32) = Distance between adjacent vertical nodes.
# +boundaries ([Boundary]) = It is [Outer_1D_Rectangular x 4], with the order [bottom, right, top, left].
# +Mesh class attributes.
#Methods:
# +Implementation of Mesh methods.
class Mesh_2D_rm_separateBorders(Mesh_2D_rm):
type = "2D_rm_separateBorders"
def __init__(self, xmin, xmax, ymin, ymax, dx, dy, depth, boundaries):
super().__init__(xmin, xmax, ymin, ymax, dx, dy, depth, boundaries)
# +setDomain() = This function, with the values provided by the boundary files, will create the mesh, by setting up volumes, nPoints and any other subclass variable.
def setDomain(self):
self.nPoints = numpy.uint32(self.nx*self.ny)
self.volumes = (self.dx*self.dy*self.depth)*numpy.ones((self.nPoints), dtype = 'float32')
self.volumes[:self.nx] /= 2
self.volumes[self.nx*(self.ny-1):] /= 2
self.volumes[self.nx-1::self.nx] /= 2
self.volumes[:self.nx*self.ny:self.nx] /= 2
#Initializing the boundaries
#Bottom
self.boundaries[0].location = numpy.arange(0,self.nx, dtype = 'uint32')
self.boundaries[0].areas = self.dx*self.depth*numpy.ones_like(self.boundaries[0].location)
self.boundaries[0].areas[[0,-1]] /= 2
self.boundaries[0].directions = numpy.zeros_like(self.boundaries[0].location, dtype = numpy.uint8)
self.boundaries[0].adjacent = [{0:i+1} for i in range(self.nx)]
self.boundaries[0].adjacent[-1][0] = self.boundaries[0].adjacent[-2][0]
#Right
self.boundaries[1].location = numpy.arange(self.nx-1, self.nx*self.ny, self.nx, dtype = 'uint32')
self.boundaries[1].areas = self.dy*self.depth*numpy.ones_like(self.boundaries[1].location)
self.boundaries[1].areas[[0,-1]] /= 2
self.boundaries[1].directions = numpy.ones_like(self.boundaries[1].location, dtype = numpy.uint8)
self.boundaries[1].adjacent = [{1:i+1} for i in range(self.ny)]
self.boundaries[1].adjacent[-1][1] = self.boundaries[1].adjacent[-2][1]
#Top
self.boundaries[2].location = numpy.arange(self.nx*(self.ny-1), self.nx*self.ny, dtype = 'uint32')
self.boundaries[2].areas = self.dx*self.depth*numpy.ones_like(self.boundaries[2].location)
self.boundaries[2].areas[[0,-1]] /= 2
self.boundaries[2].directions = 2*numpy.ones_like(self.boundaries[2].location, dtype = numpy.uint8)
self.boundaries[2].adjacent = [{2:i+1} for i in range(self.nx)]
self.boundaries[2].adjacent[-1][2] = self.boundaries[2].adjacent[-2][2]
#Left
self.boundaries[3].location = numpy.arange(0, self.nx*self.ny, self.nx, dtype = 'uint32')
self.boundaries[3].areas = self.dy*self.depth*numpy.ones_like(self.boundaries[3].location)
self.boundaries[3].areas[[0,-1]] /= 2
self.boundaries[3].directions = 3*numpy.ones_like(self.boundaries[3].location, dtype = numpy.uint8)
self.boundaries[3].adjacent = [{3:i+1} for i in range(self.ny)]
self.boundaries[3].adjacent[-1][3] = self.boundaries[3].adjacent[-2][3]
#Set up the rest of the attributes and check for satellite borders
self.location, ind1 = numpy.unique(numpy.append(numpy.append(numpy.append(self.boundaries[0].location, \
self.boundaries[1].location),\
self.boundaries[2].location),\
self.boundaries[3].location), return_index = True)
ind2 = numpy.argsort(self.location)
self.location = self.location[ind2]
self.location_sat = numpy.zeros((0), dtype = numpy.uint8)
self.area_sat = numpy.zeros((0))
self.direction_sat = numpy.zeros((0), dtype = numpy.uint8)
self.adjacent_sat = []
for boundary in self.boundaries:
if boundary.material != 'space':
self.location_sat = numpy.append(self.location_sat, boundary.location)
self.area_sat = numpy.append(self.area_sat, boundary.areas)
self.direction_sat = numpy.append(self.direction_sat, boundary.directions)
for dic in boundary.adjacent:
self.adjacent_sat.append({key: boundary.location[value] for key, value in dic.items()})
self.location_sat, ind1 = numpy.unique(self.location_sat, return_index = True)
ind2 = numpy.argsort(self.location_sat)
self.location_sat = self.location_sat[ind2]
self.area_sat = self.area_sat[ind1][ind2]
self.direction_sat = self.direction_sat[ind1][ind2]
#Setting up location_index_inv for use in the main program
if len(self.location_sat) > 0:
test = location_indexes_inv([self.location_sat[0]], store = True, location = self.location_sat)[0]
assert test == 0, "location_indexes_inv is not correctly set up"
temp = []
for i in range(len(ind2)):
dic = self.adjacent_sat[ind1[ind2[i]]]
temp.append({})
temp[-1].update({key:location_indexes_inv([value], store = False)[0] for key, value in dic.items()})
self.adjacent_sat = temp
#Corners
if 0 in self.direction_sat and 3 in self.direction_sat:
self.adjacent_sat[0].update({3:self.nx})
if 0 in self.direction_sat and 1 in self.direction_sat:
corner = location_indexes_inv([self.boundaries[0].location[-1]], store = False)[0]
self.adjacent_sat[corner].update({1: self.adjacent_sat[corner][0]})
if 2 in self.direction_sat and 3 in self.direction_sat:
corner = location_indexes_inv([self.boundaries[3].location[-1]], store = False)[0]
            self.adjacent_sat[corner].update({2: self.adjacent_sat[corner][3]})
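# Example (standalone sketch): the four border arrays built above follow the same
# arange patterns for any nx x ny grid, and numpy.unique merges the corner nodes
# that two borders share:
def _demo_border_locations(nx=4, ny=3):
    bottom = numpy.arange(0, nx, dtype='uint32')
    right = numpy.arange(nx-1, nx*ny, nx, dtype='uint32')
    top = numpy.arange(nx*(ny-1), nx*ny, dtype='uint32')
    left = numpy.arange(0, nx*ny, nx, dtype='uint32')
    location = numpy.unique(numpy.concatenate((bottom, right, top, left)))
    return location  # 10 distinct border nodes of the 12-node grid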
#Mesh_2D_cm (Inherits from Mesh_2D_rm):
#
#Definition = Mesh class for a 2D cylindrical (z-r) mesh. The organization of the points will work as 0<=i<nx and 0<=j<ny. Also, for k parameter 0<=k<nPoints, k = nx*j+i.
#Attributes:
# +xmin (double) = Left limit of the domain (closest to the Sun).
# +xmax (double) = Right limit of the domain (farthest from the Sun).
# +ymin (double) = Bottom limit of the domain.
# +ymax (double) = Top limit of the domain.
# +nx (int) = Number of nodes in the x direction.
# +ny (int) = Number of nodes in the y direction.
# +dx (float32) = Distance between adyacent horizontal nodes
# +dy (float32) = Distance between adyacent vertical nodes
# +boundaries ([Boundary]) = It is [Outer_2D_Cylindrical].
# +Mesh class attributes.
#Methods:
# +Implementation of Mesh methods.
class Mesh_2D_cm (Mesh_2D_rm):
type = "2D_cm"
def __init__(self, xmin, xmax, ymin, ymax, dx, dy, boundaries):
self.boundaries = boundaries
self.xmin = xmin
self.xmax = xmax
self.ymin = ymin
self.ymax = ymax
self.dx = dx
self.dy = dy
self.nx = numpy.rint((xmax-xmin)/dx+1).astype('uint32')
self.ny = numpy.rint((ymax-ymin)/dy+1).astype('uint32')
self.setDomain()
# +setDomain() = This function, with the values provided by the boundary files, will create the mesh, by setting up volumes, nPoints and any other subclass variable.
def setDomain(self):
self.nPoints = numpy.uint32(self.nx*self.ny)
y = (numpy.arange(self.nPoints)//self.nx)*self.dy+self.ymin
if self.ymin == 0.0:
y[:self.nx] = self.dy/8
self.volumes = 2*numpy.pi*y*self.dy*self.dx
self.volumes[:self.nx] /= 1 if self.ymin == 0.0 else 2
self.volumes[self.nx*(self.ny-1):] /= 2
self.volumes[self.nx-1::self.nx] /= 2
self.volumes[:self.nx*self.ny:self.nx] /= 2
#Initializing the rest of the attributes of the outer boundary
self.boundaries[0].bottom = numpy.arange(0,self.nx, dtype = 'uint32')
if self.ymin == 0.0:
b_areas = 2*numpy.pi*y[:self.nx:self.nx-1]*self.dx
b_directions = numpy.zeros_like(b_areas, dtype = numpy.uint8)
b_adjacent = numpy.append(self.boundaries[0].bottom[0]+self.nx, self.boundaries[0].bottom[-1]+self.nx)
else:
b_areas = 2*numpy.pi*y[:self.nx]*self.dx
b_directions = numpy.zeros_like(self.boundaries[0].bottom, dtype = numpy.uint8)
b_adjacent = numpy.append(self.boundaries[0].bottom[1:], self.boundaries[0].bottom[-1]+self.nx)
self.boundaries[0].left = numpy.arange(0, self.nx*self.ny, self.nx, dtype = 'uint32')
l_areas = 2*numpy.pi*y[::self.nx]*self.dy
l_directions = 3*numpy.ones_like(self.boundaries[0].left, dtype = numpy.uint8)
l_adjacent = numpy.append(self.boundaries[0].left[1:], self.boundaries[0].left[-1]+1)
self.boundaries[0].right = numpy.arange(self.nx-1, self.nx*self.ny, self.nx, dtype = 'uint32')
r_areas = copy.copy(l_areas)
r_directions = numpy.ones_like(self.boundaries[0].right, dtype = numpy.uint8)
r_adjacent = self.boundaries[0].right+self.nx
r_adjacent[-1] = r_adjacent[-2]
self.boundaries[0].top = numpy.arange(self.nx*(self.ny-1), self.nx*self.ny, dtype = 'uint32')
t_areas = 2*numpy.pi*y[self.nx*(self.ny-1):]*self.dx
t_directions = 2*numpy.ones_like(self.boundaries[0].top, dtype = numpy.uint8)
t_adjacent = self.boundaries[0].top+1
t_adjacent[-1] = t_adjacent[-2]
if self.ymin == 0.0:
self.boundaries[0].location, ind1 = numpy.unique(numpy.append(numpy.append(numpy.append(self.boundaries[0].bottom[0::self.nx-1], \
self.boundaries[0].left),\
self.boundaries[0].right),\
self.boundaries[0].top), return_index = True)
else:
self.boundaries[0].location, ind1 = numpy.unique(numpy.append(numpy.append(numpy.append(self.boundaries[0].bottom, \
self.boundaries[0].left),\
self.boundaries[0].right),\
self.boundaries[0].top), return_index = True)
ind2 = numpy.argsort(self.boundaries[0].location)
self.boundaries[0].location = self.boundaries[0].location[ind2]
self.boundaries[0].areas = numpy.append(numpy.append(numpy.append(b_areas,\
l_areas),\
r_areas),\
t_areas)[ind1][ind2]
self.boundaries[0].directions = numpy.append(numpy.append(numpy.append(b_directions,\
l_directions),\
r_directions),\
t_directions)[ind1][ind2]
adjacent_nodes = numpy.append(numpy.append(numpy.append(b_adjacent,\
l_adjacent),\
r_adjacent),\
t_adjacent)[ind1][ind2]
self.location = self.boundaries[0].location
self.location_sat = numpy.zeros((0), dtype = numpy.uint8) if self.boundaries[0].material == "space" else self.boundaries[0].location
self.area_sat = numpy.zeros((0)) if self.boundaries[0].material == "space" else self.boundaries[0].areas
self.direction_sat = numpy.zeros((0)) if self.boundaries[0].material == "space" else self.boundaries[0].directions
test = location_indexes_inv([self.location[0]], store = True, location = self.location)[0]
assert test == 0, "location_indexes_inv is not correctly set up"
adjacent_nodes = location_indexes_inv(adjacent_nodes, store = False)
self.boundaries[0].adjacent = [{self.boundaries[0].directions[i]:adjacent_nodes[i]} for i in range(len(adjacent_nodes))]
self.boundaries[0].adjacent[0].update({l_directions[0]: self.nx})
self.boundaries[0].adjacent[-1].update({2:self.boundaries[0].adjacent[-1][1]})
#Corners
corners = location_indexes_inv([self.boundaries[0].bottom[-1], self.boundaries[0].left[-1]], store = False)
self.boundaries[0].adjacent[corners[0]].update({1: self.boundaries[0].adjacent[corners[0]][0]})
self.boundaries[0].adjacent[corners[1]].update({2: self.boundaries[0].adjacent[corners[1]][3]})
self.adjacent_sat = [] if self.boundaries[0].material == "space" else self.boundaries[0].adjacent
#Setting up potential and location_index_inv for use in the main program
if len(self.location_sat) > 0:
test = location_indexes_inv([self.location_sat[0]], store = True, location = self.location_sat)[0]
assert test == 0, "location_indexes_inv is not correctly set up"
def particleReformNodes(self):
mask = numpy.ones((self.nPoints), dtype = numpy.bool_)
mask[self.location] = False
mask[:self.nx] = False
mask[self.nx*(self.ny-2):self.nx*(self.ny-1)-1] = False
mask[self.nx-2::self.nx] = False
return mask
    # +reverseVTKOrdering(array): array = The array received as argument is converted from VTK style to numpy.
def reverseVTKOrdering(self, array):
dims = numpy.shape(array)
if len(dims) == 1:
return array.reshape((self.nPoints), order = 'F')
else:
return array.reshape((self.nPoints, 3), order = 'F')
    # +loadSpeciesVTK(self, species) = It creates particles around every node to match the preloaded density and velocity of that node. This will depend on each type of mesh.
def loadSpeciesVTK(self, species, pic, prec = 10**(-c.INDEX_PREC)):
#Number of particles created
particles = (species.mesh_values.density[:self.nPoints]*self.volumes[:self.nPoints]/species.spwt).astype(int)
ind = numpy.arange(self.nPoints)
#Setting up positions
pos = super(Mesh_recursive, self).getPosition(ind)
rmin = numpy.where(pos[:,1] == self.ymin, pos[:,1], pos[:,1]-self.dy/2)
rmax = numpy.where(pos[:,1] == self.ymax, pos[:,1], pos[:,1]+self.dy/2)
pos = numpy.repeat(pos, particles, axis = 0)
pos[:,1] = cmt.randomYPositions_2D_cm(particles, rmin, rmax)
random = numpy.random.rand(numpy.shape(pos)[0])
shifts = numpy.where((pos[:,0]-self.xmin) < prec, random*self.dx/2, (random-0.5)*self.dx)
shifts -= numpy.where((pos[:,0]-self.xmax) > -prec, random*self.dx/2, 0)
pos[:,0] += shifts
pos[:,1] += numpy.where((pos[:,1] - self.ymin) < prec, prec, 0)
pos[:,1] += numpy.where((pos[:,1] - self.ymax) > -prec, -prec, 0)
#Adding particles and thermal velocity
vel = self.boundaries[0].sampleIsotropicVelocity(self.boundaries[0].thermalVelocity(species.mesh_values.temperature[:self.nPoints], species.m), particles)
self.boundaries[0].addParticles(species, pos, vel)
#Clearing particles outside of boundaries
for boundary in self.boundaries:
boundary.applyParticleBoundary(species, 'open', old_position = None)
#Setting up shifted velocities
np = species.part_values.current_n
species.part_values.velocity[:np] += pic.gather(species.part_values.position[:np], species.mesh_values.velocity)
#Update trackers
self.boundaries[0].updateTrackers(species, species.part_values.current_n)
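# Example (standalone sketch): the node volumes used above are cylindrical ring
# volumes 2*pi*r*dy*dx, with the axis row (r = 0) given the artificial radius dy/8
# so its volume stays finite:
def _demo_cyl_volumes(nx=3, ny=3, dx=0.1, dy=0.1, ymin=0.0):
    r = (numpy.arange(nx*ny)//nx)*dy + ymin
    if ymin == 0.0:
        r[:nx] = dy/8
    return 2*numpy.pi*r*dy*dx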
#Mesh_2D_cm_sat (Inherits from Mesh_2D_cm):
#
#Definition = Mesh class for a 2D cylindrical (z-r) mesh with a cylindrical satellite at its center.
# The organization of the points will work as 0<=i<nx and 0<=j<ny, but taking into account the hole for the satellite.
# x represents 'z' and y represents 'r'.
#Attributes:
# +xmin (double) = Left limit of the domain (closest to the Sun).
# +xmax (double) = Right limit of the domain (farthest from the Sun).
# +ymin (double) = Bottom limit of the domain.
# +ymax (double) = Top limit of the domain.
# +xminsat (double) = Left limit of the satellite (closest to the Sun).
# +xmaxsat (double) = Right limit of the satellite (farthest from the Sun).
# +yminsat (double) = Bottom limit of the satellite.
# +ymaxsat (double) = Top limit of the satellite.
# +dx (float32) = Distance between adjacent horizontal nodes.
# +dy (float32) = Distance between adjacent vertical nodes.
# +nx (int) = Number of nodes in the x direction.
# +ny (int) = Number of nodes in the y direction.
# +nxsat (int) = Number of nodes of the satellite in the x direction.
# +nysat (int) = Number of nodes of the satellite in the y direction.
# +boundaries ([Boundary]) = It is [Outer_2D_Cylindrical, Inner_2D_Cylindrical].
# +Mesh class attributes.
#Methods:
# +Implementation of Mesh methods.
class Mesh_2D_cm_sat (Mesh_2D_cm):
type = "2D_cm_sat"
def __init__(self, xmin, xmax, ymin, ymax,\
xminsat, xmaxsat, yminsat, ymaxsat,\
dx, dy, boundaries):
self.xmin = xmin
self.xmax = xmax
self.ymin = ymin
self.ymax = ymax
self.xminsat = xminsat
self.xmaxsat = xmaxsat
self.yminsat = yminsat
self.ymaxsat = ymaxsat
self.dx = dx
self.dy = dy
self.nx = numpy.rint((xmax-xmin)/dx+1).astype('uint32')
self.ny = numpy.rint((ymax-ymin)/dy+1).astype('uint32')
        self.nxsat = numpy.rint((xmaxsat-xminsat)/dx+1).astype('uint32')
import torch
import numpy as np
import warnings
from scipy.stats import entropy
from sklearn.neighbors import NearestNeighbors
import torch.nn as nn
from numpy.linalg import norm
import sys,os
import torch.nn.functional as F
#from Common.Const import GPU
from torch.autograd import Variable, grad
sys.path.append(os.path.join(os.getcwd(),"metrics"))
from pointops import pointops_util
# Import CUDA version of approximate EMD, from https://github.com/zekunhao1995/pcgan-pytorch/
# from StructuralLosses.match_cost import match_cost
# from StructuralLosses.nn_distance import nn_distance
from torch.autograd import Variable
from Common.modules import pairwise_dist
from torch.distributions import Beta
from CD_EMD.emd_ import emd_module
from CD_EMD.cd.chamferdist import ChamferDistance as CD
import functools
from numpy import ones,zeros
def dist_o2l(p1, p2):
# distance from origin to the line defined by (p1, p2)
p12 = p2 - p1
u12 = p12 / np.linalg.norm(p12)
l_pp = np.dot(-p1, u12)
pp = l_pp*u12 + p1
return np.linalg.norm(pp)
def para_count(models):
count = 0
for model in models:
count += sum(param.numel() for param in model.parameters())
return count
class AverageValueMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0.0
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0.0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
# # Import CUDA version of CD, borrowed from https://github.com/ThibaultGROUEIX/AtlasNet
# try:
# from . chamfer_distance_ext.dist_chamfer import chamferDist
# CD = chamferDist()
# def distChamferCUDA(x,y):
# return CD(x,y,gpu)
# except:
class CrossEntropyLoss(nn.Module):
def __init__(self, smoothing=True):
super(CrossEntropyLoss, self).__init__()
self.smoothing = smoothing
def forward(self, preds, gts):
gts = gts.contiguous().view(-1)
if self.smoothing:
eps = 0.2
n_class = preds.size(1)
one_hot = torch.zeros_like(preds).scatter(1, gts.view(-1, 1), 1)
one_hot = one_hot * (1 - eps) + (1 - one_hot) * eps / (n_class - 1)
log_prb = F.log_softmax(preds, dim=1)
loss = -(one_hot * log_prb).sum(dim=1).mean()
else:
loss = F.cross_entropy(preds, gts, reduction='mean')
return loss
class ChamferLoss(nn.Module):
def __init__(self):
super(ChamferLoss, self).__init__()
self.use_cuda = torch.cuda.is_available()
def forward(self,preds,gts):
P = self.batch_pairwise_dist(gts, preds)
mins, _ = torch.min(P, 1)
loss_1 = torch.sum(mins)
mins, _ = torch.min(P, 2)
loss_2 = torch.sum(mins)
return loss_1 + loss_2
def batch_pairwise_dist(self,x,y):
bs, num_points_x, points_dim = x.size()
_, num_points_y, _ = y.size()
#xx = torch.bmm(x, x.transpose(2,1))
xx = torch.sum(x ** 2, dim=2, keepdim=True)
yy = torch.sum(y ** 2, dim=2, keepdim=True)
xy = -2 * torch.bmm(x, y.permute(0, 2, 1))
dist = xy + xx + yy.permute(0, 2, 1) # [B, N, N]
return dist
        # Unreachable legacy implementation, kept for reference:
        # yy = torch.bmm(y, y.transpose(2,1))
        # zz = torch.bmm(x, y.transpose(2,1))
        # if self.use_cuda:
        #     dtype = torch.cuda.LongTensor
        # else:
        #     dtype = torch.LongTensor
        # diag_ind_x = torch.arange(0, num_points_x).type(dtype)
        # diag_ind_y = torch.arange(0, num_points_y).type(dtype)
        # rx = xx[:, diag_ind_x, diag_ind_x].unsqueeze(1).expand_as(zz.transpose(2,1))
        # ry = yy[:, diag_ind_y, diag_ind_y].unsqueeze(1).expand_as(zz)
        # P = (rx.transpose(2,1) + ry - 2*zz)
        # return P
# def batch_pairwise_dist(self,x,y):
#
# bs, num_points_x, points_dim = x.size()
# _, num_points_y, _ = y.size()
#
# xx = torch.sum(x ** 2, dim=2, keepdim=True)
# yy = torch.sum(y ** 2, dim=2, keepdim=True)
# yy = yy.permute(0, 2, 1)
#
# xi = -2 * torch.bmm(x, y.permute(0, 2, 1))
# dist = xi + xx + yy # [B, N, N]
# return dist
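# Example (standalone sketch): the pairwise distance above relies on the expansion
# ||x_i - y_j||^2 = ||x_i||^2 + ||y_j||^2 - 2<x_i, y_j>, which matches squared
# torch.cdist up to floating-point error:
def _demo_pairwise_dist_check():
    x, y = torch.randn(2, 5, 3), torch.randn(2, 7, 3)
    xx = torch.sum(x ** 2, dim=2, keepdim=True)  # [B, N, 1]
    yy = torch.sum(y ** 2, dim=2, keepdim=True)  # [B, M, 1]
    dist = -2 * torch.bmm(x, y.permute(0, 2, 1)) + xx + yy.permute(0, 2, 1)
    assert torch.allclose(dist, torch.cdist(x, y) ** 2, atol=1e-4)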
def dist_simple(x,y,loss="l2"):
if loss == "l2":
dist = torch.sum((x - y) ** 2, dim=-1).sum(dim=1).float()
else:
dist = torch.sum(torch.abs(x - y), dim=-1).sum(dim=1).float()
return dist.mean()
def distChamferCUDA(x, y):
cd = CD()
cd0, cd1, _, _ = cd(x, y)
    return cd0, cd1  # nn_distance is unavailable here; return the two directed CD terms
def emd_approx(sample, ref):
B, N, N_ref = sample.size(0), sample.size(1), ref.size(1)
# import ipdb
# ipdb.set_trace()
assert N == N_ref, "Not sure what would EMD do in this case"
emd = match_cost(sample, ref) # (B,)
emd_norm = emd / float(N) # (B,)
return emd_norm
def CD_loss(x, y):
dists_forward, dists_backward = nn_distance(x, y)
dists_forward = torch.mean(dists_forward,dim=1)
dists_backward = torch.mean(dists_backward,dim=1)
cd_dist = torch.mean(dists_forward+dists_backward)
return cd_dist
def EMD_loss(sample, ref):
B, N, N_ref = sample.size(0), sample.size(1), ref.size(1)
# import ipdb
# ipdb.set_trace()
assert N == N_ref, "Not sure what would EMD do in this case"
emd = match_cost(sample, ref) # (B,)
emd_norm = emd / float(N) # (B,)
return emd_norm
def compute_mean_covariance(points):
bs, ch, nump = points.size()
# ----------------------------------------------------------------
mu = points.mean(dim=-1, keepdim=True) # Bx3xN -> Bx3x1
# ----------------------------------------------------------------
tmp = points - mu.repeat(1, 1, nump) # Bx3xN - Bx3xN -> Bx3xN
tmp_transpose = tmp.transpose(1, 2) # Bx3xN -> BxNx3
covariance = torch.bmm(tmp, tmp_transpose)
covariance = covariance / nump
return mu, covariance # Bx3x1 Bx3x3
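# Example (standalone sketch): compute_mean_covariance treats the last axis as the
# point axis, so a Bx3xN cloud yields a Bx3x1 mean and a Bx3x3 covariance:
def _demo_mean_covariance():
    pts = torch.randn(4, 3, 100)  # B=4 clouds, 3 coordinates, 100 points each
    mu, cov = compute_mean_covariance(pts)
    assert mu.shape == (4, 3, 1) and cov.shape == (4, 3, 3)
    return mu, cov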
def get_local_pair(pt1, pt2):
pt1_batch, pt1_N, pt1_M = pt1.size()
pt2_batch, pt2_N, pt2_M = pt2.size()
# pt1: Bx3xM pt2: Bx3XN (N > M)
# print('pt1: {} pt2: {}'.format(pt1.size(), pt2.size()))
new_xyz = pt1.transpose(1, 2).contiguous() # Bx3xM -> BxMx3
pt1_trans = pt1.transpose(1, 2).contiguous() # Bx3xM -> BxMx3
pt2_trans = pt2.transpose(1, 2).contiguous() # Bx3xN -> BxNx3
K=20
group = pointops_util.Gen_QueryAndGroupXYZ(radius=None, nsample=K, use_xyz=False)
g_xyz1 = group(pt1_trans, new_xyz) # Bx3xMxK
# print('g_xyz1: {}'.format(g_xyz1.size()))
g_xyz2 = group(pt2_trans, new_xyz) # Bx3xMxK
# print('g_xyz2: {}'.format(g_xyz2.size()))
g_xyz1 = g_xyz1.transpose(1, 2).contiguous().view(-1, 3, K) # Bx3xMxK -> BxMx3xK -> (BM)x3xK
# print('g_xyz1: {}'.format(g_xyz1.size()))
g_xyz2 = g_xyz2.transpose(1, 2).contiguous().view(-1, 3, K) # Bx3xMxK -> BxMx3xK -> (BM)x3xK
# print('g_xyz2: {}'.format(g_xyz2.size()))
# print('====================== FPS ========================')
# print(pt1.shape,g_xyz1.shape)
# print(pt2.shape,g_xyz2.shape)
mu1, var1 = compute_mean_covariance(g_xyz1)
mu2, var2 = compute_mean_covariance(g_xyz2)
# print('mu1: {} var1: {}'.format(mu1.size(), var1.size()))
# print('mu2: {} var2: {}'.format(mu2.size(), var2.size()))
# --------------------------------------------------
# like_mu12 = self.shape_loss_fn(mu1, mu2)
# like_var12 = self.shape_loss_fn(var1, var2)
# ----------------------------------------------------
# =========$$$ CD loss $$$===============
# print("p1,p2:",pt1.shape,pt2.shape)
# print("mu2:",mu1.shape,mu2.shape,pt1_batch,pt1_N,pt1_M)
mu1 = mu1.view(pt1_batch, -1, 3)
mu2 = mu2.view(pt2_batch, -1, 3)
var1 = var1.view(pt1_batch, -1, 9)
var2 = var2.view(pt2_batch, -1, 9)
chamfer_loss = ChamferLoss()
like_mu12 = chamfer_loss(mu1, mu2) / float(pt1_M)
like_var12 = chamfer_loss(var1, var2) / float(pt1_M)
# print('mu: {} var: {}'.format(like_mu12.item(), like_var12.item()))
return like_mu12, like_var12
# Borrowed from https://github.com/ThibaultGROUEIX/AtlasNet
def distChamfer(a, b):
x, y = a, b
bs, num_points, points_dim = x.size()
xx = torch.bmm(x, x.transpose(2, 1))
yy = torch.bmm(y, y.transpose(2, 1))
zz = torch.bmm(x, y.transpose(2, 1))
diag_ind = torch.arange(0, num_points).to(a).long()
rx = xx[:, diag_ind, diag_ind].unsqueeze(1).expand_as(xx)
ry = yy[:, diag_ind, diag_ind].unsqueeze(1).expand_as(yy)
P = (rx.transpose(2, 1) + ry - 2 * zz)
return P.min(1)[0], P.min(2)[0]
def EMD_CD(sample_pcs, ref_pcs, batch_size, accelerated_cd=False, reduced=True):
N_sample = sample_pcs.shape[0]
N_ref = ref_pcs.shape[0]
assert N_sample == N_ref, "REF:%d SMP:%d" % (N_ref, N_sample)
cd_lst = []
emd_lst = []
iterator = range(0, N_sample, batch_size)
for b_start in iterator:
b_end = min(N_sample, b_start + batch_size)
sample_batch = sample_pcs[b_start:b_end]
ref_batch = ref_pcs[b_start:b_end]
if accelerated_cd:
dl, dr = distChamferCUDA(sample_batch, ref_batch)
else:
dl, dr = distChamfer(sample_batch, ref_batch)
cd_lst.append(dl.mean(dim=1) + dr.mean(dim=1))
emd_batch = emd_approx(sample_batch, ref_batch)
emd_lst.append(emd_batch)
if reduced:
cd = torch.cat(cd_lst).mean()
emd = torch.cat(emd_lst).mean()
else:
cd = torch.cat(cd_lst)
emd = torch.cat(emd_lst)
results = {
'MMD-CD': cd,
'MMD-EMD': emd,
}
return results
def _pairwise_EMD_CD_(sample_pcs, ref_pcs, batch_size, accelerated_cd=True):
N_sample = sample_pcs.shape[0]
N_ref = ref_pcs.shape[0]
all_cd = []
all_emd = []
iterator = range(N_sample)
for sample_b_start in iterator:
sample_batch = sample_pcs[sample_b_start]
cd_lst = []
emd_lst = []
for ref_b_start in range(0, N_ref, batch_size):
ref_b_end = min(N_ref, ref_b_start + batch_size)
ref_batch = ref_pcs[ref_b_start:ref_b_end]
batch_size_ref = ref_batch.size(0)
sample_batch_exp = sample_batch.view(1, -1, 3).expand(batch_size_ref, -1, -1)
sample_batch_exp = sample_batch_exp.contiguous()
if accelerated_cd:
dl, dr = distChamferCUDA(sample_batch_exp, ref_batch)
else:
dl, dr = distChamfer(sample_batch_exp, ref_batch)
cd_lst.append((dl.mean(dim=1) + dr.mean(dim=1)).view(1, -1))
emd_batch = emd_approx(sample_batch_exp, ref_batch)
emd_lst.append(emd_batch.view(1, -1))
cd_lst = torch.cat(cd_lst, dim=1)
emd_lst = torch.cat(emd_lst, dim=1)
all_cd.append(cd_lst)
all_emd.append(emd_lst)
all_cd = torch.cat(all_cd, dim=0) # N_sample, N_ref
all_emd = torch.cat(all_emd, dim=0) # N_sample, N_ref
return all_cd, all_emd
# Adapted from https://github.com/xuqiantong/GAN-Metrics/blob/master/framework/metric.py
def knn(Mxx, Mxy, Myy, k, sqrt=False):
n0 = Mxx.size(0)
n1 = Myy.size(0)
label = torch.cat((torch.ones(n0), torch.zeros(n1))).to(Mxx)
M = torch.cat((torch.cat((Mxx, Mxy), 1), torch.cat((Mxy.transpose(0, 1), Myy), 1)), 0)
if sqrt:
M = M.abs().sqrt()
INFINITY = float('inf')
val, idx = (M + torch.diag(INFINITY * torch.ones(n0 + n1).to(Mxx))).topk(k, 0, False)
count = torch.zeros(n0 + n1).to(Mxx)
for i in range(0, k):
count = count + label.index_select(0, idx[i])
pred = torch.ge(count, (float(k) / 2) * torch.ones(n0 + n1).to(Mxx)).float()
s = {
'tp': (pred * label).sum(),
'fp': (pred * (1 - label)).sum(),
'fn': ((1 - pred) * label).sum(),
'tn': ((1 - pred) * (1 - label)).sum(),
}
s.update({
'precision': s['tp'] / (s['tp'] + s['fp'] + 1e-10),
'recall': s['tp'] / (s['tp'] + s['fn'] + 1e-10),
'acc_t': s['tp'] / (s['tp'] + s['fn'] + 1e-10),
'acc_f': s['tn'] / (s['tn'] + s['fp'] + 1e-10),
'acc': torch.eq(label, pred).float().mean(),
})
return s
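# Example (standalone sketch): the 1-NN two-sample test above should sit near 50%
# accuracy when both sets come from the same distribution:
def _demo_knn_metric(n=64, d=8):
    x, y = torch.randn(n, d), torch.randn(n, d)
    Mxx, Mxy, Myy = torch.cdist(x, x), torch.cdist(x, y), torch.cdist(y, y)
    return knn(Mxx, Mxy, Myy, k=1)['acc']  # expected to hover around 0.5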
def lgan_mmd_cov(all_dist):
N_sample, N_ref = all_dist.size(0), all_dist.size(1)
min_val_fromsmp, min_idx = torch.min(all_dist, dim=1)
min_val, _ = torch.min(all_dist, dim=0)
mmd = min_val.mean()
mmd_smp = min_val_fromsmp.mean()
cov = float(min_idx.unique().view(-1).size(0)) / float(N_ref)
cov = torch.tensor(cov).to(all_dist)
return {
'lgan_mmd': mmd,
'lgan_cov': cov,
'lgan_mmd_smp': mmd_smp,
}
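# Example (standalone sketch): on a toy 2x3 sample-by-reference distance matrix,
# lgan_mmd averages the per-reference minima and lgan_cov counts which references
# are somebody's nearest neighbour:
def _demo_mmd_cov():
    d = torch.tensor([[0.1, 0.9, 0.9],
                      [0.5, 0.3, 0.8]])
    out = lgan_mmd_cov(d)
    # per-reference minima 0.1, 0.3, 0.8 -> lgan_mmd = 0.4
    # nearest references of the two samples are {0, 1} -> lgan_cov = 2/3
    return out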
def compute_all_metrics(sample_pcs, ref_pcs, batch_size, accelerated_cd=False):
results = {}
M_rs_cd, M_rs_emd = _pairwise_EMD_CD_(ref_pcs, sample_pcs, batch_size, accelerated_cd=accelerated_cd)
res_cd = lgan_mmd_cov(M_rs_cd.t())
results.update({
"%s-CD" % k: v for k, v in res_cd.items()
})
res_emd = lgan_mmd_cov(M_rs_emd.t())
results.update({
"%s-EMD" % k: v for k, v in res_emd.items()
})
M_rr_cd, M_rr_emd = _pairwise_EMD_CD_(ref_pcs, ref_pcs, batch_size, accelerated_cd=accelerated_cd)
M_ss_cd, M_ss_emd = _pairwise_EMD_CD_(sample_pcs, sample_pcs, batch_size, accelerated_cd=accelerated_cd)
# 1-NN results
one_nn_cd_res = knn(M_rr_cd, M_rs_cd, M_ss_cd, 1, sqrt=False)
results.update({
"1-NN-CD-%s" % k: v for k, v in one_nn_cd_res.items() if 'acc' in k
})
one_nn_emd_res = knn(M_rr_emd, M_rs_emd, M_ss_emd, 1, sqrt=False)
results.update({
"1-NN-EMD-%s" % k: v for k, v in one_nn_emd_res.items() if 'acc' in k
})
return results
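# Editor's sketch: a smoke test for compute_all_metrics, assuming distChamfer
# and emd_approx (referenced above) are defined earlier in this module. With
# accelerated_cd=False no CUDA kernel is needed, but the pairwise passes are
# O(N^2) in the number of clouds, so keep N small.
def _compute_all_metrics_smoke_test():
    import torch
    torch.manual_seed(0)
    sample = torch.rand(4, 128, 3)  # 4 clouds of 128 points each
    ref = torch.rand(4, 128, 3)
    return compute_all_metrics(sample, ref, batch_size=4, accelerated_cd=False)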
def compute_all_metrics2(sample_pcs, ref_pcs, normalize=False):
from Common.point_operation import normalize_point_cloud
gen_clouds_buf = sample_pcs
ref_clouds_buf = ref_pcs
if normalize:
gen_clouds_buf = gen_clouds_buf.cpu().numpy()
# gen_clouds_inds = set(np.arange(gen_clouds_buf.shape[0]))
# nan_gen_clouds_inds = set(np.isnan(gen_clouds_buf).sum(axis=(1, 2)).nonzero()[0])
# gen_clouds_inds = list(gen_clouds_inds - nan_gen_clouds_inds)
# dup_gen_clouds_inds = np.random.choice(gen_clouds_inds, size=len(nan_gen_clouds_inds))
# gen_clouds_buf[list(nan_gen_clouds_inds)] = gen_clouds_buf[dup_gen_clouds_inds]
gen_clouds_buf = normalize_point_cloud(gen_clouds_buf)
gen_clouds_buf = torch.from_numpy(gen_clouds_buf).cuda()
gg_cds = pairwise_CD(gen_clouds_buf, gen_clouds_buf)
tt_cds = pairwise_CD(ref_clouds_buf, ref_clouds_buf)
gt_cds = pairwise_CD(gen_clouds_buf, ref_clouds_buf)
metrics = {}
jsd = JSD(gen_clouds_buf.cpu().numpy(), ref_clouds_buf.cpu().numpy(),
clouds1_flag='gen', clouds2_flag='ref', warning=False)
cd_covs = COV(gt_cds)
cd_mmds = MMD(gt_cds)
cd_1nns = KNN(gg_cds, gt_cds, tt_cds, 1)
metrics = {
"JSD": jsd,
"COV-CD": cd_covs,
"MMD-CD": cd_mmds,
"1NN-CD": cd_1nns,
}
return metrics
def f_score(predicted_clouds, true_clouds, threshold=0.001):
ld, rd = distChamferCUDA(predicted_clouds, true_clouds)
precision = 100. * (rd < threshold).float().mean(1)
recall = 100. * (ld < threshold).float().mean(1)
return 2. * precision * recall / (precision + recall + 1e-7)
def get_voxel_occ_dist(all_clouds, clouds_flag='gen', res=28, bound=0.5, bs=128, warning=True):
if np.any(np.fabs(all_clouds) > bound) and warning:
print('{} clouds out of cube bounds: [-{}; {}]'.format(clouds_flag, bound, bound))
n_nans = np.isnan(all_clouds).sum()
if n_nans > 0:
print('{} NaN values in point cloud tensors.'.format(n_nans))
p2v_dist = np.zeros((res, res, res), dtype=np.uint64)
step = 1. / res
v_bs = -0.5 + np.arange(res + 1) * step
nbs = all_clouds.shape[0] // bs + 1
for i in range(nbs):
clouds = all_clouds[bs * i:bs * (i + 1)]
        # use the res lower bin edges (v_bs[:-1]) rather than a hardcoded 28,
        # so the function also works for res != 28
        preiis = clouds[:, :, 0].reshape(1, -1)
        preiis = np.logical_and(v_bs[:-1].reshape(-1, 1) <= preiis, preiis < v_bs[1:].reshape(-1, 1))
        iis = preiis.argmax(0)
        iis_values = preiis.sum(0) > 0
        prejjs = clouds[:, :, 1].reshape(1, -1)
        prejjs = np.logical_and(v_bs[:-1].reshape(-1, 1) <= prejjs, prejjs < v_bs[1:].reshape(-1, 1))
        jjs = prejjs.argmax(0)
        jjs_values = prejjs.sum(0) > 0
        prekks = clouds[:, :, 2].reshape(1, -1)
        prekks = np.logical_and(v_bs[:-1].reshape(-1, 1) <= prekks, prekks < v_bs[1:].reshape(-1, 1))
        kks = prekks.argmax(0)
        kks_values = prekks.sum(0) > 0
values = np.uint64(np.logical_and(np.logical_and(iis_values, jjs_values), kks_values))
|
np.add.at(p2v_dist, (iis, jjs, kks), values)
|
numpy.add.at
|
# Copyright (c) 2021 <NAME>
"""
BackTester类根据传入信号以及交易逻辑进行交易
开发日志
2021-09-07
-- 更新:BackTester类统计pnl序列的平均日收益,最大回撤,标准差,夏普比,最长亏损时间
2021-09-11
-- 修复:回测时剔除涨停板
2021-09-21
-- 修复:回测是要根据上一期的持仓确定涨停板是否剔除,有可能涨停的股票是有持仓的
"""
import numpy as np
import datetime
class BackTester:
def __init__(self, data, signal=None):
"""
:param signal: 信号矩阵
:param date: Data类
"""
self.signal = signal
self.data = data
        self.pnl = []  # pnl series
        self.cumulated_pnl = []  # cumulative pnl
        self.market_pnl = []  # for long-only runs, the market pnl, for comparing excess return
        self.market_cumulated_pnl = []
        self.max_dd = 0  # maximum drawdown of the pnl series
        self.mean_pnl = 0  # mean pnl
        self.std = 0  # standard deviation of the pnl series
        self.sharp_ratio = 0  # Sharpe ratio
        self.max_loss_time = 0  # longest losing streak
        self.log = []  # records the stocks selected on each specific trading day
def cal_stats(self):
self.std = np.std(self.pnl)
self.mean_pnl = np.mean(self.pnl)
self.sharp_ratio = self.mean_pnl / self.std if self.std > 0 else 0
max_dd = 0
max_pnl = 0
max_loss_time = 0
loss_time = 0
for i in self.cumulated_pnl:
if i > max_pnl:
max_pnl = i
loss_time = 0
else:
if max_pnl - i > max_dd:
max_dd = max_pnl - i
loss_time += 1
if loss_time > max_loss_time:
max_loss_time = loss_time
self.max_dd = max_dd
self.max_loss_time = max_loss_time
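    # Editor's worked example (illustration only): with
    #   cumulated_pnl = [1.0, 2.0, 1.5, 0.5, 2.5]
    # the running peak reaches 2.0, the worst peak-to-trough gap is
    # 2.0 - 0.5 = 1.5 (max_dd), and the series spends two consecutive steps
    # below its running peak, so max_loss_time = 2.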
    def long_short(self, start_date=None, end_date=None, signal=None, n=0):  # long-short strategy
        """
        :param signal: optional custom signal
        :param start_date: start date
        :param end_date: end date
        :param n: number of stocks to enter; 0 means use the full top universe
        :return:
        """
self.pnl = []
self.cumulated_pnl = []
self.market_pnl = []
self.market_cumulated_pnl = []
if start_date is None:
start_date = str(self.data.start_date)
if end_date is None:
end_date = str(self.data.end_date)
start, end = self.data.get_real_date(start_date, end_date)
if signal is None:
signal = self.signal
        if n != 0:  # not handled yet
return
else:
for i in range(start, end + 1):
tmp = signal[i].copy()
tmp[self.data.top[i]] -= np.mean(tmp[self.data.top[i]])
tmp[self.data.top[i] & (tmp > 0)] /= np.sum(tmp[self.data.top[i] & (tmp > 0)])
tmp[self.data.top[i] & (tmp < 0)] /= -np.sum(tmp[self.data.top[i] & (tmp < 0)])
self.pnl.append(np.sum(tmp[self.data.top[i]] * self.data.ret[i + 1, self.data.top[i]]) / 2)
if not self.cumulated_pnl:
self.cumulated_pnl.append(np.sum(tmp[self.data.top[i]] * self.data.ret[i + 1,
self.data.top[i]]) / 2)
else:
self.cumulated_pnl.append(
self.cumulated_pnl[-1] + np.sum(tmp[self.data.top[i]] * self.data.ret[i + 1,
self.data.top[i]]) / 2)
self.cal_stats()
    def long(self, start_date=None, end_date=None, n=0, signal=None, zt_filter=False,
             position_mode='weighted'):  # long-only strategy
        """
        :param start_date: start date
        :param end_date: end date
        :param n: number of stocks to enter; 0 means use the full top universe
        :param signal: optional custom signal; defaults to the instance's own signal
        :param zt_filter: whether to filter out limit-up stocks
        :param position_mode: position sizing mode
        :return:
        """
self.pnl = []
self.cumulated_pnl = []
self.market_pnl = []
self.market_cumulated_pnl = []
if start_date is None:
start_date = str(self.data.start_date)
if end_date is None:
end_date = str(self.data.end_date)
start, end = self.data.get_real_date(start_date, end_date)
if signal is None:
signal = self.signal.copy()
        pos = np.array([i for i in range(len(self.data.top[0]))])  # index of each position
        last_pos = None  # stocks held in the previous period
        if n != 0:  # not handled yet
return
else:
for i in range(start, end + 1):
tmp = signal[i].copy()
tmp[self.data.top[i]] -= np.mean(tmp[self.data.top[i]])
tmp[self.data.top[i] & (tmp < 0)] = 0
tmp_ret = self.data.ret[i + 1, self.data.top[i] & (tmp > 0)].copy()
                # tmp[self.data.top[i] & (self.data.ret[i] >= 0.099)] = 0  # exclude limit-up stocks
tmp[self.data.top[i] & (tmp > 0)] /= np.sum(tmp[self.data.top[i] & (tmp > 0)])
sig_tmp = tmp[self.data.top[i] & (tmp > 0)].copy()
this_pos = pos[self.data.top[i] & (tmp > 0)].copy()
if zt_filter:
if last_pos is not None:
for j in range(len(this_pos)):
if (self.data.ret[i, self.data.top[i] & (tmp > 0)][j] > 0.099) and (this_pos[j]
not in last_pos):
sig_tmp[j] = 0
this_pos[j] = -1
else:
for j in range(len(this_pos)):
if self.data.ret[i, self.data.top[i] & (tmp > 0)][j] > 0.099:
sig_tmp[j] = 0
this_pos[j] = -1
last_pos = this_pos.copy()
self.pnl.append(np.sum(sig_tmp * tmp_ret) -
np.mean(self.data.ret[i + 1, self.data.top[i]]))
if not self.cumulated_pnl:
self.cumulated_pnl.append(np.sum(sig_tmp * tmp_ret) -
np.mean(self.data.ret[i + 1, self.data.top[i]]))
else:
self.cumulated_pnl.append(
self.cumulated_pnl[-1] + np.sum(sig_tmp * tmp_ret) -
np.mean(self.data.ret[i + 1, self.data.top[i]]))
self.cal_stats()
    def long_top_n(self, start_date=None, end_date=None, n=0, signal=None, zt_filter=True,
                   position_mode='weighted'):  # go long the n stocks with the highest predicted scores
        """
        :param start_date: start date
        :param end_date: end date
        :param n: how many stocks to go long; 0 means use the full top universe
        :param signal: optional custom signal; defaults to the instance's own signal
        :param zt_filter: whether to filter out limit-up stocks
        :param position_mode: position sizing mode
        :return:
        """
self.pnl = []
self.cumulated_pnl = []
self.market_pnl = []
self.market_cumulated_pnl = []
        # For validation: record the exact tickers selected each day and their realized returns
self.log = []
if start_date is None:
start_date = str(self.data.start_date)
if end_date is None:
end_date = str(self.data.end_date)
start, end = self.data.get_real_date(start_date, end_date)
if signal is None:
signal = self.signal.copy()
        pos = np.array([i for i in range(len(self.data.top[0]))])  # index of each position
        last_pos = None  # stocks held in the previous period
if n != 0:
zt = []
in_zt = []
zt_ret = []
zt_w = []
for i in range(start, end + 1):
tmp = signal[i].copy()
tmp[self.data.top[i]] -= np.mean(tmp[self.data.top[i]])
tmp[self.data.top[i] & (tmp > 0)] /=
|
np.sum(tmp[self.data.top[i] & (tmp > 0)])
|
numpy.sum
|
import warnings
import numpy as np
import scipy.ndimage
def get_det_coords(size: int, spacing: float) -> np.ndarray:
"""Compute pixel-center positions for 2D detector
The centers of the pixels of a detector are usually not aligned to
a pixel grid. If we center the detector at the origin, odd images
will have a pixel at zero, whereas even images will have two pixels
next to the actual center.
Parameters
----------
size: int
Size of the detector (number of detection points).
spacing: float
Distance between two detection points in pixels.
Returns
-------
arr: 1D ndarray
Pixel coordinates.
"""
latmax = (size / 2 - .5) * spacing
lat = np.linspace(-latmax, latmax, size, endpoint=True)
return lat
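# Editor's sketch: a quick check of the docstring above. An even detector has
# its two central pixels straddling the origin; an odd one has a pixel at zero.
def _det_coords_demo():
    print(get_det_coords(4, 1.0))  # [-1.5 -0.5  0.5  1.5]
    print(get_det_coords(5, 1.0))  # [-2. -1.  0.  1.  2.]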
def get_fan_coords(size: int, spacing: float, distance: float,
numang: int) -> np.ndarray:
""" Compute equispaced angular coordinates for 2d detector
The centers of the pixels of a detector are usually not aligned to
a pixel grid. If we center the detector at the origin, odd images
will have a pixel at zero, whereas even images will have two pixels
next to the actual center.
We want to compute an equispaced array of `numang` angles that go
between the centers of the outmost far pixels of the detector.
Parameters
----------
size: int
Size of the detector (number of detection points).
spacing: float
Distance between two detection points in pixels.
distance: float
Axial distance from the detector to the angular measurement
position in pixels.
numang: int
Number of angles.
Returns
-------
angles, latpos: two 1D ndarrays
Angles and pixel coordinates at the detector.
Notes
-----
    Actually one would not need to define spacing and distance, but for
    convenience, these parameters are separated and an arbitrary unit
    'pixel' is defined.
"""
# Compute the angular positions of the outmost pixels
latmax = (size / 2 - .5) * spacing
angmax = abs(np.arctan2(latmax, distance))
# equispaced angles
angles = np.linspace(-angmax, angmax, numang, endpoint=True)
latang = np.tan(angles) * distance
latang[0] = -latmax
latang[-1] = latmax
return angles, latang
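# Editor's sketch: the outermost returned lateral positions are pinned to the
# outermost pixel centers, so they agree with get_det_coords for the same size.
def _fan_coords_demo():
    angles, latang = get_fan_coords(size=4, spacing=1.0, distance=10.0, numang=7)
    assert latang[0] == -1.5 and latang[-1] == 1.5
    return angles, latang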
def radon_fan(arr, det_size: int, det_spacing: float = 1, shift_size: int = 1,
lS=1, lD=None, return_ang: bool = False,
count=None, max_count=None) -> np.ndarray:
r"""Compute the Radon transform for a fan beam geometry
In contrast to :func:`radon_parallel`, this function uses (1)
a fan-beam geometry (the integral is taken along rays that meet
at one point), and (2) translates the object laterally instead
of rotating it. The result is sometimes referred to as 'linogram'.
Problem sketch::
x
^
|
----> z
        source          object            detector
                    /                 .  (+det_size/2, lD)
        (0,-lS)  ./        (0,0)      .
                  \                   .
                   \                  .  (-det_size/2, lD)
The algorithm computes all angular projections for discrete
movements of the object. The position of the object is changed such
that its lower boundary starts at (det_size/2, 0) and its upper
boundary ends at (-det_size/2, 0) at increments of `shift_size`.
Parameters
----------
arr: ndarray, shape (N,N)
the input image.
det_size: int
        The total detector size in pixels. The detector is centered on
        the source. The axial position of the detector is at the center
        of the pixels on the far right of the object.
det_spacing: float
Distance between detection points in pixels.
shift_size: int
The amount of pixels that the object is shifted between
projections.
lS: multiples of 0.5
Source position relative to the center of `arr`. lS >= 1.
lD: int
Detector position relative to the center `arr`. Default is N/2.
return_ang: bool
Also return the angles corresponding to the detector pixels.
count, max_count: multiprocessing.Value or `None`
Can be used to monitor the progress of the algorithm.
Initially, the value of `max_count.value` is incremented
by the total number of steps. At each step, the value
of `count.value` is incremented.
Returns
-------
    outarr: ndarray of floats, shape (N+det_size, det_size)
        Linogram of the input image, where N+det_size covers the
        lateral positions of the sample.
See Also
--------
scipy.ndimage.interpolation.rotate :
The interpolator used to rotate the image.
"""
N = arr.shape[0]
if lD is None:
lD = N / 2
lS = abs(lS)
lDS = lS + lD
numsteps = int(np.ceil((N + det_size) / shift_size))
if max_count is not None:
with max_count.get_lock():
max_count.value += numsteps + 2
# First, create a zero-padded version of the input image such that
# its center is the source - this is necessary because we can
# only rotate through the center of the image.
# arrc = np.pad(arr, ((0,0),(N+2*lS-1,0)), mode="constant")
if lS <= N / 2:
pad = 2 * lS
arrc = np.pad(arr, ((0, 0), (pad, 0)), mode="constant")
        # The center lies either between two pixels or on one pixel,
        # exactly as in the input image.
else:
pad = N / 2 + lS
if pad % 1 == 0: # even
pad = int(pad)
            # We pad to an even number of pixels.
# The center of the image is between two pixels (horizontally).
arrc = np.pad(arr, ((0, 0), (pad, 0)), mode="constant")
else: # odd
            # We pad to an odd number of pixels.
# The center of the image is a pixel.
pad = int(np.floor(pad))
arrc = np.pad(arr, ((0, 0), (pad, 0)), mode="constant")
# Lateral and axial coordinates of detector pixels
axi = np.ones(det_size) * lDS
# minus .5 because we use the center of the pixel
if det_size % 2 == 0: # even
even = True
else: # odd
even = False
if count is not None:
with count.get_lock():
count.value += 1
lat = get_det_coords(det_size, det_spacing)
angles =
|
np.arctan2(lat, axi)
|
numpy.arctan2
|
# -*- coding: utf-8 -*-
"""
Tests for Results.predict
"""
import numpy as np
import pandas as pd
from numpy.testing import assert_allclose, assert_equal
import pandas.util.testing as pdt
from statsmodels.regression.linear_model import OLS
from statsmodels.genmod.generalized_linear_model import GLM
class CheckPredictReturns(object):
def test_2d(self):
res = self.res
data = self.data
fitted = res.fittedvalues.iloc[1:10:2]
pred = res.predict(data.iloc[1:10:2])
pdt.assert_index_equal(pred.index, fitted.index)
assert_allclose(pred.values, fitted.values, rtol=1e-13)
# plain dict
xd = dict(zip(data.columns, data.iloc[1:10:2].values.T))
pred = res.predict(xd)
assert_equal(pred.index, np.arange(len(pred)))
assert_allclose(pred.values, fitted.values, rtol=1e-13)
def test_1d(self):
# one observation
res = self.res
data = self.data
pred = res.predict(data.iloc[:1])
pdt.assert_index_equal(pred.index, data.iloc[:1].index)
assert_allclose(pred.values, res.fittedvalues[0], rtol=1e-13)
fittedm = res.fittedvalues.mean()
xmean = data.mean()
pred = res.predict(xmean.to_frame().T)
assert_equal(pred.index, np.arange(1))
assert_allclose(pred, fittedm, rtol=1e-13)
# Series
pred = res.predict(data.mean())
assert_equal(pred.index, np.arange(1))
assert_allclose(pred.values, fittedm, rtol=1e-13)
# dict with scalar value (is plain dict)
        # Note: this used to warn about dropped nans even though there were none - FIXED
pred = res.predict(data.mean().to_dict())
assert_equal(pred.index, np.arange(1))
assert_allclose(pred.values, fittedm, rtol=1e-13)
def test_nopatsy(self):
res = self.res
data = self.data
fitted = res.fittedvalues.iloc[1:10:2]
# plain numpy array
pred = res.predict(res.model.exog[1:10:2], transform=False)
assert_allclose(pred, fitted.values, rtol=1e-13)
# pandas DataFrame
x = pd.DataFrame(res.model.exog[1:10:2],
index = data.index[1:10:2],
columns=res.model.exog_names)
pred = res.predict(x)
pdt.assert_index_equal(pred.index, fitted.index)
assert_allclose(pred.values, fitted.values, rtol=1e-13)
# one observation - 1-D
pred = res.predict(res.model.exog[1], transform=False)
assert_allclose(pred, fitted.values[0], rtol=1e-13)
# one observation - pd.Series
pred = res.predict(x.iloc[0])
pdt.assert_index_equal(pred.index, fitted.index[:1])
assert_allclose(pred.values[0], fitted.values[0], rtol=1e-13)
class TestPredictOLS(CheckPredictReturns):
@classmethod
def setup_class(cls):
nobs = 30
np.random.seed(987128)
x = np.random.randn(nobs, 3)
y = x.sum(1) + np.random.randn(nobs)
index = ['obs%02d' % i for i in range(nobs)]
# add one extra column to check that it doesn't matter
cls.data = pd.DataFrame(np.round(np.column_stack((y, x)), 4),
columns='y var1 var2 var3'.split(),
index=index)
cls.res = OLS.from_formula('y ~ var1 + var2', data=cls.data).fit()
class TestPredictGLM(CheckPredictReturns):
@classmethod
def setup_class(cls):
nobs = 30
np.random.seed(987128)
x = np.random.randn(nobs, 3)
y = x.sum(1) + np.random.randn(nobs)
index = ['obs%02d' % i for i in range(nobs)]
# add one extra column to check that it doesn't matter
cls.data = pd.DataFrame(np.round(np.column_stack((y, x)), 4),
columns='y var1 var2 var3'.split(),
index=index)
cls.res = GLM.from_formula('y ~ var1 + var2', data=cls.data).fit()
def test_predict_offset(self):
res = self.res
data = self.data
fitted = res.fittedvalues.iloc[1:10:2]
offset = np.arange(len(fitted))
fitted = fitted + offset
pred = res.predict(data.iloc[1:10:2], offset=offset)
pdt.assert_index_equal(pred.index, fitted.index)
assert_allclose(pred.values, fitted.values, rtol=1e-13)
# plain dict
xd = dict(zip(data.columns, data.iloc[1:10:2].values.T))
pred = res.predict(xd, offset=offset)
assert_equal(pred.index, np.arange(len(pred)))
assert_allclose(pred.values, fitted.values, rtol=1e-13)
# offset as pandas.Series
data2 = data.iloc[1:10:2].copy()
data2['offset'] = offset
pred = res.predict(data2, offset=data2['offset'])
pdt.assert_index_equal(pred.index, fitted.index)
assert_allclose(pred.values, fitted.values, rtol=1e-13)
# check nan in exog is ok, preserves index matching offset length
data2 = data.iloc[1:10:2].copy()
data2['offset'] = offset
data2.iloc[0, 1] = np.nan
pred = res.predict(data2, offset=data2['offset'])
pdt.assert_index_equal(pred.index, fitted.index)
fitted_nan = fitted.copy()
fitted_nan[0] = np.nan
|
assert_allclose(pred.values, fitted_nan.values, rtol=1e-13)
|
numpy.testing.assert_allclose
|
###############################################################################
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
###############################################################################
import sys
import numbers
import numpy as np
from onnx import numpy_helper, mapping, onnx_pb
from .common.onnx_ops import apply_identity, apply_reshape, OnnxOperatorBuilder
from .funcbook import converter_func, set_converters
from .proto import onnx_proto
from .proto.tfcompat import tensorflow
class TYPES:
# tf-node types:
Identity = 'Identity'
Const = 'Const'
Any = 'Any'
All = 'All'
Cast = 'Cast'
ConcatV2 = 'ConcatV2'
ResizeBilinear = 'ResizeBilinear'
ResizeNearestNeighbor = 'ResizeNearestNeighbor'
Round = 'Round'
Shape = 'Shape'
StridedSlice = 'StridedSlice'
TopKV2 = 'TopKV2'
# converter internal types:
TD_Reshape = '_reshape_timedistributed'
def _cal_tensor_value(tensor): # type: (tensorflow.Tensor)->np.ndarray
node = tensor.op
if node.type not in ["Const", "ConstV2"]:
return None
make_ndarray = tensorflow.make_ndarray
np_arr = make_ndarray(node.get_attr("value"))
return np_arr
def _cal_tensor_shape(tensor):
if len(tensor.shape) > 0 and hasattr(tensor.shape[0], 'value'):
return [x.value for x in tensor.shape]
else:
return list(tensor.shape)
def _to_onnx_type(dt_type):
    # TensorFlow data types integrate seamlessly with numpy
return mapping.NP_TYPE_TO_TENSOR_TYPE[
|
np.dtype(dt_type.as_numpy_dtype)
|
numpy.dtype
|
import latte
import numpy as np
import pytest
from latte.functional.disentanglement import sap
class TestSAP:
def test_continuous_below_thresh(self):
z = np.zeros(shape=(16, 8))
z[:, 0] = np.arange(16, dtype=float)
a = np.arange(16, dtype=float)[:, None]
sap_score = sap.sap(z, a)
np.testing.assert_array_almost_equal(sap_score, [1.0])
def test_continuous_above_thresh(self):
z = np.zeros(shape=(16, 2))
z[:, 0] = np.arange(16, dtype=float)
        z[:, 1] = [0.0, 1.0] * 8
a = np.arange(16, dtype=float)[:, None]
sap_score = sap.sap(z, a)
np.testing.assert_array_almost_equal(sap_score, [0.988235294])
def test_continuous_above_thresh_regdim_miss(self):
z = np.zeros(shape=(16, 2))
z[:, 0] = np.arange(16, dtype=float)
        z[:, 1] = [0.0, 1.0] * 8
a = np.arange(16, dtype=float)[:, None]
sap_score = sap.sap(z, a, reg_dim=[1])
np.testing.assert_array_almost_equal(sap_score, [-0.988235294])
def test_continuous_above_thresh_regdim_match(self):
z = np.zeros(shape=(16, 2))
z[:, 0] = np.arange(16, dtype=float)
        z[:, 1] = [0.0, 1.0] * 8
a = np.arange(16, dtype=float)[:, None]
sap_score = sap.sap(z, a, reg_dim=[0])
np.testing.assert_array_almost_equal(sap_score, [0.988235294])
def test_discrete(self):
z = np.zeros(shape=(16, 2))
z[:, 0] = np.linspace(0, 1, 16, endpoint=True)
z[:, 1] =
|
np.zeros(shape=(16,))
|
numpy.zeros
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2017 <NAME> <<EMAIL>>
#
# Distributed under terms of the MIT license.
"""Compute dynamic properties."""
import logging
from typing import Dict, Optional, Union
import numpy as np
import rowan
from ..frame import Frame
from ..molecules import Molecule
from ..util import create_freud_box, zero_quaternion
from ._util import TrackedMotion, molecule2particles
np.seterr(divide="raise", invalid="raise", over="raise")
logger = logging.getLogger(__name__)
YamlValue = Union[str, float, int]
class Dynamics:
"""Compute dynamic properties of a simulation.
Args:
timestep: The timestep on which the configuration was taken.
box: The lengths of each side of the simulation cell including
any tilt factors.
position: The positions of the molecules
with shape ``(nmols, 3)``. Even if the simulation is only 2D,
all 3 dimensions of the position need to be passed.
orientation: The orientations of all the
molecules as a quaternion in the form ``(w, x, y, z)``. If no
orientation is supplied then no rotational quantities are
calculated.
molecule: The molecule for which to compute the dynamics quantities.
This is used to compute the structural relaxation for all particles.
wave_number: The wave number of the maximum peak in the Fourier
transform of the radial distribution function. If None this is
calculated from the initial configuration.
"""
_all_quantities = [
"time",
"mean_displacement",
"msd",
"mfd",
"alpha",
"scattering_function",
"com_struct",
"mean_rotation",
"msr",
"rot1",
"rot2",
"alpha_rot",
"gamma",
"overlap",
"struct",
]
def __init__(
self,
timestep: int,
box: np.ndarray,
position: np.ndarray,
orientation: Optional[np.ndarray] = None,
molecule: Optional[Molecule] = None,
wave_number: Optional[float] = None,
angular_resolution=360,
) -> None:
"""Initialise Dynamics.
Args:
timestep: The timestep at which the reference frame was created.
box: The lengths of the simulation cell
position: The initial position of each particle.
orientation: The initial orientation of each particle.
molecule: The molecule which is represented by each position and orientation.
wave_number: The wave number corresponding to the maximum in the
static structure factor.
angular_resolution: The angular resolution of the intermediate scattering function.
"""
if position.shape[0] == 0:
raise RuntimeError("Position must contain values, has length of 0.")
if molecule is None:
is2d = True
self.mol_vector = None
else:
is2d = molecule.dimensions == 2
self.mol_vector = molecule.positions
self.motion = TrackedMotion(
create_freud_box(box, is_2D=is2d), position, orientation
)
self.timestep = timestep
self.num_particles = position.shape[0]
self.wave_number = wave_number
if self.wave_number is not None:
angles = np.linspace(
0, 2 * np.pi, num=angular_resolution, endpoint=False
).reshape((-1, 1))
self.wave_vector = (
np.concatenate([np.cos(angles), np.sin(angles)], axis=1) * wave_number
)
@classmethod
def from_frame(
cls,
frame: Frame,
molecule: Optional[Molecule] = None,
wave_number: Optional[float] = None,
) -> "Dynamics":
"""Initialise the Dynamics class from a Frame object.
There is significant overlap between the frame class and the dynamics class,
so this is a convenience method to make the initialisation simpler.
"""
return cls(
frame.timestep,
frame.box,
frame.position,
frame.orientation,
molecule=molecule,
wave_number=wave_number,
)
@property
def delta_translation(self):
return self.motion.delta_translation
@property
def delta_rotation(self):
return self.motion.delta_rotation
def add(self, position: np.ndarray, orientation: Optional[np.ndarray] = None):
"""Update the state of the dynamics calculations by adding a Frame.
This updates the motion of the particles, comparing the positions and
orientations of the current frame with the previous frame, adding the difference
to the total displacement. This approach allows for tracking particles over
periodic boundaries, or through larger rotations assuming that there are
sufficient frames to capture the information. Each single displacement obeys the
minimum image convention, so for large time intervals it is still possible to
have missing information.
Args:
position: The updated position of each particle
orientation: The updated orientation of each particle, represented as a quaternion
"""
self.motion.add(position, orientation)
def add_frame(self, frame: Frame):
"""Update the state of the dynamics calculations by adding a Frame.
This updates the motion of the particles, comparing the positions and
orientations of the current frame with the previous frame, adding the difference
to the total displacement. This approach allows for tracking particles over
periodic boundaries, or through larger rotations assuming that there are
sufficient frames to capture the information. Each single displacement obeys the
minimum image convention, so for large time intervals it is still possible to
have missing information.
Args:
frame: The configuration containing the current particle information.
"""
self.motion.add(frame.position, frame.orientation)
def compute_displacement(self) -> np.ndarray:
"""Compute the translational displacement for each particle."""
return np.linalg.norm(self.delta_translation, axis=1)
def compute_displacement2(self) -> np.ndarray:
"""Compute the squared displacement for each particle."""
return np.square(self.delta_translation).sum(axis=1)
def compute_msd(self) -> float:
"""Compute the mean squared displacement."""
return self.compute_displacement2().mean()
def compute_mfd(self) -> float:
"""Compute the fourth power of displacement."""
return np.square(self.compute_displacement2()).mean()
def compute_alpha(self) -> float:
r"""Compute the non-Gaussian parameter alpha for translational motion in 2D.
.. math::
\alpha = \frac{\langle \Delta r^4\rangle}
{2\langle \Delta r^2 \rangle^2} -1
"""
disp2 = self.compute_displacement2()
try:
return np.square(disp2).mean() / (2 * np.square((disp2).mean())) - 1
except FloatingPointError:
with np.errstate(invalid="ignore"):
res = np.square(disp2).mean() / (2 * np.square((disp2).mean())) - 1
np.nan_to_num(res, copy=False)
return res
def compute_time_delta(self, timestep: int) -> int:
"""Time difference between initial frame and timestep."""
return timestep - self.timestep
def compute_rotation(self) -> np.ndarray:
"""Compute the rotational motion for each particle."""
return np.linalg.norm(self.delta_rotation, axis=1)
def compute_rotation2(self) -> np.ndarray:
"""Compute the rotation from the initial frame."""
return np.square(self.delta_rotation).sum(axis=1)
def compute_mean_rotation(self) -> float:
"""Compute the rotation from the initial frame."""
return self.compute_rotation().mean()
def compute_msr(self) -> float:
"""Compute the mean squared rotation from the initial frame."""
return self.compute_rotation2().mean()
def compute_isf(self) -> float:
"""Compute the intermediate scattering function."""
return np.cos(np.dot(self.wave_vector, self.delta_translation[:, :2].T)).mean()
def compute_rotational_relax1(self) -> float:
r"""Compute the first-order rotational relaxation function.
.. math::
C_1(t) = \langle \hat{\mathbf{e}}(0) \cdot \hat{\mathbf{e}}(t) \rangle
Return:
float: The rotational relaxation
"""
return np.cos(self.compute_rotation()).mean()
def compute_rotational_relax2(self) -> float:
r"""Compute the second rotational relaxation function.
.. math::
C_1(t) = \langle 2(\hat{\mathbf{e}}(0) \cdot \hat{\mathbf{e}}(t))^2 - 1 \rangle
Return:
float: The rotational relaxation
"""
return np.mean(2 * np.square(np.cos(self.compute_rotation())) - 1)
def compute_alpha_rot(self) -> float:
r"""Compute the non-Gaussian parameter alpha for rotational motion in 2D.
Rotational motion in 2D, is a single dimension of rotational motion, hence the
use of a different divisor than translational motion.
.. math::
\alpha = \frac{\langle \Delta \theta^4\rangle}
{3\langle \Delta \theta^2 \rangle^2} -1
"""
theta2 = self.compute_rotation2()
try:
return np.square(theta2).mean() / (3 * np.square((theta2).mean())) - 1
except FloatingPointError:
with np.errstate(invalid="ignore"):
res =
|
np.square(theta2)
|
numpy.square
|
import numpy as np
training_data = [
{'x': np.array([0.5819, 0.3683, 0.4413]), 'y': 0.3675},
{'x': np.array([0.5257, 0.8034, 0.3369]), 'y': 0.4502},
{'x': np.array([0.5464, 0.5087, 0.2151]), 'y': 0.323},
{'x': np.array([0.0368, 0.4483, 0.1082]), 'y': 0.0932},
{'x': np.array([0.3923, 0.8577, 0.844 ]), 'y': 0.554},
{'x': np.array([0.2598, 0.8384, 0.4308]), 'y': 0.3847},
{'x': np.array([0.2948, 0.7698, 0.8535]), 'y': 0.4811},
{'x': np.array([0.3932, 0.6851, 0.5644]), 'y': 0.4076},
{'x': np.array([0.3504, 0.1667, 0.0203]), 'y': 0.1258},
{'x': np.array([0.5699, 0.083 , 0.2821]), 'y': 0.2843},
{'x': np.array([0.5214, 0.3075, 0.7003]), 'y': 0.3978},
{'x': np.array([0.5106, 0.3842, 0.2181]), 'y': 0.2775},
{'x': np.array([0.9681, 0.3554, 0.6043]), 'y': 0.6357},
{'x': np.array([0.0159, 0.516 , 0.7315]), 'y': 0.2727},
{'x': np.array([0.202 , 0.4277, 0.1312]), 'y': 0.1447},
{'x': np.array([0.4517, 0.6635, 0.0862]), 'y': 0.2951},
{'x': np.array([0.5695, 0.4303, 0.7988]), 'y': 0.471},
{'x': np.array([0.1438, 0.4769, 0.561 ]), 'y': 0.2537},
{'x': np.array([0.8781, 0.005 , 0.5077]), 'y': 0.5148},
{'x': np.array([0.2462, 0.9468, 0.6733]), 'y': 0.4972},
{'x': np.array([0.5648, 0.7944, 0.5051]), 'y': 0.5097},
{'x': np.array([0.0151, 0.5057, 0.9673]), 'y': 0.333},
{'x': np.array([0.9351, 0.3965, 0.8533]), 'y': 0.6879},
{'x': np.array([0.1393, 0.442 , 0.567 ]), 'y': 0.2452},
{'x': np.array([0.5434, 0.1117, 0.0418]), 'y': 0.2087},
{'x': np.array([0.5808, 0.4009, 0.4434]), 'y': 0.3743},
{'x': np.array([0.0573, 0.6268, 0.9303]), 'y': 0.3717},
{'x': np.array([0.0879, 0.2472, 0.5177]), 'y': 0.1804},
{'x': np.array([0.8892, 0.4163, 0.7102]), 'y': 0.6231},
{'x': np.array([0.9976, 0.0016, 0.8722]), 'y': 0.695},
{'x': np.array([0.075 , 0.5708, 0.9446]), 'y': 0.3626},
{'x': np.array([0.9911, 0.4114, 0.1397]), 'y': 0.5387},
{'x': np.array([0.5167, 0.6452, 0.3098]), 'y': 0.3772},
{'x': np.array([0.5823, 0.1991, 0.9728]), 'y': 0.4848},
{'x': np.array([0.3766, 0.5462, 0.0057]), 'y': 0.2047},
{'x': np.array([0.2625, 0.2555, 0.7608]), 'y': 0.3029},
{'x': np.array([0.5443, 0.6129, 0.6856]), 'y': 0.4799},
{'x': np.array([0.1287, 0.5737, 0.4872]), 'y': 0.2565},
{'x': np.array([0.5684, 0.9465, 0.4019]), 'y': 0.5549},
{'x': np.array([0.2328, 0.7245, 0.1978]), 'y': 0.2648},
{'x': np.array([0.914 , 0.9175, 0.4937]), 'y': 0.7611},
{'x': np.array([0.2271, 0.4644, 0.8575]), 'y': 0.3572},
{'x': np.array([0.0864, 0.0804, 0.3205]), 'y': 0.1122},
{'x': np.array([0.1798, 0.0815, 0.8586]), 'y': 0.2857},
{'x': np.array([0.5004, 0.8261, 0.6381]), 'y': 0.5298},
{'x': np.array([0.7573, 0.8823, 0.1121]), 'y': 0.5441},
{'x': np.array([0.8791, 0.2571, 0.9163]), 'y': 0.6431},
{'x': np.array([0.0732, 0.4323, 0.7305]), 'y': 0.2672},
{'x': np.array([0.0034, 0.1835, 0.2874]), 'y': 0.0873},
{'x': np.array([0.7829, 0.2822, 0.6289]), 'y': 0.51},
{'x': np.array([0.9289, 0.8584, 0.9859]), 'y': 0.8752},
{'x': np.array([0.2879, 0.3621, 0.4345]), 'y': 0.2419},
{'x': np.array([0.859 , 0.468 , 0.3035]), 'y': 0.5065},
{'x': np.array([0.3386, 0.3234, 0.891 ]), 'y': 0.3761},
{'x': np.array([0.4942, 0.9679, 0.5773]), 'y': 0.5791},
{'x': np.array([0.3298, 0.2964, 0.3284]), 'y': 0.217},
{'x': np.array([0.9949, 0.9716, 0.5551]), 'y': 0.8616},
{'x': np.array([0.6914, 0.9031, 0.5679]), 'y': 0.6401},
{'x': np.array([0.5477, 0.9153, 0.9674]), 'y': 0.6816},
{'x': np.array([0.7822, 0.5467, 0.7356]), 'y': 0.5972},
{'x': np.array([0.0617, 0.8124, 0.2402]), 'y': 0.2593},
{'x': np.array([0.0033, 0.6644, 0.7327]), 'y': 0.3167},
{'x': np.array([0.1184, 0.7044, 0.3024]), 'y': 0.2486},
{'x': np.array([0.012 , 0.2953, 0.3404]), 'y': 0.1182},
{'x': np.array([0.5689, 0.5353, 0.2987]), 'y': 0.3635},
{'x': np.array([0.5426, 0.2256, 0.7156]), 'y': 0.3999},
{'x': np.array([0.9523, 0.5509, 0.7318]), 'y': 0.7065},
{'x': np.array([0.5466, 0.3827, 0.5994]), 'y': 0.3962},
{'x': np.array([0.8549, 0.3651, 0.0881]), 'y': 0.4229},
{'x': np.array([0.6769, 0.1411, 0.9955]), 'y': 0.5334},
{'x': np.array([0.3948, 0.8252, 0.464 ]), 'y': 0.4381},
{'x': np.array([0.6073, 0.6924, 0.4008]), 'y': 0.4614},
{'x': np.array([0.136 , 0.4414, 0.7052]), 'y': 0.2813},
{'x': np.array([0.9094, 0.7285, 0.5405]), 'y': 0.6869},
{'x': np.array([0.8051, 0.8431, 0.7533]), 'y': 0.7264},
{'x': np.array([0.4946, 0.2798, 0.3238]), 'y': 0.2802},
{'x': np.array([0.7026, 0.448 , 0.1602]), 'y': 0.3711},
{'x': np.array([0.5262, 0.2316, 0.5341]), 'y': 0.3443},
{'x': np.array([0.6447, 0.0107, 0.423 ]), 'y': 0.3573},
{'x': np.array([0.9458, 0.7168, 0.0561]), 'y': 0.5768},
{'x': np.array([0.2762, 0.0242, 0.6527]), 'y': 0.2612},
{'x': np.array([0.8588, 0.579 , 0.9023]), 'y': 0.6987},
{'x': np.array([0.4085, 0.9815, 0.6949]), 'y': 0.5817},
{'x': np.array([0.274 , 0.4376, 0.6946]), 'y': 0.3231},
{'x': np.array([0.3026, 0.4449, 0.5889]), 'y': 0.3067},
{'x': np.array([0.3634, 0.4367, 0.3362]), 'y': 0.2596},
{'x': np.array([0.8218, 0.4028, 0.0488]), 'y': 0.3996},
{'x': np.array([0.6126, 0.9342, 0.0632]), 'y': 0.479},
{'x': np.array([0.4159, 0.4047, 0.5318]), 'y': 0.3258},
{'x': np.array([0.2581, 0.3702, 0.865 ]), 'y': 0.3487},
{'x': np.array([0.9556, 0.4297, 0.3273]), 'y': 0.5681},
{'x': np.array([0.6467, 0.3666, 0.432 ]), 'y': 0.3969},
{'x': np.array([0.1882, 0.1628, 0.5189]), 'y': 0.2024},
{'x': np.array([0.2447, 0.6555, 0.0429]), 'y': 0.2017},
{'x': np.array([0.9605, 0.5576, 0.7954]), 'y': 0.7313},
{'x': np.array([0.2844, 0.131 , 0.8107]), 'y': 0.3111},
{'x': np.array([0.6513, 0.8081, 0.455 ]), 'y': 0.5449},
{'x': np.array([0.3039, 0.0487, 0.2701]), 'y': 0.1688},
{'x': np.array([0.7149, 0.6344, 0.4245]), 'y': 0.5032},
{'x': np.array([0.1934, 0.9614, 0.6691]), 'y': 0.4859},
{'x': np.array([0.9782, 0.9374, 0.7458]), 'y': 0.8832},
{'x': np.array([0.6589, 0.3404, 0.8188]), 'y': 0.5022},
{'x': np.array([0.9996, 0.0406, 0.3657]), 'y': 0.5606},
{'x': np.array([0.5866, 0.7489, 0.223 ]), 'y': 0.4254},
{'x': np.array([0.8548, 0.0654, 0.236 ]), 'y': 0.4279},
{'x': np.array([0.0599, 0.2245, 0.3526]), 'y': 0.125},
{'x': np.array([0.5582, 0.8149, 0.572 ]), 'y': 0.5334},
{'x': np.array([0.5848, 0.7718, 0.4005]), 'y': 0.4816},
{'x': np.array([0.7732, 0.555 , 0.8219]), 'y': 0.6177},
{'x': np.array([0.7937, 0.3883, 0.6229]), 'y': 0.5339},
{'x': np.array([0.8657, 0.6768, 0.9154]), 'y': 0.7396},
{'x': np.array([0.9966, 0.4677, 0.9739]), 'y': 0.7804},
{'x': np.array([0.7549, 0.5367, 0.7743]), 'y': 0.5889},
{'x': np.array([0.9174, 0.4121, 0.7512]), 'y': 0.6519},
{'x': np.array([0.2975, 0.727 , 0.1467]), 'y': 0.2748},
{'x': np.array([0.5051, 0.7208, 0.6813]), 'y': 0.4997},
{'x': np.array([0.9716, 0.7796, 0.033 ]), 'y': 0.614},
{'x': np.array([0.2522, 0.1755, 0.3215]), 'y': 0.1719},
{'x': np.array([0.6301, 0.8266, 0.5715]), 'y': 0.5735},
{'x': np.array([0.9852, 0.7718, 0.6734]), 'y': 0.7927},
{'x': np.array([0.5307, 0.7507, 0.7803]), 'y': 0.5497},
{'x': np.array([0.4445, 0.5047, 0.9589]), 'y': 0.4769},
{'x': np.array([0.2088, 0.9341, 0.5953]), 'y': 0.4572},
{'x': np.array([0.2311, 0.458 , 0.6427]), 'y': 0.2992},
{'x': np.array([0.2855, 0.9732, 0.7678]), 'y': 0.5501},
{'x': np.array([0.4491, 0.7721, 0.3696]), 'y': 0.4122},
{'x': np.array([0.1781, 0.1101, 0.5302]), 'y': 0.1983},
{'x': np.array([0.3195, 0.9199, 0.6077]), 'y': 0.4923},
{'x': np.array([0.9729, 0.4863, 0.5731]), 'y': 0.6603},
{'x': np.array([0.0357, 0.431 , 0.686 ]), 'y': 0.2442},
{'x': np.array([0.5236, 0.9087, 0.1052]), 'y': 0.4354},
{'x': np.array([0.5903, 0.0541, 0.0645]), 'y': 0.2345},
{'x': np.array([0.5923, 0.95 , 0.1293]), 'y': 0.4949},
{'x': np.array([0.1322, 0.7993, 0.0558]), 'y': 0.2249},
{'x':
|
np.array([0.5932, 0.4393, 0.8538])
|
numpy.array
|
import os
import cv2
import numpy as np
import matplotlib.pyplot as plt
import time
import matplotlib
base_path = os.path.dirname(os.path.abspath(__file__))
img_path = os.path.join(
base_path, "empty.jpg"
)
video_path = os.path.join(base_path, "trafficvideo.mp4")
input_img = cv2.imread(img_path)
cap = cv2.VideoCapture(video_path)
frame_width = int(cap.get(3))
frame_height = int(cap.get(4))
img_coord=np.float32([[976,223],[1264,220],[453,863],[1473,865]])
new_coor=
|
np.float32([[472,52],[800,52],[472,830],[800,830]])
|
numpy.float32
|
import os,sys
import numpy as np
import random
import multiprocessing
import concurrent.futures
from concurrent.futures import ProcessPoolExecutor
NCPU = 4
os.environ["CUDA_VISIBLE_DEVICES"] = "3"
##################### input parameters ###############################
FILEDIR = sys.argv[1]
INDIR = sys.argv[2] # native npz dir
nb_epochs = int(sys.argv[3])
n2d_layers = int(sys.argv[4])
n2d_filters = int(sys.argv[5])
method = sys.argv[6]
# FILEDIR = "/dl/suhong/project/TemptrRosetta/DB/DB13989"
# INDIR = "/dl/yangjy/project/distance/npz" # native npz dir
# nb_epochs = 1
# n2d_layers = 11
# n2d_filters = 8
test_file = "%s/test_lst"%(FILEDIR)
all_file = "%s/list"%(FILEDIR)
TEMPDIR = "%s/temp_npz1"%(FILEDIR) # template npz dir
# test set
with open(test_file) as f:
test_ids = f.read().splitlines()
# all set: containing train and test list
with open(all_file) as f:
IDs = f.read().splitlines()
maxseq = 20000
minseq = 1
dmax = 20.0
dmin = 2.0
nbins = 36
kmin = 6
bins = np.linspace(dmin, dmax, nbins+1)
bins180 = np.linspace(0.0, np.pi, 13)
bins360 = np.linspace(-np.pi, np.pi, 25)
def npz_loader(ID, DIR = INDIR, DIR2 = TEMPDIR):
name = DIR + '/' + ID + '.npz'
name_temp = DIR2 + '/' + ID + '_T01' + '.npz'
npz = np.load(name)
npz_temp = np.load(name_temp) # temp npz
# print(name_temp) #test
#### native npz objective ####
# dist
dist = npz['dist']
dist[dist<0.001] = 999.9
# bin distance matrix
dbin = np.digitize(dist, bins).astype(np.uint8)
dbin[dbin > nbins] = 0
# bin omega
obin = np.digitize(npz['omega'], bins360).astype(np.uint8)
obin[dbin == 0] = 0
# bin theta
tbin = np.digitize(npz['theta_asym'], bins360).astype(np.uint8)
tbin[dbin == 0] = 0
# bin phi
pbin = np.digitize(npz['phi_asym'], bins180).astype(np.uint8)
pbin[dbin == 0] = 0
#### template npz objective ####
# dist (Cb-Cb)
d = npz_temp['dist']
d[d<0.001] = 999.9
tdbin = np.digitize(d, bins).astype(np.uint8)
tdbin[tdbin > nbins] = 0
# omega
om = npz_temp['omega']
tobin = np.digitize(om, bins360).astype(np.uint8)
tobin[tdbin == 0] = 0
# theta_asym
th_asym = npz_temp['theta_asym']
ttbin = np.digitize(th_asym, bins360).astype(np.uint8)
ttbin[tdbin == 0] = 0
# phi_asym
ph_asym = npz_temp['phi_asym']
tpbin = np.digitize(ph_asym, bins180).astype(np.uint8)
tpbin[tdbin == 0] = 0
# template weight
weight = npz_temp['weight']
return {'mask1d' : npz['mask1d'],
'msa' : npz['msa'][:maxseq,:],
'dbin' : dbin,
'obin' : obin,
'tbin' : tbin,
'pbin' : pbin,
'dist' : dist,
'bbpairs' : npz['bbpairs'],
'd' : tdbin,
'om' : tobin,
'th_asym' : ttbin,
'ph_asym' : tpbin,
'weight' : weight,
'label' : ID}
# judge whether or not len(native.npz) == len(temp.npz)
remove_ids = []
for i in range(len(IDs)):
nat_npz = np.load(INDIR + '/' + IDs[i] + '.npz')
tem_npz = np.load(TEMPDIR + '/' + IDs[i] + '_T01' + '.npz')
nat_len = nat_npz['dist'].shape[0]
tem_len = tem_npz['dist'].shape[0]
if nat_len != tem_len:
remove_ids.append(IDs[i])
for i in range(len(remove_ids)):
print(remove_ids[i])
sub_test_ids = list(set(test_ids)-set(remove_ids))
sub_IDs = list(set(IDs)-set(remove_ids))
train_ids = list(set(sub_IDs) - set(sub_test_ids))
train = []
test = []
s = 0.0
# load data in parallel using NCPU worker processes
with ProcessPoolExecutor(max_workers=NCPU) as executor:
futures_test = [executor.submit(npz_loader, (ID)) for ID in sub_test_ids]
results_test = concurrent.futures.wait(futures_test)
for f in futures_test:
test.append(f.result())
for key,arr in test[-1].items():
if key != "label":
s += arr.nbytes
with ProcessPoolExecutor(max_workers=NCPU) as executor:
futures_train = [executor.submit(npz_loader, (ID)) for ID in train_ids]
results_train = concurrent.futures.wait(futures_train)
for f in futures_train:
train.append(f.result())
for key,arr in train[-1].items():
if key != "label":
s += arr.nbytes
print('# {:d} proteins, {:.2f}gb'.format(len(train)+len(test), s / 1024**3))
print('# Training set: ', len(train))
print('# Testing set: ', len(test))
# save model
# PREFIX = "Train" + str(len(train)) + "_Test" + str(len(test))
PREFIX = method
CHK = "/dl/suhong/project/TemptrRosetta/models/model." + PREFIX + '-layer%s'%(n2d_layers) + '-filter%s'%(n2d_filters) + '-epoch%s'%(nb_epochs)
sys.stdout.flush()
import tensorflow as tf
config = tf.compat.v1.ConfigProto(
gpu_options = tf.compat.v1.GPUOptions(per_process_gpu_memory_fraction=0.50)
)
lr = 0.0001
l2_coef = 0.0001
window2d = 3
maxlen = 260
wmin = 0.8
ns = 21
# 1-hot MSA to PSSM
def msa2pssm(msa1hot, w):
beff = tf.reduce_sum(w)
f_i = tf.reduce_sum(w[:,None,None]*msa1hot, axis=0) / beff + 1e-9
h_i = tf.reduce_sum( -f_i * tf.math.log(f_i), axis=1)
return tf.concat([f_i, h_i[:,None]], axis=1)
# randomly subsample the alignment
def subsample_msa(msa):
nr, nc = msa.shape
# do not subsample shallow MSAs
if nr < 10:
return msa
# number of sequences to subsample
n = int(10.0**np.random.uniform(np.log10(nr))-10.0)
if n <= 0:
return np.reshape(msa[0], [1,nc])
# n unique indices
sample = sorted(random.sample(range(1, nr-1), n))
# stack first sequence with n randomly selected ones
msa_new = np.vstack([msa[0][None,:], msa[1:][sample]])
return msa_new.astype(np.uint8)
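# Editor's sketch (illustration only): the query sequence (row 0) always
# survives subsampling; only the remaining alignment rows are thinned.
def _subsample_msa_demo():
    msa_demo = np.random.randint(0, 21, size=(100, 50)).astype(np.uint8)
    sub = subsample_msa(msa_demo)
    assert (sub[0] == msa_demo[0]).all() and sub.shape[1] == 50
    return sub.shape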
# reweight MSA based on cutoff
def reweight(msa1hot, cutoff):
with tf.name_scope('reweight'):
id_min = tf.cast(tf.shape(msa1hot)[1], tf.float32) * cutoff
id_mtx = tf.tensordot(msa1hot, msa1hot, [[1,2], [1,2]])
id_mask = id_mtx > id_min
w = 1.0/tf.reduce_sum(tf.cast(id_mask, dtype=tf.float32),-1)
return w
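# Editor's sketch: a NumPy restatement of the reweighting rule above (not used
# by the pipeline). Each sequence's weight is the inverse of the number of
# sequences whose fractional identity to it exceeds the cutoff; every sequence
# matches itself, so all weights are <= 1.
def _reweight_numpy(msa1hot, cutoff=0.8):
    nc = msa1hot.shape[1]
    # (nr, nr) matrix of shared-position counts between sequences
    id_mtx = np.tensordot(msa1hot, msa1hot, axes=[[1, 2], [1, 2]])
    return 1.0 / (id_mtx > cutoff * nc).sum(axis=-1)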
def fast_dca(msa1hot, weights, penalty = 4.5):
nr = tf.shape(msa1hot)[0]
nc = tf.shape(msa1hot)[1]
ns = tf.shape(msa1hot)[2]
with tf.name_scope('covariance'):
x = tf.reshape(msa1hot, (nr, nc * ns))
num_points = tf.reduce_sum(weights) - tf.sqrt(tf.reduce_mean(weights))
mean = tf.reduce_sum(x * weights[:,None], axis=0, keepdims=True) / num_points
x = (x - mean) * tf.sqrt(weights[:,None])
cov = tf.matmul(tf.transpose(x), x)/num_points
    with tf.name_scope('inv_covariance'):
cov_reg = cov + tf.eye(nc * ns) * penalty / tf.sqrt(tf.reduce_sum(weights))
inv_cov = tf.linalg.inv(cov_reg)
# reduce entropy effect
#wd = tf.linalg.diag_part(w)
#w = w/tf.sqrt(wd[None,:]*wd[:,None])
x1 = tf.reshape(inv_cov,(nc, ns, nc, ns))
x2 = tf.transpose(x1, [0,2,1,3])
features = tf.reshape(x2, (nc, nc, ns * ns))
x3 = tf.sqrt(tf.reduce_sum(tf.square(x1[:,:-1,:,:-1]),(1,3))) * (1-tf.eye(nc))
apc = tf.reduce_sum(x3,0,keepdims=True) * tf.reduce_sum(x3,1,keepdims=True) / tf.reduce_sum(x3)
contacts = (x3 - apc) * (1-tf.eye(nc))
return tf.concat([features, contacts[:,:,None]], axis=2)
def cont_acc(pred, dist, frac = 1.0):
nc = dist.shape[0]
if nc < 24:
return 0.0
w = np.sum(pred[0,:,:,1:13], axis=-1)
idx = np.array([[i,j,w[i,j]] for i in range(nc) for j in range(i+24,nc)])
n = int(nc*frac)
top = idx[np.argsort(idx[:,2])][-n:, :2].astype(int)
ngood = np.sum([dist[r[0],r[1]]<8.0 for r in top])
return 1.0 * ngood / n
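# Editor's sketch: cont_acc ranks long-range pairs (|i-j| >= 24) by predicted
# contact probability (bins 1..12, i.e. distances below ~8A) and reports the
# fraction of the top nc*frac ranked pairs that are true contacts. A toy check:
def _cont_acc_demo():
    L = 30
    dist_demo = np.full((L, L), 20.0)
    pred_demo = np.zeros((1, L, L, nbins + 1))
    pred_demo[0, 0, 25, 1] = 1.0   # one confidently predicted long-range pair
    dist_demo[0, 25] = 5.0         # ...which is a true contact in the native map
    return cont_acc(pred_demo, dist_demo, frac=0.1)  # 1 of top 3 -> ~0.333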
def get_fdict(item, flag):
# native npz objective
L = item['msa'].shape[1]
msa_ = subsample_msa(item['msa'])
theta_ = item['tbin']
omega_ = item['obin']
phi_ = item['pbin']
dist_ = item['dbin']
mask1d_ = item['mask1d']
bbpairs_ = item['bbpairs']
# template npz objective
d_ = item['d']
om_ = item['om']
th_ = item['th_asym']
ph_ = item['ph_asym']
w_ = item['weight']
start = 0
stop = L
nres = L
# slice vertically if sequence is too long
if L > maxlen:
nres = np.random.randint(150,maxlen)
start = np.random.randint(L-maxlen+1)
stop = start + nres
item['start'] = start
item['stop'] = stop
feed_dict = {
ncol: nres,
nrow: msa_.shape[0],
msa: msa_[:,start:stop],
theta: theta_[start:stop,start:stop],
omega: omega_[start:stop,start:stop],
phi: phi_[start:stop,start:stop],
dist: dist_[start:stop,start:stop],
mask1d: mask1d_[start:stop],
bbpairs: bbpairs_[start:stop,start:stop],
d: d_[start:stop,start:stop],
om: om_[start:stop,start:stop],
th: th_[start:stop,start:stop],
ph: ph_[start:stop,start:stop],
w_temp: w_,
is_train: flag}
return feed_dict
activation = tf.nn.elu
conv2d = tf.layers.conv2d
with tf.Graph().as_default():
with tf.name_scope('input'):
ncol = tf.compat.v1.placeholder(dtype=tf.int32, shape=())
nrow = tf.compat.v1.placeholder(dtype=tf.int32, shape=())
msa = tf.compat.v1.placeholder(dtype=tf.uint8, shape=(None,None))
mask1d = tf.compat.v1.placeholder(dtype=tf.float32, shape=(None))
bbpairs = tf.compat.v1.placeholder(dtype=tf.uint8, shape=(None,None))
dist = tf.compat.v1.placeholder(dtype=tf.uint8, shape=(None,None))
omega = tf.compat.v1.placeholder(dtype=tf.uint8, shape=(None,None))
theta = tf.compat.v1.placeholder(dtype=tf.uint8, shape=(None,None))
phi = tf.compat.v1.placeholder(dtype=tf.uint8, shape=(None,None))
d = tf.compat.v1.placeholder(dtype=tf.float32, shape=(None,None))
om = tf.compat.v1.placeholder(dtype=tf.float32, shape=(None,None))
th = tf.compat.v1.placeholder(dtype=tf.float32, shape=(None,None))
ph = tf.compat.v1.placeholder(dtype=tf.float32, shape=(None,None))
w_temp = tf.compat.v1.placeholder(dtype=tf.float32, shape=())
is_train = tf.compat.v1.placeholder(tf.bool, name='is_train')
######################## convert inputs to 1-hot ######################
msa1hot = tf.one_hot(msa, ns, dtype=tf.float32)
dist1hot = tf.one_hot(dist, nbins+1, dtype=tf.float32)
omega1hot = tf.one_hot(omega, 25, dtype=tf.float32)
theta1hot = tf.one_hot(theta, 25, dtype=tf.float32)
phi1hot = tf.one_hot(phi, 13, dtype=tf.float32)
bb1hot = tf.one_hot(bbpairs, 3, dtype=tf.float32)
########################### collect features ###########################
###### msa ########
# 1d
w = reweight(msa1hot, wmin)
neff = tf.reduce_sum(w)
f1d_seq = msa1hot[0,:,:20]
f1d_pssm = msa2pssm(msa1hot, w)
f1d = tf.concat(values=[f1d_seq, f1d_pssm], axis=1)
f1d = tf.expand_dims(f1d, axis=0)
f1d = tf.reshape(f1d, [1,ncol,42])
# 2d
f2d_dca = tf.cond(nrow>1, lambda: fast_dca(msa1hot, w), lambda: tf.zeros([ncol,ncol,442], tf.float32))
f2d_dca = tf.expand_dims(f2d_dca, axis=0)
f2d_query = tf.concat([tf.tile(f1d[:,:,None,:], [1,1,ncol,1]),
tf.tile(f1d[:,None,:,:], [1,ncol,1,1]),
f2d_dca], axis=-1)
###### template ########
# 2d
tem_d = tf.expand_dims(tf.cast(d[:,:,None],dtype=tf.float32),axis=0)
tem_om = tf.expand_dims(tf.cast(om[:,:,None],dtype=tf.float32),axis=0)
tem_th = tf.expand_dims(tf.cast(th[:,:,None],dtype=tf.float32),axis=0)
tem_ph = tf.expand_dims(tf.cast(ph[:,:,None],dtype=tf.float32),axis=0)
f2d_temp = tf.concat([tem_d,tem_om,tem_th,tem_ph],axis=-1) # (1,L,L,4) tf.uint8
############# connect query and one template###################
#f2d = tf.concat([f2d_query * 1.0, f2d_temp * w_temp],axis=-1)
#f2d = tf.reshape(f2d, [1,ncol,ncol,442+2*42+4])
############## connect 1d and 2d (trRosetta)###############
f2d = f2d_query
f2d = tf.reshape(f2d, [1,ncol,ncol,442+2*42])
############# only template #######################
# f2d = tf.reshape(f2d_temp, [1,ncol,ncol,4])
layers2d = [f2d]
layers2d.append(conv2d(layers2d[-1], n2d_filters, 1, padding='SAME'))
layers2d.append(tf.contrib.layers.instance_norm(layers2d[-1]))
layers2d.append(activation(layers2d[-1]))
# D.Jones resnet
dilation = 1
for _ in range(n2d_layers):
layers2d.append(conv2d(layers2d[-1], n2d_filters, window2d, padding='SAME', dilation_rate=dilation))
layers2d.append(tf.contrib.layers.instance_norm(layers2d[-1]))
layers2d.append(activation(layers2d[-1]))
layers2d.append(tf.keras.layers.Dropout(rate=0.15)(layers2d[-1], training=is_train))
layers2d.append(conv2d(layers2d[-1], n2d_filters, window2d, padding='SAME', dilation_rate=dilation))
layers2d.append(tf.contrib.layers.instance_norm(layers2d[-1]))
layers2d.append(activation(layers2d[-1] + layers2d[-7]))
dilation *= 2
if dilation > 16:
dilation = 1
# loss on theta
logits_theta = conv2d(layers2d[-1], 25, 1, padding='SAME')
out_theta = tf.nn.softmax_cross_entropy_with_logits_v2(labels=theta1hot, logits=logits_theta)
loss_theta = tf.reduce_mean(out_theta)
# loss on phi
logits_phi = conv2d(layers2d[-1], 13, 1, padding='SAME')
out_phi = tf.nn.softmax_cross_entropy_with_logits_v2(labels=phi1hot, logits=logits_phi)
loss_phi = tf.reduce_mean(out_phi)
# symmetrize
layers2d.append(0.5 * (layers2d[-1] + tf.transpose(layers2d[-1], perm=[0,2,1,3])))
# loss on distances
logits_dist = conv2d(layers2d[-1], nbins+1, 1, padding='SAME')
out_dist = tf.nn.softmax_cross_entropy_with_logits_v2(labels=dist1hot, logits=logits_dist)
loss_dist = tf.reduce_mean(out_dist)
prob_dist = tf.nn.softmax(logits_dist)
# loss on beta-strand pairings
logits_bb = conv2d(layers2d[-1], 3, 1, padding='SAME')
out_bb = tf.nn.softmax_cross_entropy_with_logits_v2(labels=bb1hot, logits=logits_bb)
loss_bb = tf.reduce_mean(out_bb)
# loss on omega
logits_omega = conv2d(layers2d[-1], 25, 1, padding='SAME')
out_omega = tf.nn.softmax_cross_entropy_with_logits_v2(labels=omega1hot, logits=logits_omega)
loss_omega = tf.reduce_mean(out_omega)
#
# L2 penalty
#
vars = tf.trainable_variables()
lossL2 = tf.add_n([tf.nn.l2_loss(v) for v in vars if v.name not
in ['bias', 'gamma', 'b', 'g', 'beta']]) * l2_coef
#
# optimizer
#
opt = tf.train.AdamOptimizer(learning_rate=lr) #optimization
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
# training op
with tf.control_dependencies(update_ops):
train_op = opt.minimize(loss_dist + loss_bb + loss_theta + loss_phi + loss_omega + lossL2)
total_parameters=np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])
print("tot. params: " + str(total_parameters))
init_op = tf.group(tf.global_variables_initializer(),
tf.local_variables_initializer())
saver = tf.train.Saver()
with tf.Session(config=config) as sess:
sess.run(init_op)
val_loss_best = 999.9
out_name = method + '_' + "Test" + str(len(test))
with open("/dl/suhong/project/TemptrRosetta/output/acc/%s.result"%(out_name), 'wt') as fid:
for epoch in range(nb_epochs):
# training
random.shuffle(train) #rearrange train list
train_losses = []
for item in train:
_, ldist, ltheta, lphi, lomega = sess.run([train_op, loss_dist, loss_theta, loss_phi, loss_omega],
feed_dict=get_fdict(item, 1))
train_losses.append([ldist,ltheta,lphi,lomega])
# testing
val_losses = []
vacc_dist = []
random.shuffle(test)
for item in test:
ldist, ltheta, lphi, lomega, pdist, neff_ = sess.run([loss_dist, loss_theta, loss_phi, loss_omega, prob_dist, neff],
feed_dict = get_fdict(item, 0))
item['pdist'] = pdist
item['neff'] = neff_
val_losses.append([ldist,ltheta,lphi,lomega])
start = item['start']
stop = item['stop']
vacc_dist.append(cont_acc(item['pdist'], item['dist'][start:stop,start:stop]))
# output each test result in a new file
acc = cont_acc(item['pdist'], item['dist'][start:stop,start:stop])
out_str = item['label'] + '\t' + str('{:.5f}'.format(acc))
fid.writelines(out_str+"\n")
fid.writelines('epoch=%s\tval_acc=%s'%(str(epoch),str('{:.5f}'.format(np.average(vacc_dist))))+"\n")
train_losses =
|
np.array(train_losses)
|
numpy.array
|
import librosa as lb
import numpy as np
class OQM:
def __init__(self, clean_utt = '', noise_utt = ''):
""" Generic Objective quality measure class for initializing variables and loading
the speech samples
Attributes:
c_utt (np array of float or path to clean audio) representing the info for clean utterance
n_utt (np array of float or path to clean audio) representing the info for noisy utterance
c_sr (integer) sample rate for clean utterance
n_sr (integer) sample rate for noisy utterance
"""
self.c_utt = clean_utt
self.n_utt = noise_utt
self.c_sr = 0
self.n_sr = 0
def load(self, clean_utt,noise_utt):
"""Function to load the clean and noisy utterance if the path is specified.
OR if numpy arrays are passed to the function then it just resamples it with
sample rate equal to 16000(its a requirement for the PESQ class to work properly)
Args:
file_name (string): name of a file to read from
Returns:
None
"""
if isinstance(clean_utt, str) and isinstance(noise_utt, str):
self.c_utt, self.c_sr = lb.load(clean_utt, sr=16000)
self.n_utt, self.n_sr = lb.load(noise_utt, sr=16000)
self.c_utt = np.array(self.c_utt).reshape(-1)
self.n_utt =
|
np.array(self.n_utt)
|
numpy.array
|
import numpy as np
import scipy.signal
import matplotlib
import matplotlib.pyplot as plt
plt.switch_backend('agg')
import sys
import pandas as pd
import os
import pickle
from multiprocessing import Pool
from itertools import repeat
import utils.EventDetection_utils as EventDetection_utils
import utils.general_utils as general_utils
import utils.plot_utils as plot_utils
import utils.plot_setting as plot_setting
import utils.list_twoP_exp as list_twoP_exp
import utils.list_behavior as list_behavior
import utils.math_utils as math_utils
experiments=list_twoP_exp.SS36112_CO2puff_BW_analysis
# event_max_dur=EventDetection_utils.event_max_dur
# event_min_dur=EventDetection_utils.event_min_dur
event_max_dur=2.5
event_min_dur=0.27
bsl_dur_s=EventDetection_utils.bsl_s
def count_ROIs_from_folder(exp_lists_current_fly):
for date, genotype, fly, recrd_num in exp_lists_current_fly:
Gal4=genotype.split('-')[0]
flyDir = NAS_AN_Proj_Dir +'03_general_2P_exp/'+ Gal4 +'/2P/'+ date+'/'+genotype+'-'+fly+'/'
dataDir = flyDir+genotype+'-'+fly+'-'+recrd_num + '/'
pathForDic = dataDir+'/output/'
outDirGCevt = pathForDic + 'GCevt_based_JposBehBallAnalysis/'
folder_content=os.listdir(outDirGCevt)
print('folder_content', folder_content)
ROI_folder_list=[]
for content in folder_content:
if content[:4]=='ROI#':
ROI_folder_list.append(content)
ROI_count=len(ROI_folder_list)
print('ROI_folder_list', ROI_folder_list, ROI_count, 'ROIs found!')
break
return ROI_count
def make_structure_as_modelStructure(ori_list, exempl_list):
if len(ori_list)!=len(exempl_list):
anticipate_list=[]
for i,v in enumerate(exempl_list):
anticipate_list.append([])
return anticipate_list
else:
return ori_list
def add_NaNtail_to_each_Evt(Evt_list):
Evt_len_list=[]
for evt in Evt_list:
# print('np.shape(evt', np.shape(evt))
Evt_len_list.append(np.shape(evt)[0])
max_EvtLen=max(Evt_len_list)
# print('max_EvtLen', max_EvtLen)
EvtNaNtail_list=[]
for evt in Evt_list:
evt_NaNtail=math_utils.add_NaN_tail(evt, max_EvtLen)
#print('len evt_NaNtail', len(evt_NaNtail))
EvtNaNtail_list.append(evt_NaNtail)
return EvtNaNtail_list
def mp_worker_for_CI_mean_trace(idx, dataset, confidence, least_realNum_amount, behFreq):
dataset=np.asarray(dataset)
# print('shape dataset', np.shape(dataset))
# print('idx', idx)
# print('shape dataset', np.shape(dataset))
# print('least_realNum_amount', least_realNum_amount)
# print('len dataset', len(dataset))
if behFreq==0:
behFreq=1
data=np.array(dataset[:, idx])
#print('shape data', np.shape(data))
non_nan_counts=data.size - np.count_nonzero(np.isnan(data))
if non_nan_counts > least_realNum_amount:
nonnan_data_list=data[np.logical_not(np.isnan(data))]
(mean, down_CI, up_CI) = math_utils.compute_CI_and_mean(nonnan_data_list, confidence=confidence)
return mean, down_CI, up_CI
else:
return np.nan, np.nan, np.nan
def compute_CI_and_mean_trace_w_BehfreqCorrec(dataset, confidence=0.95, least_realNum_amount=4, behFreq=1):
idx_range=[i for i in range(0,np.shape(dataset)[1])]
# print('colums_range', colums_range)
p=Pool()
mean_downCI_upCI = p.starmap(mp_worker_for_CI_mean_trace, zip(idx_range, repeat(dataset), repeat(confidence), repeat(least_realNum_amount), repeat(behFreq)))
#mean_trace, down_err_trace, up_err_trace = p.starmap(worker_for_CI_mean_trace, args_for_pool)
p.close()
p.join()
del p
#print('shape mean_downCI_upCI', np.shape(mean_downCI_upCI))
mean_trace=np.asarray(mean_downCI_upCI)[:,0]
down_err_trace=np.asarray(mean_downCI_upCI)[:,1]
up_err_trace=np.asarray(mean_downCI_upCI)[:,2]
return mean_trace, down_err_trace, up_err_trace
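# Serial sketch of the per-column reduction that compute_CI_and_mean_trace_w_BehfreqCorrec
# parallelizes with Pool.starmap: each time point (column) is kept only if it has
# enough non-NaN samples; the CI itself comes from the project-specific
# math_utils.compute_CI_and_mean, so only the mean is shown here.
def _column_mean_if_enough_sketch(dataset, idx, least_realNum_amount=4):
    col = np.asarray(dataset)[:, idx]
    real = col[np.logical_not(np.isnan(col))]  # drop the NaN padding
    if real.size > least_realNum_amount:
        return np.mean(real)
    return np.nan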
def find_maxGC_perROI(exp_lists_per_fly):
print('Finding maximum GC among all recordings per fly')
GC_set_per_fly=[]
for date, genotype, fly, recrd_num in exp_lists_per_fly:
Gal4=genotype.split('-')[0]
#dataDir = '/Users/clc/Documents/EPFL/NeLy/Data/ANproj/20180824/SS25451-tdTomGC6fopt-fly1/SS25451-tdTomGC6fopt-fly1-007'
flyDir = NAS_AN_Proj_Dir + Gal4 +'/2P/'+ date+'/'+genotype+'-'+fly+'/'
dataDir = NAS_AN_Proj_Dir + Gal4 +'/2P/' + date+'/'+genotype+'-'+fly+'/'+genotype+'-'+fly+'-'+recrd_num + '/'
pathForDic = dataDir+'/output/'
Beh_Jpos_GC_DicData=general_utils.open_Beh_Jpos_GC_DicData(pathForDic)
GC_set = Beh_Jpos_GC_DicData['GCset']
for ROI_i, GC_trace in enumerate(GC_set):
if len(GC_set_per_fly)!=len(GC_set):
GC_set_per_fly.append([])
print('len GC_set_per_fly', len(GC_set_per_fly))
for ROI_i, GC_trace in enumerate(GC_set):
GC_set_per_fly[ROI_i].extend(GC_trace)
maxGC_per_fly=[]
for ROI_i, GC_trace in enumerate(GC_set_per_fly):
max_GC=max(GC_trace)
maxGC_per_fly.append(max_GC)
print('maxGC_per_fly', maxGC_per_fly)
return maxGC_per_fly
def save_GCevt_fly_dic(save_dir, filename):
print('Saving GCevent-based.dic')
GCevt_dic_fly={}
GCevt_dic_fly.update({'samplingFreq':samplingFreq})
GCevt_dic_fly.update({'GC_evt_fly':GC_evt_fly})
GCevt_dic_fly.update({'time_evt_fly':time_evt_fly})
GCevt_dic_fly.update({'rest_evt_fly':rest_evt_fly})
GCevt_dic_fly.update({'walk_evt_fly':walk_evt_fly})
GCevt_dic_fly.update({'eye_groom_evt_fly':eye_groom_evt_fly})
GCevt_dic_fly.update({'foreleg_groom_evt_fly':foreleg_groom_evt_fly})
GCevt_dic_fly.update({'antennae_groom_evt_list':antennae_groom_evt_list})
GCevt_dic_fly.update({'hindleg_groom_evt_list':hindleg_groom_evt_list})
GCevt_dic_fly.update({'Abd_groom_evt_list':Abd_groom_evt_list})
GCevt_dic_fly.update({'PER_evt_list':PER_evt_list})
GCevt_dic_fly.update({'F_groom_evt_list':F_groom_evt_list})
GCevt_dic_fly.update({'H_groom_evt_list':H_groom_evt_list})
GCevt_dic_fly.update({'CO2puff_evt_list':CO2puff_evt_list})
GCevt_dic_fly.update({'velForw_evt_list':velForw_evt_list})
GCevt_dic_fly.update({'velSide_evt_list':velSide_evt_list})
GCevt_dic_fly.update({'velTurn_evt_list':velTurn_evt_list})
for item, value in J_Pos_evt.items():
GCevt_dic_fly.update({item:value})
pickle.dump( GCevt_dic_fly, open( save_dir + filename, "wb" ) )
return
def save_GCevt_fly_dic(save_dir, filename):
print('Saving GCevent-based.dic')
GCevt_dic_fly={}
GCevt_dic_fly.update({'samplingFreq':samplingFreq})
GCevt_dic_fly.update({'GC_evt_fly':GC_evt_fly})
GCevt_dic_fly.update({'time_evt_fly':time_evt_fly})
GCevt_dic_fly.update({'rest_evt_fly':rest_evt_fly})
GCevt_dic_fly.update({'walk_evt_fly':walk_evt_fly})
GCevt_dic_fly.update({'eye_groom_evt_fly':eye_groom_evt_fly})
GCevt_dic_fly.update({'foreleg_groom_evt_fly':foreleg_groom_evt_fly})
GCevt_dic_fly.update({'antennae_groom_evt_list':antennae_groom_evt_list})
GCevt_dic_fly.update({'hindleg_groom_evt_fly':hindleg_groom_evt_list})
GCevt_dic_fly.update({'Abd_groom_evt_fly':Abd_groom_evt_list})
GCevt_dic_fly.update({'PER_evt_fly':PER_evt_list})
GCevt_dic_fly.update({'F_groom_evt_fly':F_groom_evt_list})
GCevt_dic_fly.update({'H_groom_evt_fly':H_groom_evt_list})
GCevt_dic_fly.update({'CO2_evt_fly':CO2puff_evt_list})
GCevt_dic_fly.update({'velForw_evt_fly':velForw_evt_list})
GCevt_dic_fly.update({'velSide_evt_fly':velSide_evt_list})
GCevt_dic_fly.update({'velTurn_evt_fly':velTurn_evt_list})
GCevt_dic_fly.update({'JposEvt_fly':JposEvt_fly})
pickle.dump( GCevt_dic_fly, open( save_dir + filename, "wb" ) )
return
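# Counterpart sketch to save_GCevt_fly_dic: read the pickled event dictionary
# back from disk (filename is whatever was passed when saving).
def load_GCevt_fly_dic(save_dir, filename):
    with open(save_dir + filename, "rb") as handle:
        return pickle.load(handle)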
##main##
NAS_Dir=general_utils.NAS_Dir
NAS_AN_Proj_Dir=general_utils.NAS_AN_Proj_Dir
dataAnalysisType=[
('CO2evt_long_dic'),
('CO2evt_short_dic'),
('BWwoCO2evt_dic'),
('BWwCO2evt_dic'),
]
for datatype in dataAnalysisType:
experiments_group_per_fly=general_utils.group_expList_per_fly(experiments)
print('experiments_group_per_fly', experiments_group_per_fly)
count_evt_fly=[]
beh_photo_fps=30
least_realNum_amount=3
if datatype=='CO2evt_long_dic':
inputfilename='CO2evt_long_dic.pkl'
outputfilename='CO2evt_long_based_BallRot_fly'
elif datatype=='CO2evt_short_dic':
inputfilename='CO2evt_short_dic.pkl'
outputfilename='CO2evt_short_based_BallRot_fly'
elif datatype=='BWwoCO2evt_dic':
inputfilename='BWwoCO2evt_dic.pkl'
outputfilename='BWwoCO2evt_based_BallRot_fly'
elif datatype=='BWwCO2evt_dic':
inputfilename='BWwCO2evt_dic.pkl'
outputfilename='BWwCO2evt_based_BallRot_fly'
for exp_lists_per_fly in experiments_group_per_fly:
print('Processing ', exp_lists_per_fly ,'....')
## All events
GC_evt_fly=[]
time_evt_fly=[]
rest_evt_fly=[]
f_walk_evt_fly=[]
b_walk_evt_fly=[]
eye_groom_evt_fly=[]
antennae_groom_evt_fly=[]
foreleg_groom_evt_fly=[]
hindleg_groom_evt_fly=[]
Abd_groom_evt_fly=[]
PER_evt_fly=[]
F_groom_evt_fly=[]
H_groom_evt_fly=[]
CO2_evt_fly=[]
BW_beyong_CO2puff_evt_fly=[]
velForw_evt_fly=[]
velSide_evt_fly=[]
velTurn_evt_fly=[]
# maxGC_per_fly_list=find_maxGC_perROI(exp_lists_per_fly)
#Process each recording
for date, genotype, fly, recrd_num in exp_lists_per_fly:
Gal4=genotype.split('-')[0]
#dataDir = '/Users/clc/Documents/EPFL/NeLy/Data/ANproj/20180824/SS25451-tdTomGC6fopt-fly1/SS25451-tdTomGC6fopt-fly1-007'
flyDir = NAS_AN_Proj_Dir +'03_general_2P_exp/'+ Gal4 +'/2P/'+ date+'/'+genotype+'-'+fly+'/'
dataDir = flyDir+genotype+'-'+fly+'-'+recrd_num + '/'
pathForDic = dataDir+'/output/'
print('dataDir', dataDir)
# inputfilename='CO2evt_all_dic.pkl'
# outputfilename='CO2evt_all_based_relativeBehFreq_fly'
# inputfilename='CO2evt_short_dic.pkl'
# outputfilename='CO2evt_short_based_relativeBehFreq_fly'
# inputfilename='CO2evt_long_dic.pkl'
# outputfilename='CO2evt_long_based_relativeBehFreq_fly'
# inputfilename='CO2_BW_BWwoCO2evt_dic.pkl'
# outputfilename='BWevt_beyong_CO2puff_based_relativeBehFreq_fly'
# inputfilename='BWevt_dic.pkl'
# outputfilename='BWevt_based_relativeBehFreq_fly'
outDirCO2evt = NAS_AN_Proj_Dir + 'output/FigS5-CO2puff_BW_analysis_SS36112/' +Gal4 +'/2P/'+ date+'/'+genotype+'-'+fly+'/'+genotype+'-'+fly+'-'+recrd_num + '/output/CO2evt_based_BehBallAnalysis/'
if os.path.exists(outDirCO2evt+inputfilename):
evt_dic=EventDetection_utils.read_evt_dic(outDirCO2evt, inputfilename)
else:
print('No CO2evt_dic.pkl exists, most likely because no CO2 event was detected ...')
continue
evt_startIdx_list=evt_dic['evt_startIdx_list']
evt_endIdx_list=evt_dic['evt_endIdx_list']
samplingFreq=evt_dic['samplingFreq']
GC_evt_set_list=evt_dic['GC_evt_set_list']
time_evt_list=evt_dic['time_evt_list']
rest_evt_list=evt_dic['rest_evt_list']
f_walk_evt_list=evt_dic['f_walk_evt_list']
b_walk_evt_list=evt_dic['b_walk_evt_list']
eye_groom_evt_list=evt_dic['eye_groom_evt_list']
antennae_groom_evt_list=evt_dic['antennae_groom_evt_list']
foreleg_groom_evt_list=evt_dic['foreleg_groom_evt_list']
print('shape foreleg_groom_evt_list', np.shape(foreleg_groom_evt_list))
hindleg_groom_evt_list=evt_dic['hindleg_groom_evt_list']
Abd_groom_evt_list=evt_dic['Abd_groom_evt_list']
PER_evt_list=evt_dic['PER_evt_list']
F_groom_evt_list=evt_dic['F_groom_evt_list']
H_groom_evt_list=evt_dic['H_groom_evt_list']
CO2puff_evt_list=evt_dic['CO2puff_evt_list']
BW_beyong_CO2puff_evt_list=evt_dic['BW_beyong_CO2puff_evt_list']
velForw_evt_list=evt_dic['velForw_evt_list']
velSide_evt_list=evt_dic['velSide_evt_list']
velTurn_evt_list=evt_dic['velTurn_evt_list']
print(len(GC_evt_set_list), 'GC event found')
model_list_struc=[]
for i in range(0,len(GC_evt_set_list)):
model_list_struc.append([])
GC_evt_fly=make_structure_as_modelStructure(GC_evt_fly, model_list_struc)
print('shape GC_evt_fly', len(GC_evt_fly))
# time_evt_fly=make_structure_as_modelStructure(time_evt_fly, model_list_struc)
# rest_evt_fly=make_structure_as_modelStructure(rest_evt_fly, model_list_struc)
# walk_evt_fly=make_structure_as_modelStructure(walk_evt_fly, model_list_struc)
# eye_groom_evt_fly=make_structure_as_modelStructure(eye_groom_evt_fly, model_list_struc)
# antennae_groom_evt_fly=make_structure_as_modelStructure(antennae_groom_evt_fly, model_list_struc)
# foreleg_groom_evt_fly=make_structure_as_modelStructure(foreleg_groom_evt_fly, model_list_struc)
# hindleg_groom_evt_fly=make_structure_as_modelStructure(hindleg_groom_evt_fly, model_list_struc)
# Abd_groom_evt_fly=make_structure_as_modelStructure(Abd_groom_evt_fly, model_list_struc)
# PER_evt_fly=make_structure_as_modelStructure(PER_evt_fly, model_list_struc)
# F_groom_evt_fly=make_structure_as_modelStructure(F_groom_evt_fly, model_list_struc)
# H_groom_evt_fly=make_structure_as_modelStructure(H_groom_evt_fly, model_list_struc)
# CO2_evt_fly=make_structure_as_modelStructure(CO2_evt_fly, model_list_struc)
# velForw_evt_fly=make_structure_as_modelStructure(velForw_evt_fly, model_list_struc)
# velSide_evt_fly=make_structure_as_modelStructure(velSide_evt_fly, model_list_struc)
# velTurn_evt_fly=make_structure_as_modelStructure(velTurn_evt_fly, model_list_struc)
## Pool events of all recordings per fly
for ROI_i in range(0,len(GC_evt_set_list)):
GC_evt_fly[ROI_i].extend(GC_evt_set_list[ROI_i])
time_evt_fly.extend(time_evt_list)
rest_evt_fly.extend(rest_evt_list)
f_walk_evt_fly.extend(f_walk_evt_list)
b_walk_evt_fly.extend(b_walk_evt_list)
eye_groom_evt_fly.extend(eye_groom_evt_list)
antennae_groom_evt_fly.extend(antennae_groom_evt_list)
foreleg_groom_evt_fly.extend(foreleg_groom_evt_list)
hindleg_groom_evt_fly.extend(hindleg_groom_evt_list)
Abd_groom_evt_fly.extend(Abd_groom_evt_list)
PER_evt_fly.extend(PER_evt_list)
F_groom_evt_fly.extend(F_groom_evt_list)
H_groom_evt_fly.extend(H_groom_evt_list)
CO2_evt_fly.extend(CO2puff_evt_list)
BW_beyong_CO2puff_evt_fly.extend(BW_beyong_CO2puff_evt_list)
velForw_evt_fly.extend(velForw_evt_list)
velSide_evt_fly.extend(velSide_evt_list)
velTurn_evt_fly.extend(velTurn_evt_list)
# ## For preparing the plot of each recording
# GC_evt_set_NaNtail_list=[]
# for i in range(0, len(GC_evt_set_list)):
# GC_evt_set_NaNtail_list.append([])
# GC_evtNaNtail=add_NaNtail_to_each_Evt(GC_evt_set_list[i])
# GC_evt_set_NaNtail_list[i].extend(GC_evtNaNtail)
# time_evtNaNtail_list=add_NaNtail_to_each_Evt(time_evt_list)
# rest_evtNaNtail_list=add_NaNtail_to_each_Evt(rest_evt_list)
# walk_evtNaNtail_list=add_NaNtail_to_each_Evt(walk_evt_list)
# eye_groom_evtNaNtail_list=add_NaNtail_to_each_Evt(eye_groom_evt_list)
# antennae_groom_evtNaNtail_list=add_NaNtail_to_each_Evt(antennae_groom_evt_list)
# foreleg_groom_evtNaNtail_list=add_NaNtail_to_each_Evt(foreleg_groom_evt_list)
# hindleg_groom_evtNaNtail_list=add_NaNtail_to_each_Evt(hindleg_groom_evt_list)
# Abd_groom_evtNaNtail_list=add_NaNtail_to_each_Evt(Abd_groom_evt_list)
# PER_evtNaNtail_list=add_NaNtail_to_each_Evt(PER_evt_list)
# F_groom_evtNaNtail_list=add_NaNtail_to_each_Evt(F_groom_evt_list)
# H_groom_evtNaNtail_list=add_NaNtail_to_each_Evt(H_groom_evt_list)
# CO2puff_evtNaNtail_list=add_NaNtail_to_each_Evt(CO2puff_evt_list)
# velForw_evtNaNtail_list=add_NaNtail_to_each_Evt(velForw_evt_list)
# velSide_evtNaNtail_list=add_NaNtail_to_each_Evt(velSide_evt_list)
# velTurn_evtNaNtail_list=add_NaNtail_to_each_Evt(velTurn_evt_list)
# print('shape GC_evt_set_NaNtail_list', np.shape(GC_evt_set_NaNtail_list))
# print('shape CO2puff_evtNaNtail_list', np.shape(CO2puff_evtNaNtail_list))
# print('Computing mean and CI for CO2 events ...')
# GC_mean_set, GC_downCI_Set, GC_upCI_set=[],[],[]
# for i in range(0, len(GC_evt_set_NaNtail_list)):
# GC_evtNaNtail_list=GC_evt_set_NaNtail_list[i]
# GC_mean_set.append([])
# GC_downCI_Set.append([])
# GC_upCI_set.append([])
# GC_mean, GC_down_CI, GC_up_CI = compute_CI_and_mean_trace_w_BehfreqCorrec(GC_evtNaNtail_list, confidence=0.95, least_realNum_amount=least_realNum_amount)
# GC_mean_set[i].extend(GC_mean)
# GC_downCI_Set[i].extend(GC_down_CI)
# GC_upCI_set[i].extend(GC_up_CI)
# rest_mean, rest_down_CI, rest_up_CI = compute_CI_and_mean_trace_w_BehfreqCorrec(rest_evtNaNtail_list, confidence=0.95, least_realNum_amount=least_realNum_amount, behFreq=1)
# walk_mean, walk_down_CI, walk_up_CI = compute_CI_and_mean_trace_w_BehfreqCorrec(walk_evtNaNtail_list, confidence=0.95, least_realNum_amount=least_realNum_amount, behFreq=1)
# eye_groom_mean, eye_groom_down_CI, eye_groom_up_CI = compute_CI_and_mean_trace_w_BehfreqCorrec(eye_groom_evtNaNtail_list, confidence=0.95, least_realNum_amount=least_realNum_amount, behFreq=1)
# antennae_groom_mean, antennae_groom_down_CI, antennae_groom_up_CI = compute_CI_and_mean_trace_w_BehfreqCorrec(antennae_groom_evtNaNtail_list, confidence=0.95, least_realNum_amount=least_realNum_amount, behFreq=1)
# foreleg_groom_mean, foreleg_groom_down_CI, foreleg_groom_up_CI = compute_CI_and_mean_trace_w_BehfreqCorrec(foreleg_groom_evtNaNtail_list, confidence=0.95, least_realNum_amount=least_realNum_amount, behFreq=1)
# hindleg_groom_mean, hindleg_groom_down_CI, hindleg_groom_up_CI = compute_CI_and_mean_trace_w_BehfreqCorrec(hindleg_groom_evtNaNtail_list, confidence=0.95, least_realNum_amount=least_realNum_amount, behFreq=1)
# Abd_groom_mean, Abd_groom_down_CI, Abd_groom_up_CI = compute_CI_and_mean_trace_w_BehfreqCorrec(Abd_groom_evtNaNtail_list, confidence=0.95, least_realNum_amount=least_realNum_amount, behFreq=1)
# PER_mean, PER_down_CI, PER_up_CI = compute_CI_and_mean_trace_w_BehfreqCorrec(PER_evtNaNtail_list, confidence=0.95, least_realNum_amount=least_realNum_amount, behFreq=1)
# F_groom_mean, F_groom_down_CI, F_groom_up_CI = compute_CI_and_mean_trace_w_BehfreqCorrec(F_groom_evtNaNtail_list, confidence=0.95, least_realNum_amount=least_realNum_amount, behFreq=1)
# H_groom_mean, H_groom_down_CI, H_groom_up_CI = compute_CI_and_mean_trace_w_BehfreqCorrec(H_groom_evtNaNtail_list, confidence=0.95, least_realNum_amount=least_realNum_amount, behFreq=1)
# CO2puff_mean, CO2puff_down_CI, CO2puff_up_CI = compute_CI_and_mean_trace_w_BehfreqCorrec(CO2puff_evtNaNtail_list, confidence=0.95, least_realNum_amount=least_realNum_amount, behFreq=1)
# velForw_mean, velForw_down_CI, velForw_up_CI = compute_CI_and_mean_trace_w_BehfreqCorrec(velForw_evtNaNtail_list, confidence=0.95, least_realNum_amount=least_realNum_amount)
# velSide_mean, velSide_down_CI, velSide_up_CI = compute_CI_and_mean_trace_w_BehfreqCorrec(velSide_evtNaNtail_list, confidence=0.95, least_realNum_amount=least_realNum_amount)
# velTurn_mean, velTurn_down_CI, velTurn_up_CI = compute_CI_and_mean_trace_w_BehfreqCorrec(velTurn_evtNaNtail_list, confidence=0.95, least_realNum_amount=least_realNum_amount)
# print('shape CO2puff_mean', np.shape(CO2puff_mean))
# print('shape CO2puff_down_CI', np.shape(CO2puff_down_CI))
# print('shape GC_mean_set', np.shape(GC_mean_set))
# CO2_Evt_beh_trace_plot=[GC_mean_set[0], rest_mean, walk_mean, eye_groom_mean, antennae_groom_mean, foreleg_groom_mean, hindleg_groom_mean, Abd_groom_mean, PER_mean, CO2puff_mean, velForw_mean, velSide_mean, velTurn_mean]
# CO2_Evt_beh_subtitle=['GC_mean_set[0]', 'rest_mean', 'walk_mean', 'eye_groom_mean', 'antennae_groom_mean', 'foreleg_groom_mean', 'hindleg_groom_mean', 'Abd_groom_mean', 'PER_mean', 'CO2puff_mean', 'velForw_mean', 'velSide_mean', 'velTurn_mean']
# plot_utils.Plot_traces(CO2_Evt_beh_trace_plot, outDirCO2evt, 'CO2evt_Beh.png', subtitle_list=CO2_Evt_beh_subtitle, plot_mode='row_by_row')
# len_sort_time_evt_list=sorted(time_evt_list, key=len)
# time_boundary=[0, len_sort_time_evt_list[-1][-1]-len_sort_time_evt_list[-1][0]]
# print('time_boundary', time_boundary)
# plot_utils.Plot_CO2Evt_avg_err(time_boundary, \
# GC_mean_set, GC_downCI_Set, GC_upCI_set,\
# rest_mean, rest_down_CI, rest_up_CI,\
# walk_mean, walk_down_CI, walk_up_CI,\
# eye_groom_mean, eye_groom_down_CI, eye_groom_up_CI,\
# antennae_groom_mean, antennae_groom_down_CI, antennae_groom_up_CI,\
# foreleg_groom_mean, foreleg_groom_down_CI, foreleg_groom_up_CI,\
# hindleg_groom_mean, hindleg_groom_down_CI, hindleg_groom_up_CI,\
# Abd_groom_mean, Abd_groom_down_CI, Abd_groom_up_CI,\
# PER_mean, PER_down_CI, PER_up_CI,\
# CO2puff_mean, CO2puff_down_CI, CO2puff_up_CI,\
# velForw_mean, velForw_down_CI, velForw_up_CI,\
# velSide_mean, velSide_down_CI, velSide_up_CI,\
# velTurn_mean, velTurn_down_CI, velTurn_up_CI,\
# 'CO2evt_based_relativeBehFreq.png', outDirCO2evt)
## Plotting event per fly
for date, genotype, fly, recrd_num in exp_lists_per_fly:
Gal4=genotype.split('-')[0]
outDirCO2evtSumFly = NAS_AN_Proj_Dir + 'output/FigS5-CO2puff_BW_analysis_SS36112/plots/'
if not os.path.exists(outDirCO2evtSumFly):
os.makedirs(outDirCO2evtSumFly)
print('\nProcessing fly-pooled events for', date, genotype, fly, '\n')
GC_evt_set_NaNtail_fly=[]
for i in range(0, len(GC_evt_fly)):
GC_evt_set_NaNtail_fly.append([])
GC_evtNaNtail_fly=add_NaNtail_to_each_Evt(GC_evt_fly[i])
GC_evt_set_NaNtail_fly[i].extend(GC_evtNaNtail_fly)
time_evtNaNtail_fly=add_NaNtail_to_each_Evt(time_evt_fly)
rest_evtNaNtail_fly=add_NaNtail_to_each_Evt(rest_evt_fly)
f_walk_evtNaNtail_fly=add_NaNtail_to_each_Evt(f_walk_evt_fly)
b_walk_evtNaNtail_fly=add_NaNtail_to_each_Evt(b_walk_evt_fly)
eye_groom_evtNaNtail_fly=add_NaNtail_to_each_Evt(eye_groom_evt_fly)
antennae_groom_evtNaNtail_fly=add_NaNtail_to_each_Evt(antennae_groom_evt_fly)
foreleg_groom_evtNaNtail_fly=add_NaNtail_to_each_Evt(foreleg_groom_evt_fly)
hindleg_groom_evtNaNtail_fly=add_NaNtail_to_each_Evt(hindleg_groom_evt_fly)
Abd_groom_evtNaNtail_fly=add_NaNtail_to_each_Evt(Abd_groom_evt_fly)
PER_evtNaNtail_fly=add_NaNtail_to_each_Evt(PER_evt_fly)
F_groom_evtNaNtail_fly=add_NaNtail_to_each_Evt(F_groom_evt_fly)
H_groom_evtNaNtail_fly=add_NaNtail_to_each_Evt(H_groom_evt_fly)
CO2puff_evtNaNtail_fly=add_NaNtail_to_each_Evt(CO2_evt_fly)
BW_beyong_CO2puff_evtNaNtail_fly=add_NaNtail_to_each_Evt(BW_beyong_CO2puff_evt_fly)
velForw_evtNaNtail_fly=add_NaNtail_to_each_Evt(velForw_evt_fly)
velSide_evtNaNtail_fly=add_NaNtail_to_each_Evt(velSide_evt_fly)
velTurn_evtNaNtail_fly=add_NaNtail_to_each_Evt(velTurn_evt_fly)
print('Computing mean and CI for pooled GC events ...')
GC_mean_fly, GC_downCI_fly, GC_upCI_fly=[],[],[]
for i in range(0, len(GC_evt_set_NaNtail_fly)):
GC_evtNaNtail_fly=GC_evt_set_NaNtail_fly[i]
GC_mean_fly.append([])
GC_downCI_fly.append([])
GC_upCI_fly.append([])
GC_mean, GC_down_CI, GC_up_CI = compute_CI_and_mean_trace_w_BehfreqCorrec(GC_evtNaNtail_fly, confidence=0.95, least_realNum_amount=least_realNum_amount)
GC_mean_fly[i].extend(GC_mean)
GC_downCI_fly[i].extend(GC_down_CI)
GC_upCI_fly[i].extend(GC_up_CI)
# rest_mean_fly, rest_down_CI_fly, rest_up_CI_fly = compute_CI_and_mean_trace_w_BehfreqCorrec(rest_evtNaNtail_fly, confidence=0.95, least_realNum_amount=least_realNum_amount)
# walk_mean_fly, walk_down_CI_fly, walk_up_CI_fly = compute_CI_and_mean_trace_w_BehfreqCorrec(walk_evtNaNtail_fly, confidence=0.95, least_realNum_amount=least_realNum_amount)
# eye_groom_mean_fly, eye_groom_down_CI_fly, eye_groom_up_CI_fly = compute_CI_and_mean_trace_w_BehfreqCorrec(eye_groom_evtNaNtail_fly, confidence=0.95, least_realNum_amount=least_realNum_amount)
# antennae_groom_mean_fly, antennae_groom_down_CI_fly, antennae_groom_up_CI_fly = compute_CI_and_mean_trace_w_BehfreqCorrec(antennae_groom_evtNaNtail_fly, confidence=0.95, least_realNum_amount=least_realNum_amount)
# foreleg_groom_mean_fly, foreleg_groom_down_CI_fly, foreleg_groom_up_CI_fly = compute_CI_and_mean_trace_w_BehfreqCorrec(foreleg_groom_evtNaNtail_fly, confidence=0.95, least_realNum_amount=least_realNum_amount)
# hindleg_groom_mean_fly, hindleg_groom_down_CI_fly, hindleg_groom_up_CI_fly = compute_CI_and_mean_trace_w_BehfreqCorrec(hindleg_groom_evtNaNtail_fly, confidence=0.95, least_realNum_amount=least_realNum_amount)
# Abd_groom_mean_fly, Abd_groom_down_CI_fly, Abd_groom_up_CI_fly = compute_CI_and_mean_trace_w_BehfreqCorrec(Abd_groom_evtNaNtail_fly, confidence=0.95, least_realNum_amount=least_realNum_amount)
# PER_mean_fly, PER_down_CI_fly, PER_up_CI_fly = compute_CI_and_mean_trace_w_BehfreqCorrec(PER_evtNaNtail_fly, confidence=0.95, least_realNum_amount=least_realNum_amount)
# F_groom_mean_fly, F_groom_down_CI_fly, F_groom_up_CI_fly = compute_CI_and_mean_trace_w_BehfreqCorrec(F_groom_evtNaNtail_fly, confidence=0.95, least_realNum_amount=least_realNum_amount)
# H_groom_mean_fly, H_groom_down_CI_fly, H_groom_up_CI_fly = compute_CI_and_mean_trace_w_BehfreqCorrec(H_groom_evtNaNtail_fly, confidence=0.95, least_realNum_amount=least_realNum_amount)
# CO2puff_mean_fly, CO2puff_down_CI_fly, CO2puff_up_CI_fly = compute_CI_and_mean_trace_w_BehfreqCorrec(CO2puff_evtNaNtail_fly, confidence=0.95, least_realNum_amount=least_realNum_amount)
velForw_mean_fly, velForw_down_CI_fly, velForw_up_CI_fly = compute_CI_and_mean_trace_w_BehfreqCorrec(velForw_evtNaNtail_fly, confidence=0.95, least_realNum_amount=least_realNum_amount)
velSide_mean_fly, velSide_down_CI_fly, velSide_up_CI_fly = compute_CI_and_mean_trace_w_BehfreqCorrec(velSide_evtNaNtail_fly, confidence=0.95, least_realNum_amount=least_realNum_amount)
velTurn_mean_fly, velTurn_down_CI_fly, velTurn_up_CI_fly = compute_CI_and_mean_trace_w_BehfreqCorrec(velTurn_evtNaNtail_fly, confidence=0.95, least_realNum_amount=least_realNum_amount)
sum_rest_fly=np.nansum(rest_evtNaNtail_fly,axis=0)
sum_f_walk_fly=np.nansum(f_walk_evtNaNtail_fly,axis=0)
sum_b_walk_fly=np.nansum(b_walk_evtNaNtail_fly,axis=0)
sum_E_groom_fly=np.nansum(eye_groom_evtNaNtail_fly,axis=0)
sum_A_groom_fly=np.nansum(antennae_groom_evtNaNtail_fly,axis=0)
sum_FL_groom_fly=np.nansum(foreleg_groom_evtNaNtail_fly,axis=0)
sum_HL_groom_fly=np.nansum(hindleg_groom_evtNaNtail_fly,axis=0)
sum_Abd_groom_fly=np.nansum(Abd_groom_evtNaNtail_fly,axis=0)
sum_PER_fly=np.nansum(PER_evtNaNtail_fly,axis=0)
sum_F_groom_fly=np.nansum(F_groom_evtNaNtail_fly,axis=0)
sum_H_groom_fly=np.nansum(H_groom_evtNaNtail_fly,axis=0)
sum_CO2puff_fly=
|
np.nansum(CO2puff_evtNaNtail_fly,axis=0)
|
numpy.nansum
|
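# Sketch of why np.nansum is used in the record above: events were NaN-padded
# to a common length, so summing across events must ignore the padding.
import numpy as np

demo_events = np.array([[1.0, 1.0, np.nan],
                        [1.0, 0.0, 1.0]])
demo_sum = np.nansum(demo_events, axis=0)  # -> [2., 1., 1.]; NaN tails ignored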
import sys
from PyQt5 import QtGui
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
import vtk
import math
import numpy as np
import time
from vtk.qt.QVTKRenderWindowInteractor import QVTKRenderWindowInteractor
import socket
import traceback
import QtrCalc as QC
#from scipy.spatial.transform import Rotation as R
class vtpDrawScene:
def SetQuatOrientation_old( self, quaternion, shift, i_actor ): # function to rotate one element around its own origin point
if not self.iniOk_qt :
raise Exception("vtpDrawScene not initialized. Call initScene() first")
# Convert quat to the rotation matrix
# self.mtxRot = [[0,0,0],[0,0,0],[0,0,0]]
#quat = QC.qtr_multiplication(quaternion,self.norm_qtr[i_actor])
# self.mtxRot[i_actor] = self.modelActor[i_actor].GetMatrix()
vtk.vtkMath().QuaternionToMatrix3x3(quaternion, self.mtxRot[i_actor])
# mtxTr01 = self.modelActor[i_actor].GetMatrix()
# mtx = np.identity(3)
# for i in range(2):
# for j in range(2) :
# mtx[i][j] = mtxTr01.GetElement(i, j)
# norm matrix
# self.mtxRot[i_actor] = mtx.dot(np.array(self.mtxRot[i_actor])) #np.array(self.mtxRot[i_actor]).dot(np.array(self.norm_mat[i_actor]))
# Rotation: convert 3x3 to 4x4 matrix
# mtxTr2 = vtk.vtkMatrix4x4() # identity mtx
mtxTr2 = vtk.vtkMatrix4x4() # identity mtx
for i in range(3):
for j in range(3):
mtxTr2.SetElement(i, j, self.mtxRot[i_actor][i][j])
# print(mtxTr02)
# mtxTr01 = self.modelActor[i_actor].GetMatrix()
# vtk.vtkMatrix4x4().Multiply4x4(mtxTr01, mtxTr02, mtxTr2)
# three transforms:
# 1. move the object so the rotation center is in the coord center
tr = vtk.vtkTransform()
origin = np.array(self.modelActor[i_actor].GetOrigin())
position = np.array(self.modelActor[i_actor].GetPosition())
# trans = origin
# trans = origin + position
trans = position
# trans = np.array([0.,0.,0.])
tr.Translate(-trans)
mtxTr1 = tr.GetMatrix()
# 2. rotate around coord center using mtxTr2
mtxTr12 = vtk.vtkMatrix4x4()
vtk.vtkMatrix4x4().Multiply4x4(mtxTr2, mtxTr1, mtxTr12)
## 3. move the object back
tr = vtk.vtkTransform()
tr.Translate(trans + np.array(shift))
mtxTr3 = tr.GetMatrix()
mtxTr123 = vtk.vtkMatrix4x4()
vtk.vtkMatrix4x4().Multiply4x4(mtxTr3, mtxTr12, mtxTr123)
tr = vtk.vtkTransform()
tr.PreMultiply()
tr.Concatenate(mtxTr123)
self.modelActor[i_actor].SetUserTransform(tr)
def initScene_qt(self, obj): # initialize all actors
reader = list()
modelMapper = list()
self.modelActor = list()
self.modelActorsArrows = list()
self.mtxRot = list()
self.norm_qtr = list()
self.pos_set = list()
self.ren = vtk.vtkRenderer()
self.mtxGlob = np.array([[1.,0.,0.],[0.,0.,-1.],[0.,1.,0.]])
# self.mtxGlob[1,1] = 0
# mtxGlob[2,1] = 1
# mtxGlob[1,2] = -1
# mtxGlob[2,2] = 0
# renCornerR = vtk.vtkRenderer()
# self.ren.AddRenderer(renCornerR)
# renCornerR.SetViewport(0.8, 0, 1.0, 0.2)
for i in range(0,len(obj)):
# Read the object data
filename = obj[i]
if filename[-4:] != '.vtp':
continue
reader.append(vtk.vtkXMLPolyDataReader())
reader[i].SetFileName(filename)
reader[i].Update()
# make mapper for the data
modelMapper.append(vtk.vtkPolyDataMapper())
modelMapper[i].SetInputData(reader[i].GetOutput())
# create actor, set its mapper
self.modelActor.append(vtk.vtkActor())
self.modelActor[i].SetMapper(modelMapper[i])
# Add the actor to the renderer, set the background and size.
self.ren.AddActor(self.modelActor[i])
self.mtxRot.append([[1,0,0],[0,1,0],[0,0,1]])
# Get center of the bounding box
origin = self.modelActor[i].GetCenter()
# Set rotation center
self.modelActor[i].SetOrigin(origin)
for i in range(0,len(obj)):
# make mapper for the data
arrowSource = vtk.vtkArrowSource()
arrowSource.SetShaftRadius(0.005)
arrowSource.SetTipLength(0.5)
arrowSource.SetTipRadius(0.01)
# arrowSource = vtk.vtkSphereSource()
# arrowSource.SetCenter(0,0,0)
# arrowSource.SetRadius(0.01)
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(arrowSource.GetOutputPort())
# create actor, set its mapper
self.modelActorsArrows.append(vtk.vtkActor())
self.modelActorsArrows[i].SetMapper(mapper)
self.modelActorsArrows[i].SetScale([0.1,1,1])
# Add the actor to the renderer, set the background and size.
if i not in (7, 8, 13, 14, 15, 16, 23, 24, 27):
self.ren.AddActor(self.modelActorsArrows[i])
self.mtxRot.append([[1,0,0],[0,1,0],[0,0,1]])
# Get center of the bounding box
origin = self.modelActorsArrows[i].GetCenter()
self.modelActorsArrows[i].SetOrigin(origin)
self.pos_set.append(dict())
#Initial conditions for bones
self.initial_pos_actors = np.array([[0,0,0],[0,0,0],[0,0,0],[-0.016, -0.006, 0.163],[-0.016, -0.006, -0.163],[-0.01, -0.312, -0.173],[-0.01, -0.312, 0.173],[-0.01, 0.024, 0.166],[-0.01, 0.024, -0.166],[0,0,0],[-0.005, -0.296, -0.153],[-0.005, -0.296, 0.153],[ 0.08, -0.42, 0.0 ],[ 0.08, -0.411, 0.0 ],[ 0.08, -0.411, 0.0 ],[-0.001, -0.486, 0.072],[-0.001, -0.486, -0.072],[ 0.027, -0.896, -0.081],[ 0.027, -0.896, 0.081],[ 0.006, -0.888, 0.077],[ 0.006, -0.888, -0.077],[ 0.008, -1.312, 0.081],[ 0.008, -1.312, -0.081],[-0.047, -1.358, 0.084],[-0.047, -1.358, -0.084],[ 0.138, -1.358, 0.087],[ 0.138, -1.358, -0.087]])
self.initial_pos_actorsArrows = np.array([[0.063,-0.153,0],[0.062,0.191,0],[-0.019,0.06,0],[-0.016, -0.006, 0.163],[-0.016, -0.006, -0.163],[-0.01, -0.544, -0.201],[-0.01, -0.544, 0.201],[-0.01, 0.024, 0.166],[-0.01, 0.024, -0.166],[0,0,0],[-0.005, -0.296, -0.153],[-0.005, -0.296, 0.153],[ 0.06, -0.400, 0. ],[ 0.08, -0.411, 0. ],[ 0.08, -0.411, 0. ],[-0.001, -0.486, 0.072],[-0.001, -0.486, -0.072],[ 0.041, -0.400, -0.123],[ 0.041, -0.400, 0.123],[ 0.006, -0.888, 0.077],[ 0.006, -0.888, -0.077],[ 0.008, -1.312, 0.081],[ 0.008, -1.312, -0.081],[-0.047, -1.358, 0.084],[-0.047, -1.358, -0.084],[ 0.138, -1.358, 0.087],[ 0.138, -1.358, -0.087]])
#28-9 elems self.initial_pos_actorsArrows = np.array([[0.063,-0.153,0],[0.062,0.191,0],[-0.019,0.06,0],[-0.016, -0.006, 0.163],[-0.016, -0.006, -0.163],[-0.01, -0.544, -0.201],[-0.01, -0.544, 0.201],[0,0,0],[-0.005, -0.296, -0.153],[-0.005, -0.296, 0.153],[ 0.06, -0.400, 0. ],[ 0.041, -0.400, -0.123],[ 0.041, -0.400, 0.123],[ 0.006, -0.888, 0.077],[ 0.006, -0.888, -0.077],[ 0.008, -1.312, 0.081],[-0.047, -1.358, -0.084],[ 0.138, -1.358, 0.087]])
for el in range(len(self.initial_pos_actors)):
# self.norm_qtr.append(np.array([1.,0.,0.,0.]))
self.modelActor[el].SetPosition(self.initial_pos_actors[el])
self.modelActorsArrows[el].SetPosition(self.initial_pos_actorsArrows[el])
self.pos_set[el][0] = self.initial_pos_actorsArrows[el]
axes = vtk.vtkAxesActor()
axes.SetNormalizedTipLength(0.05, 0.05, 0.05)
axes.SetNormalizedShaftLength(1,1,1)
self.ren.AddActor(axes)
# arm_axes = vtk.vtkAxesActor()
# arm_axes.SetNormalizedTipLength(0.05, 0.05, 0.05)
# arm_axes.SetTotalLength(0.1,0.1,0.1)
# transform = vtk.vtkTransform()
# transform.Translate(-0.016,-0.006,-0.162)
# arm_axes.SetUserTransform(transform)
# self.ren.AddActor(arm_axes)
# The axes are positioned with a user transform
transform = vtk.vtkTransform()
transform.Translate(0.0, 0.0, 0.0)
axes.SetUserTransform(transform)
self.ren.SetBackground(0.1, 0.2, 0.4)
camera = vtk.vtkCamera()
camera.SetPosition(2, 0.2, 2)
camera.SetFocalPoint(0, 0, 0)
self.ren.SetActiveCamera(camera)
# SetInteractorStyle(MyInteractorStyle())
#ren.ResetCamera()
# ren.Render()
self.iniOk_qt = True
return self.ren
# def SetQuatOrientation( self, quaternion, shift, i_actor ): #function to rotate one element around his own origine point
# if not self.iniOk_qt :
# raise Exception("vtpDrawScene not initialized. Call initScene() first")
# vtk.vtkMath().QuaternionToMatrix3x3(quaternion, self.mtxRot[i_actor])
# # Rotation: convert 3x3 to 4x4 matrix
# mtxTr2 = vtk.vtkMatrix4x4() # identity mtx
# for i in range(3):
# for j in range(3) :
# mtxTr2.SetElement(i, j, self.mtxRot[i_actor][i][j])
def addVector(self,quaternion):
# make mapper for the data
arrowSource = vtk.vtkArrowSource()
arrowSource.SetShaftRadius(0.01)
arrowSource.SetTipLength(.1)
# arrowSource.SetNormalizedTipLength(0.05, 0.05, 0.05)
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(arrowSource.GetOutputPort())
# create actor, set its mapper
self.modelActor.append(vtk.vtkActor())
el = len(self.modelActor) - 1
self.modelActor[el].SetMapper(mapper)
# Add the actor to the renderer, set the background and size.
self.ren.AddActor(self.modelActor[el])
#
self.mtxRot.append([[0,0,0],[0,0,0],[0,0,0]])
# Get center of the bounding box
origin = self.modelActor[el].GetCenter()
# Set rotation center
self.modelActor[el].SetOrigin(origin)
self.modelActor[el].SetPosition([-0.016,-0.006,-0.162])
mtxRot = np.identity(3)
vtk.vtkMath().QuaternionToMatrix3x3(quaternion, mtxRot)
# Rotation: convert 3x3 to 4x4 matrix
mtxTr = vtk.vtkMatrix4x4() # identity mtx
for i in range(3):
for j in range(3) :
mtxTr.SetElement(i, j, mtxRot[i][j])
tr = vtk.vtkTransform()
tr.PreMultiply()
tr.Concatenate(mtxTr)
self.modelActor[el].SetUserTransform(tr)
def SetRefQuatOrientation( self, quaternion, shift, i_actor, motion_flag ): # function to rotate all elements around the origin
if not self.iniOk_qt :
raise Exception("vtpDrawScene not initialized. Call initScene() first")
mtxRot =
|
np.identity(3)
|
numpy.identity
|
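# Sketch of the "rotate an actor about its own position" composition used by
# SetQuatOrientation_old in the record above: T(+p) * R(q) * T(-p). Built here
# with a PostMultiply vtkTransform, so reading top to bottom matches the order
# applied to a point; the quaternion is (w, x, y, z) as vtkMath expects, and
# the values are toy placeholders.
import vtk
import numpy as np

quat = [1.0, 0.0, 0.0, 0.0]      # identity rotation, (w, x, y, z)
pos = np.array([0.1, 0.2, 0.3])  # actor position (toy values)

rot3 = [[0.0] * 3 for _ in range(3)]
vtk.vtkMath().QuaternionToMatrix3x3(quat, rot3)  # quaternion -> 3x3 matrix

rot4 = vtk.vtkMatrix4x4()  # embed the 3x3 rotation into a 4x4 matrix
for i in range(3):
    for j in range(3):
        rot4.SetElement(i, j, rot3[i][j])

tr = vtk.vtkTransform()
tr.PostMultiply()      # each call below is applied after the previous ones
tr.Translate(-pos)     # 1. move the rotation center to the coordinate origin
tr.Concatenate(rot4)   # 2. rotate about the origin
tr.Translate(pos)      # 3. move the object back
# actor.SetUserTransform(tr) would then apply this to a vtkActor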
from scipy.ndimage.filters import generic_filter
from scipy.signal import convolve2d, medfilt2d
from scipy.spatial.distance import cdist
from os.path import abspath, join, dirname
import matplotlib.pyplot as ppl
from time import time
import numpy as np
import math
import sys
import cv2
import numexpr as ne
sys.path.insert(0, abspath(join(dirname(__file__), '..')))
from data_information import dcm_information as di
def mls_parzen(imgo, mask, dt, N, Lo, flag, shapes, a):
"""
This method performs an analysis of the human density ranges
- M.Sc. in Telecommunications Engineering
- <NAME>
- 02/03/2016
Level set proposed by <NAME> Mederos in:
a fast level set segmentation algorithm
Submitted to: Pattern Recognition Letters
INPUT:
imgo: image
N: maximum number of iterations
dt: delta t
"""
stop2_div = False
show = False
x, y = shapes
window = 9
# if flag == 1:
# window = 5
# [Matheus] Code optimization so the same values are not computed more than once
win_mut = window * window
win_m_2 = - (2 * win_mut)
lo_size_mul = x * y
parzen_eq = 1 / ((window * math.sqrt(math.pi * 2)) ** 3)
imgo_reshape_temp = np.zeros((x, y, 3))
imgo_reshape_temp[:, :, 0] = imgo
i = np.zeros((x, y, 3))
i[:, :, 0] = np.copy(imgo)
i[:, :, 1] = conv2(imgo, np.tile(1 / win_mut, (window, window)))
i[:, :, 2] = generic_filter(imgo, np.std, window)
i = i.reshape((lo_size_mul, 3))
s_eg = np.array([[0, 1, 0],
[1, 1, 1],
[0, 1, 0]], dtype=np.uint8)
f = np.zeros((x, y))
int1 = np.uint8(1)
int0 = np.uint8(0)
# Initialization of the surface and other variables
psi=np.where(Lo>0,1,-1)
psi=np.int16(psi)
# psi = (2 * Lo) - 1 # initial surface: cones
psi_d = cv2.dilate(psi, s_eg) # first dilation
psi_e = cv2.erode(psi, s_eg) # first erosion
# Main procedure
c = 1
phi0 = np.ones((x, y))
thr = np.ones((1, N))
and_lamd = lambda ps, mas: imgo[np.where((ps == 1) & (mas == 1))]
# Plot segmentation
if show:
fig = ppl.figure(figsize=(12, 12))
ax3 = fig.add_subplot(1, 1, 1)
ax3.imshow(imgo, cmap='gray')
while np.sum(thr == 0) < 1: # Convergence
# print('sum thr == 0 < 10', np.sum(thr == 0) < 10 * 1)
print('[INFO] C =', c)
# time_while = time()
c = c + 1
psi = medfilt2d(psi + (dt * ((np.maximum(f, 0) * psi_d) - (np.minimum(f, 0) * psi_e))))
psi_d = cv2.dilate(psi, s_eg)
psi_e = cv2.erode(psi, s_eg)
psi_bigger_1 = np.where(psi > 0, int1, int0)
phi = bwperim(psi_bigger_1, shapes)
# threshold of convergence (front stabilization)
thr[0, c - 1] = np.sum(np.absolute(phi0 - phi))
phi0 = np.copy(phi)
# Parameter Estimation
# print('time while', time() - time_while)
if (c > 60 and c % 5 == 0) or c == 2:
# time_if = time()
and1 = and_lamd(psi_bigger_1, mask)
and2 = and_lamd(np.where(psi < 0, int1, int0), mask)
stroke_t = np.zeros((win_mut, 3))
stroke_f = np.zeros((win_mut, 3))
stroke_t[:, 0] = and1[np.random.permutation(and1.shape[0])[0:win_mut]]
stroke_t[:, 1] = np.mean(and1)
stroke_t[:, 2] = np.std(and1, ddof=1)
stroke_f[:, 0] = and2[np.random.permutation(and2.shape[0])[0:win_mut]]
stroke_f[:, 1] = np.mean(and2)
stroke_f[:, 2] = np.std(and2, ddof=1)
# print('time strokes', time() - time_if)
# time_if = time()
prob1 = pdf_parzen(stroke_t, i, win_m_2, x, parzen_eq)
prob2 = pdf_parzen(stroke_f, i, win_m_2, x, parzen_eq)
# print('time parzen total', time() - time_if)
# time_if = time()
f = prob1 - prob2
f = (f * np.where(f > 0, 1, 0) /
|
np.amax(f)
|
numpy.amax
|
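# Sketch of one morphological level-set update step from mls_parzen above:
# psi <- medfilt2d(psi + dt * (max(F,0)*dilate(psi) - min(F,0)*erode(psi))).
# The arrays are toy placeholders; in the record above the speed field F
# comes from the Parzen density ratio prob1 - prob2.
import numpy as np
import cv2
from scipy.signal import medfilt2d

s_eg = np.array([[0, 1, 0],
                 [1, 1, 1],
                 [0, 1, 0]], dtype=np.uint8)
psi = np.where(np.random.rand(32, 32) > 0.5, 1, -1).astype(np.int16)
F = np.random.randn(32, 32)  # toy speed field
dt = 0.5

psi_d = cv2.dilate(psi, s_eg)  # grows the positive region
psi_e = cv2.erode(psi, s_eg)   # shrinks it
psi = medfilt2d(psi + dt * (np.maximum(F, 0) * psi_d - np.minimum(F, 0) * psi_e))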
import time
import random
import numpy as np
import pandas as pd
import hdbscan
import sklearn.datasets
from sklearn import metrics
from classix import CLASSIX
from sklearn.cluster import KMeans
from sklearn.cluster import DBSCAN
from sklearn import preprocessing
from tqdm import tqdm
from sklearn.cluster import MeanShift
from quickshift.QuickshiftPP import *
import matplotlib.pyplot as plt
import seaborn as sns
plt.style.use('bmh')
seed = 0
np.random.seed(seed)
random.seed(seed)
def test_kmeanspp_labels(X=None, y=None, _range=np.arange(2, 21, 1)):
ar = list()
am = list()
for i in _range:
kmeans = KMeans(n_clusters=i, init='k-means++', random_state=1)
kmeans.fit(X)
ri = metrics.adjusted_rand_score(y, kmeans.labels_)
mi = metrics.adjusted_mutual_info_score(y, kmeans.labels_)
ar.append(ri)
am.append(mi)
return ar, am
def test_meanshift_labels(X=None, y=None, _range=np.arange(2, 21, 1)):
ar = list()
am = list()
for i in _range:
meanshift = MeanShift(bandwidth=i)
meanshift.fit(X)
ri = metrics.adjusted_rand_score(y, meanshift.labels_)
mi = metrics.adjusted_mutual_info_score(y, meanshift.labels_)
ar.append(ri)
am.append(mi)
return ar, am
def test_dbscan_labels(X=None, y=None, _range=np.arange(0.05, 0.505, 0.005), minPts=5):
ar = list()
am = list()
for i in _range:
dbscan = DBSCAN(eps=i, n_jobs=1, min_samples=minPts)
dbscan.fit(X)
ri = metrics.adjusted_rand_score(y, dbscan.labels_)
mi = metrics.adjusted_mutual_info_score(y, dbscan.labels_)
ar.append(ri)
am.append(mi)
return ar, am
def test_hdbscan_labels(X=None, y=None, _range=np.arange(2, 21, 1)):
ar = list()
am = list()
for i in _range:
_hdbscan = hdbscan.HDBSCAN(min_cluster_size=int(i), algorithm='best')
_hdbscan.fit(X)
ri = metrics.adjusted_rand_score(y, _hdbscan.labels_)
mi = metrics.adjusted_mutual_info_score(y, _hdbscan.labels_)
ar.append(ri)
am.append(mi)
return ar, am
def test_quickshiftpp_labels(X=None, y=None, _range=np.arange(2, 17, 1), beta=0.3):
ar = list()
am = list()
for i in _range:
quicks = QuickshiftPP(k=i, beta=beta)
quicks.fit(X.copy(order='C'))
ri = metrics.adjusted_rand_score(y, quicks.memberships)
mi = metrics.adjusted_mutual_info_score(y, quicks.memberships)
ar.append(ri)
am.append(mi)
return ar, am
def test_classix_radius_labels(X=None, y=None, method=None, minPts=1, sorting='pca', _range=np.arange(0.05, 0.3, 0.005)):
ar = list()
am = list()
for i in _range:
classix = CLASSIX(radius=i, minPts=minPts, post_alloc=True, sorting=sorting,
group_merging=method, verbose=0)
classix.fit(X)
ri = metrics.adjusted_rand_score(y, classix.labels_)
mi = metrics.adjusted_mutual_info_score(y, classix.labels_)
ar.append(ri)
am.append(mi)
return ar, am
def run_sensitivity_test(datasets, _range, clustering='CLASSIX (Density)', fix_k=1, sorting='pca', label_files=None):
|
np.random.seed(1)
|
numpy.random.seed
|
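# Usage sketch for the benchmark helpers in the record above, on a toy
# labelled dataset; make_blobs and the ARI/AMI scores come straight from
# scikit-learn.
from sklearn.datasets import make_blobs

X_demo, y_demo = make_blobs(n_samples=300, centers=3, random_state=0)
X_demo = preprocessing.scale(X_demo)  # standardize features, as is common before clustering
ari_scores, ami_scores = test_kmeanspp_labels(X=X_demo, y=y_demo, _range=np.arange(2, 6))
# each list holds one score per candidate cluster count in _range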
""" Group Factor Analysis (extended model) """
#Author: <NAME> (<EMAIL>)
#Date: 22 February 2021
import numpy as np
from scipy.special import digamma, gammaln
from scipy.optimize import fmin_l_bfgs_b as lbfgsb
class GFA_DiagonalNoiseModel(object):
def __init__(self, X, params, imputation=False):
self.s = params['num_groups'] #number of groups
# ensure the data was generated with the correct number of groups
assert self.s == len(X)
#number of features in each group
self.d = []
for s in range(self.s):
self.d.append(X[s].shape[1])
self.td = np.sum(self.d) #total number of features
self.k = params['K'] #number of factors
self.N = X[0].shape[0] #number of samples
# Check scenario ('complete' for complete data; 'incomplete' for incomplete data)
if imputation:
self.scenario = 'complete'
else:
self.scenario = params['scenario']
#hyperparameters
self.a0_alpha = self.b0_alpha = self.a0_tau = self.b0_tau = 1e-14
# Initialising variational parameters
#latent variables
self.means_z = np.reshape(np.random.normal(0, 1, self.N*self.k),(self.N, self.k))
self.sigma_z = np.zeros((self.k, self.k, self.N))
self.sum_sigmaZ = self.N * np.identity(self.k)
#loading matrices
self.means_w = [[] for _ in range(self.s)]
self.sigma_w = [[] for _ in range(self.s)]
self.E_WW = [[] for _ in range(self.s)]
self.Lqw = [[] for _ in range(self.s)]
#ARD parameters
self.a_alpha = [[] for _ in range(self.s)]
self.b_alpha = [[] for _ in range(self.s)]
self.E_alpha = [[] for _ in range(self.s)]
#noise parameters
self.a_tau = [[] for _ in range(self.s)]
self.b_tau = [[] for _ in range(self.s)]
self.E_tau = [[] for _ in range(self.s)]
if self.scenario == 'incomplete':
#initialise variables to incomplete data sets
self.X_nan = [[] for _ in range(self.s)]
self.N_clean = [[] for _ in range(self.s)]
#constants for ELBO
self.logalpha = [[] for _ in range(self.s)]
self.logtau = [[] for _ in range(self.s)]
self.L_const = [[] for _ in range(self.s)]
for i in range(0, self.s):
if self.scenario == 'incomplete':
# Checking NaNs
X_new = np.zeros((1, X[i].size))
X_new[0, np.flatnonzero(np.isnan(X[i]))] = 1
self.X_nan[i] = np.reshape(X_new,(self.N, self.d[i]))
self.N_clean[i] = np.sum(~np.isnan(X[i]),axis=0)
#loading matrices
self.means_w[i] = np.zeros((self.d[i], self.k))
self.sigma_w[i] = np.zeros((self.k,self.k,self.d[i]))
#ARD parameters
self.a_alpha[i] = self.a0_alpha + self.d[i]/2.0
self.b_alpha[i] = np.ones((1, self.k))
self.E_alpha[i] = self.a_alpha[i] / self.b_alpha[i]
#noise parameters
if self.scenario == 'incomplete':
self.a_tau[i] = self.a0_tau + (self.N_clean[i])/2
else:
self.a_tau[i] = self.a0_tau + (self.N) * np.ones((1,self.d[i]))/2
self.b_tau[i] = np.zeros((1, self.d[i]))
self.E_tau[i] = 1000.0 * np.ones((1, self.d[i]))
#ELBO constant
if self.scenario == 'complete':
self.L_const[i] = -0.5 * self.N * self.d[i] * np.log(2*np.pi)
else:
self.L_const[i] = -0.5 * np.sum(self.N_clean[i]) * np.log(2*np.pi)
# Rotation parameters
self.DoRotation = True
def update_w(self, X):
"""
Update the variational parameters of the loading matrices.
Parameters
----------
X : list
List of arrays containing the data matrix of each group.
"""
self.sum_sigmaW = [np.zeros((self.k,self.k)) for _ in range(self.s)]
for i in range(0, self.s):
self.Lqw[i] = np.zeros((1, self.d[i]))
if self.scenario == 'complete':
S1 = self.sum_sigmaZ + np.dot(self.means_z.T,self.means_z)
S2 = np.dot(X[i].T,self.means_z)
for j in range(0, self.d[i]):
# Update covariance matrices of Ws
self.sigma_w[i][:,:,j] = np.diag(self.E_alpha[i]) + \
self.E_tau[i][0,j] * S1
cho = np.linalg.cholesky(self.sigma_w[i][:,:,j])
invCho = np.linalg.inv(cho)
self.sigma_w[i][:,:,j] = np.dot(invCho.T,invCho)
self.sum_sigmaW[i] += self.sigma_w[i][:,:,j]
# Update expectations of Ws
self.means_w[i][j,:] = np.dot(S2[j,:],self.sigma_w[i][:,:,j]) * \
self.E_tau[i][0,j]
# Compute determinant for ELBO
self.Lqw[i][0,j] = -2 * np.sum(np.log(np.diag(cho)))
else:
for j in range(0, self.d[i]):
samples = np.array(np.where(self.X_nan[i][:,j] == 0))
x = np.reshape(X[i][samples[0,:], j],(1, samples.shape[1]))
Z = np.reshape(self.means_z[samples[0,:],:],(samples.shape[1],self.k))
S1 = self.sum_sigmaZ + np.dot(Z.T,Z)
S2 = np.dot(x,Z)
# Update covariance matrices of Ws
self.sigma_w[i][:,:,j] = np.diag(self.E_alpha[i]) + \
self.E_tau[i][0,j] * S1
cho = np.linalg.cholesky(self.sigma_w[i][:,:,j])
invCho = np.linalg.inv(cho)
self.sigma_w[i][:,:,j] = np.dot(invCho.T,invCho)
self.sum_sigmaW[i] += self.sigma_w[i][:,:,j]
# Update expectations of Ws
self.means_w[i][j,:] = np.dot(S2,self.sigma_w[i][:,:,j]) * \
self.E_tau[i][0,j]
# Compute determinant for ELBO
self.Lqw[i][0,j] = -2 * np.sum(np.log(np.diag(cho)))
# Calculate E[W^T W]
self.E_WW[i] = self.sum_sigmaW[i] + \
np.dot(self.means_w[i].T, self.means_w[i])
def update_z(self, X):
"""
Update the variational parameters of the latent variables.
Parameters
----------
X : list
List of arrays containing the data matrix of each group.
"""
self.means_z = self.means_z * 0
if self.scenario == 'complete':
# Update covariance matrix of Z
self.sigma_z = np.identity(self.k)
for i in range(0, self.s):
for j in range(0, self.d[i]):
w = np.reshape(self.means_w[i][j,:], (1,self.k))
ww = self.sigma_w[i][:,:, j] + np.dot(w.T, w)
self.sigma_z += ww * self.E_tau[i][0,j]
#efficient way of computing sigmaZ
cho = np.linalg.cholesky(self.sigma_z)
invCho = np.linalg.inv(cho)
self.sigma_z = np.dot(invCho.T,invCho)
self.sum_sigmaZ = self.N * self.sigma_z
# Compute determinant for ELBO
self.Lqz = -2 * np.sum(np.log(np.diag(cho)))
# Update expectations of Z
self.means_z = self.means_z * 0
for i in range(0, self.s):
for j in range(0, self.d[i]):
x = np.reshape(X[i][:, j],(self.N,1))
w = np.reshape(self.means_w[i][j,:], (1,self.k))
self.means_z += np.dot(x, w) * self.E_tau[i][0,j]
self.means_z = np.dot(self.means_z, self.sigma_z)
else:
self.sigma_z = np.zeros((self.k,self.k,self.N))
self.sum_sigmaZ = np.zeros((self.k,self.k))
self.Lqz = np.zeros((1, self.N))
for n in range(0, self.N):
self.sigma_z[:,:,n] = np.identity(self.k)
S1 = np.zeros((1,self.k))
for i in range(0, self.s):
dim = np.array(np.where(self.X_nan[i][n,:] == 0))
for j in range(dim.shape[1]):
w = np.reshape(self.means_w[i][dim[0,j],:], (1,self.k))
ww = self.sigma_w[i][:,:, dim[0,j]] + np.dot(w.T, w)
self.sigma_z[:,:,n] += ww * self.E_tau[i][0,dim[0,j]]
x = np.reshape(X[i][n, dim[0,:]],(1, dim.size))
tau = np.reshape(self.E_tau[i][0,dim[0,:]],(1, dim.size))
S1 += np.dot(x, np.diag(tau[0])).dot(self.means_w[i][dim[0,:],:])
# Update covariance matrix of Z
cho = np.linalg.cholesky(self.sigma_z[:,:,n])
invCho = np.linalg.inv(cho)
self.sigma_z[:,:,n] = np.dot(invCho.T,invCho)
self.sum_sigmaZ += self.sigma_z[:,:,n]
# Update expectations of Z
self.means_z[n,:] = np.dot(S1, self.sigma_z[:,:,n])
# Compute determinant for ELBO
self.Lqz[0,n] = -2 * np.sum(np.log(np.diag(cho)))
# Calculate E[Z^T Z]
self.E_zz = self.sum_sigmaZ + np.dot(self.means_z.T, self.means_z)
def update_alpha(self):
"""
Update the variational parameters of the alphas.
Parameters
----------
X : list
List of arrays containing the data matrix of each group.
"""
for i in range(0, self.s):
# Update b_alpha
self.b_alpha[i] = self.b0_alpha + np.diag(self.E_WW[i])/2
# Update expectation of alpha
self.E_alpha[i] = self.a_alpha[i] / self.b_alpha[i]
def update_tau(self, X):
"""
Update the variational parameters of the taus.
Parameters
----------
X : list
List of arrays containing the data matrix of each group.
"""
for i in range(0, self.s):
for j in range(0, self.d[i]):
if self.scenario == 'complete':
w = np.reshape(self.means_w[i][j,:], (1,self.k))
ww = self.sigma_w[i][:,:, j] + np.dot(w.T, w)
x = np.reshape(X[i][:, j],(self.N,1))
z = self.means_z
ZZ = self.E_zz
else:
samples = np.array(np.where(self.X_nan[i][:,j] == 0))
w = np.reshape(self.means_w[i][j,:], (1,self.k))
ww = self.sigma_w[i][:,:,j] + np.dot(w.T,w)
x = np.reshape(X[i][samples[0,:],j],(samples.size,1))
z = np.reshape(self.means_z[samples[0,:],:],(samples.size,self.k))
sum_covZ = np.sum(self.sigma_z[:,:,samples[0,:]],axis=2)
ZZ = sum_covZ + np.dot(z.T,z)
# Update b_tau
self.b_tau[i][0,j] = self.b0_tau + 0.5 * (np.dot(x.T,x) + \
np.trace(
|
np.dot(ww, ZZ)
|
numpy.dot
|
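# Sketch of the Cholesky-based inversion trick used throughout the GFA updates
# above: for a symmetric positive-definite precision matrix A with A = L L^T,
# inv(A) = inv(L).T @ inv(L), and log det(A) = 2 * sum(log(diag(L))), so the
# ELBO determinant terms reduce to -2 * sum(log(diag(L))).
import numpy as np

A = np.array([[4.0, 1.0],
              [1.0, 3.0]])  # SPD toy matrix
L = np.linalg.cholesky(A)
invL = np.linalg.inv(L)
A_inv = invL.T @ invL  # equals np.linalg.inv(A)
logdet_A_inv = -2.0 * np.sum(np.log(np.diag(L)))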
#!/usr/bin/python
import sys
import os
import numpy as np
import logging
import mmap
import copy
from parsevasp import constants
from parsevasp import utils
from parsevasp.base import BaseParser
# Try to import lxml, if not present fall back to
# intrinsic ElementTree
lxml = False
try:
from lxml import etree
lxml = True
except ImportError:
try:
# Python 2.5
import xml.etree.cElementTree as etree
except ImportError:
try:
# Python 2.5
import xml.etree.ElementTree as etree
except ImportError:
try:
# normal cElementTree
import cElementTree as etree
except ImportError:
try:
# normal ElementTree
import elementtree.ElementTree as etree
except ImportError:
logging.error('Failed to import ElementTree.')
sys.exit('Failed to import ElementTree.')
_SUPPORTED_TOTAL_ENERGIES = {'energy_extrapolated': 'e_0_energy',
'energy_free': 'e_fr_energy',
'energy_no_entropy': 'e_wo_entrp'}
class Xml(BaseParser):
ERROR_MULTIPLE_ENTRIES = 500
ERROR_NO_SPECIES = 501
ERROR_NO_ISPIN = 502
ERROR_NO_NBANDS = 503
ERROR_NO_KPOINTS = 504
ERROR_MISMATCH_KPOINTS_NBANDS = 505
ERROR_UNKNOWN_ELEMENT = 506
ERROR_UNSUPPORTED_STATUS = 507
ERROR_NO_SIZE = 508
ERROR_OVERFLOW = 509
BaseParser.ERROR_MESSAGES.update({
ERROR_NO_SPECIES:
'Please extract the species first.',
ERROR_MULTIPLE_ENTRIES:
'Multiple entries were located.',
ERROR_NO_ISPIN:
'Please extract ISPIN first.',
ERROR_NO_NBANDS:
'Please extract NBANDS first.',
ERROR_NO_KPOINTS:
'Please extract the kpoints first.',
ERROR_MISMATCH_KPOINTS_NBANDS:
'The number of kpoints and bands for the entries does not match the number of '
'located kpoints and NBANDS.',
ERROR_UNKNOWN_ELEMENT:
'There is an atomic element present in the XML file that is unknown.',
ERROR_UNSUPPORTED_STATUS:
'The supplied status is not supported.',
ERROR_NO_SIZE:
'Can not calculate size.',
ERROR_OVERFLOW:
'Overflow detected in the XML file.'
})
ERROR_MESSAGES = BaseParser.ERROR_MESSAGES
def __init__(self,
file_path=None,
file_handler=None,
k_before_band=False,
extract_all=True,
logger=None,
event=False):
"""Initialize the XmlParser by first trying the lxml and
fall back to the standard ElementTree if that is not present.
Parameters
----------
k_before_band : bool
If True the kpoint index runs before the bands
index.
extract_all : bool
Extract data from all calculation (i.e. ionic steps)
event : bool
If True, force event based method.
Notes
-----
lxml should be used and is required for large files
"""
super(Xml, self).__init__(file_path=file_path,
file_handler=file_handler,
logger=logger)
self._sizecutoff = 500
self._event = event
if self._file_path is None and self._file_handler is None:
self._logger.error(
self.ERROR_MESSAGES[self.ERROR_ONLY_ONE_ARGUMENT])
sys.exit(self.ERROR_ONLY_ONE_ARGUMENT)
# extract data from all calculations (e.g. ionic steps)
self._extract_all = extract_all
# kpoint index before band index (for instance for the ordering
# of the eigenvalue data etc.)?
self._k_before_band = k_before_band
# version
self._version = None
# dictionaries that contain the output of the parsing
self._parameters = {
'symprec': None,
'ismear': None,
'sigma': None,
'ispin': None,
'nbands': None,
'nelect': None,
'system': None,
'nelm': None,
'nsw': None,
}
self._lattice = {
'unitcell': None,
'species': None,
'positions': None,
'kpoints': None,
'kpointsw': None,
'kpointdiv': None
}
self._data = {
'eigenvalues': None,
'eigenvalues_specific': None,
'eigenvelocities': None,
'kpoints': None,
'kpointsw': None,
'occupancies': None,
'dos': None,
'dos_specific': None,
'totens': None,
'forces': None,
'stress': None,
'dielectrics': None,
'projectors': None,
'hessian': None,
'dynmat': None,
'born': None
}
if lxml:
self._logger.info('We are utilizing lxml!')
else:
self._logger.info('We are not utilizing lxml!')
# parse parse parse
self._parse()
@property
def truncated(self):
"""Return True of the xml parsed is truncated."""
return self._xml_recover
def _parse(self):
"""Perform the actual parsing
Parameters
----------
None
Returns
-------
None
"""
# Check size of the XML file. For large files we need to
# perform event driven parsing. For smaller files this is
# not necessary and is too slow.
file_size = self._file_size()
if file_size is None:
return None
# Do a quick check to see if the XML file is not truncated
self._xml_recover = self._check_xml()
if ((file_size < self._sizecutoff) or self._xml_recover) and \
not self._event:
# run regular method (loads file into memory) and
# enable recovery mode if necessary
self._parsew(self._xml_recover)
else:
# event based, saves a bit of memory
self._parsee()
def _parsew(self, xml_recover):
"""Performs parsing on the whole XML files. For smaller files
"""
self._logger.debug('Running parsew.')
# now open the complete file; its presence was already checked during the recovery check
if self._file_handler is None:
filer = self._file_path
else:
filer = self._file_handler
# make sure we enable the recovery mode
# pretty sure there is a performance bottleneck running this
# enabled at all times, so consider adding a check for
# truncated XML files and then enable it
if lxml and xml_recover:
self._logger.debug('Running LXML in recovery mode.')
parser = etree.XMLParser(recover=True)
vaspxml = etree.parse(filer, parser=parser)
else:
vaspxml = etree.parse(filer)
# do we want to extract data from all calculations (e.g. ionic steps)
extract_all = self._extract_all
# let us start to parse the content
self._version = self._fetch_versionw(vaspxml)
self._parameters['symprec'] = self._fetch_symprecw(vaspxml)
self._parameters['sigma'] = self._fetch_sigmaw(vaspxml)
self._parameters['ismear'] = self._fetch_ismearw(vaspxml)
self._parameters['ispin'] = self._fetch_ispinw(vaspxml)
self._parameters['nbands'] = self._fetch_nbandsw(vaspxml)
self._parameters['nelect'] = self._fetch_nelectw(vaspxml)
self._parameters['system'] = self._fetch_systemw(vaspxml)
self._parameters['nelm'] = self._fetch_nelmw(vaspxml)
self._parameters['nsw'] = self._fetch_nsww(vaspxml)
self._lattice['species'] = self._fetch_speciesw(vaspxml)
self._lattice['unitcell'], self._lattice['positions'], \
self._data['forces'], self._data['stress'] = \
self._fetch_upfsw(vaspxml, extract_all=extract_all)
self._lattice['kpoints'] = self._fetch_kpointsw(vaspxml)
self._lattice['kpointsw'] = self._fetch_kpointsww(vaspxml)
self._lattice['kpointdiv'] = self._fetch_kpointdivw(vaspxml)
self._data['eigenvalues'], self._data[
'occupancies'] = self._fetch_eigenvaluesw(vaspxml)
self._data['eigenvalues_specific'] = self._fetch_eigenvalues_specificw(
vaspxml)
self._data['eigenvelocities'] = self._fetch_eigenvelocitiesw(vaspxml)
self._data['dos'], self._data['dos_specific'] = self._fetch_dosw(
vaspxml)
self._data['totens'] = self._fetch_totensw(vaspxml)
self._data['dielectrics'] = self._fetch_dielectricsw(vaspxml)
self._data['projectors'] = self._fetch_projectorsw(vaspxml)
self._data['hessian'] = self._fetch_hessian(vaspxml)
self._data['dynmat'] = self._fetch_dynmatw(vaspxml)
self._data['born'] = self._fetch_bornw(vaspxml)
def _parsee(self):
"""Performs parsing in an event driven fashion on the XML file.
Slower, but suitable for bigger files.
"""
# set logger
self._logger.debug('Running parsee.')
# helper lists
data = []
data2 = []
data3 = []
data4 = []
data5 = []
data6 = []
# dicts
cell = {}
pos = {}
force = {}
stress = {}
dos = {}
totens = {}
dynmat = {}
_dos = {}
_dos2 = {}
# bool to control extraction of content
extract_generator = False
extract_parameters = False
extract_calculation = False
extract_latticedata = False
extract_unitcell = False
extract_positions = False
extract_species = False
extract_kpointdata = False
extract_kpoints = False
extract_kpointsw = False
extract_kpointdiv = False
extract_kpoints_specific = False
extract_kpointsw_specific = False
extract_eigenvalues = False
extract_eigenvalues_specific = False
extract_eigenvalues_spin1 = False
extract_eigenvalues_spin2 = False
extract_eigenvalues_specific_spin1 = False
extract_eigenvalues_specific_spin2 = False
extract_eigenvelocities = False
extract_eigenvelocities_spin1 = False
extract_eigenvelocities_spin2 = False
extract_dos = False
extract_dos_specific = False
extract_total_dos = False
extract_partial_dos = False
extract_dos_ispin1 = False
extract_dos_ispin2 = False
extract_dos_specific_ispin1 = False
extract_dos_specific_ispin2 = False
extract_projected = False
extract_forces = False
extract_stress = False
extract_energies = False
extract_e_0_energy = False
extract_e_fr_energy = False
extract_e_wo_entrp = False
extract_scstep = False
extract_dielectrics = False
extract_eig_proj = False
extract_eig_proj_ispin1 = False
extract_eig_proj_ispin2 = False
extract_dynmat = False
extract_dynmat_eigen = False
extract_hessian = False
extract_born = False
# do we want to extract data from all calculations (e.g. ionic steps)
extract_all = self._extract_all
if self._file_handler is None:
filer = self._file_path
else:
filer = self._file_handler
# index that control the calculation step (e.g. ionic step)
calc = 1
for event, element in etree.iterparse(filer, events=('start', 'end')):
# set extraction points (what to read and when to read it)
# here we also set the relevant data elements when the tags
# close when they contain more than one element
if event == 'start' and element.tag == 'generator':
extract_generator = True
if event == 'end' and element.tag == 'generator':
extract_generator = False
if event == 'start' and element.tag == 'parameters':
extract_parameters = True
if event == 'end' and element.tag == 'parameters':
extract_parameters = False
if event == 'start' and element.tag == 'calculation':
# Instead of needing to check the nested dicts we initialize them here
# so that we can use update for each calculation.
totens[calc] = {}
extract_calculation = True
if event == 'end' and element.tag == 'calculation':
data3 = self._convert_array1D_f(data3)
data4 = self._convert_array1D_f(data4)
data5 = self._convert_array1D_f(data5)
totens[calc].update({'energy_extrapolated': data3,
'energy_free': data4,
'energy_no_entropy': data5
})
data3 = []
data4 = []
data5 = []
# update index for the calculation
calc = calc + 1
extract_calculation = False
if event == 'start' and element.tag == 'array' \
and element.attrib.get('name') == 'atoms':
extract_species = True
if event == 'end' and element.tag == 'array' \
and element.attrib.get('name') == 'atoms':
# only need every other element (element, not atomtype)
self._lattice['species'] = self._convert_species(data[::2])
data = []
extract_species = False
if event == 'start' and element.tag == 'kpoints' and not extract_calculation:
extract_kpointdata = True
if event == 'end' and element.tag == 'kpoints' and not extract_calculation:
extract_kpointdata = False
if event == 'start' and element.tag == 'projected':
extract_projected = True
if event == 'end' and element.tag == 'projected':
extract_projected = False
# now fetch the data
if extract_generator:
try:
if event == 'start' and element.attrib['name'] == 'version':
self._version = element.text
except KeyError:
pass
if extract_parameters:
try:
if event == 'start' and element.attrib['name'] == 'SYMPREC':
self._parameters['symprec'] = self._convert_f(element)
except KeyError:
pass
try:
if event == 'start' and element.attrib['name'] == 'ISPIN':
self._parameters['ispin'] = self._convert_i(element)
except KeyError:
pass
try:
if event == 'start' and element.attrib['name'] == 'ISMEAR':
self._parameters['ismear'] = self._convert_i(element)
except KeyError:
pass
try:
if event == 'start' and element.attrib['name'] == 'SIGMA':
self._parameters['sigma'] = self._convert_f(element)
except KeyError:
pass
try:
if event == 'start' and element.attrib['name'] == 'NBANDS':
self._parameters['nbands'] = self._convert_i(element)
except KeyError:
pass
try:
if event == 'start' and element.attrib['name'] == 'NELECT':
self._parameters['nelect'] = self._convert_f(element)
except KeyError:
pass
try:
if event == 'start' and element.attrib['name'] == 'SYSTEM':
self._parameters['system'] = element.text
except KeyError:
pass
try:
if event == 'start' and element.attrib['name'] == 'NELM' \
and element.getparent().attrib['name'] == 'electronic convergence':
self._parameters['nelm'] = self._convert_i(element)
except KeyError:
pass
try:
if event == 'start' and element.attrib['name'] == 'NSW':
self._parameters['nsw'] = self._convert_i(element)
except KeyError:
pass
if extract_calculation:
# it would be very tempting just to fill the data and dissect
# it later, which would be faster, but it is not so easy since
# we do not know how many calculations have been performed
# or how many sc steps there are per calculation
if event == 'start' and element.tag == 'dos' and element.attrib.get(
'comment') is None:
extract_dos = True
if event == 'end' and element.tag == 'total' and extract_dos:
if data2:
# only store energy for one part as
# this is the same for both
dos_ispin = self._convert_array2D_f(data, 3)
_dos['energy'] = dos_ispin[:, 0]
_dos['total'] = dos_ispin[:, 1]
_dos['integrated'] = dos_ispin[:, 2]
_dos['partial'] = None
dos_ispin = self._convert_array2D_f(data2, 3)
_dos2['total'] = dos_ispin[:, 1]
_dos2['integrated'] = dos_ispin[:, 2]
_dos2['partial'] = None
else:
dos_ispin = self._convert_array2D_f(data, 3)
_dos['energy'] = dos_ispin[:, 0]
_dos['total'] = dos_ispin[:, 1]
_dos['integrated'] = dos_ispin[:, 2]
_dos['partial'] = None
data = []
data2 = []
if event == 'end' and element.tag == 'partial' and extract_dos:
num_atoms = 0
if self._lattice['species'] is not None:
num_atoms = self._lattice['species'].shape[0]
else:
self._logger.error(
self.ERROR_MESSAGES[self.ERROR_NO_SPECIES])
sys.exit(self.ERROR_NO_SPECIES)
if data2:
dos_ispin = self._convert_array2D_f(data, 10)
# do not need the energy term (similar to total)
_dos['partial'] = np.asarray(
np.split(dos_ispin[:, 1:10], num_atoms))
dos_ispin = self._convert_array2D_f(data2, 10)
# do not need the energy term (similar to total)
_dos2['partial'] = np.asarray(
np.split(dos_ispin[:, 1:10], num_atoms))
else:
dos_ispin = self._convert_array2D_f(data, 10)
# do not need the energy term (similar to total)
_dos['partial'] = np.asarray(
np.split(dos_ispin[:, 1:10], num_atoms))
data = []
data2 = []
if event == 'end' and element.tag == 'dos' and extract_dos:
# check the Fermi level
if len(data6) == 1:
fermi_level = self._convert_f(data6[0])
elif len(data6) > 1:
self._logger.error(
self.ERROR_MESSAGES[self.ERROR_MULTIPLE_ENTRIES] +
" The tag in question is 'efermi'.")
sys.exit(self.ERROR_MULTIPLE_ENTRIES)
else:
fermi_level = None
if _dos2:
dos['up'] = _dos
dos['down'] = _dos2
dos['total'] = {
'fermi_level': fermi_level,
'energy': _dos['energy']
}
del dos['up']['energy']
else:
_dos['fermi_level'] = fermi_level
dos['total'] = _dos
self._data['dos'] = copy.deepcopy(dos)
data = []
data2 = []
data6 = []
_dos = {}
_dos2 = {}
extract_dos = False
if event == 'start' and element.tag == 'dos' and element.attrib.get(
'comment') == 'interpolated':
extract_dos_specific = True
if event == 'end' and element.tag == 'total' and extract_dos_specific:
if data2:
# only store energy for one part as
# this is the same for both
dos_ispin = self._convert_array2D_f(data, 3)
_dos['energy'] = dos_ispin[:, 0]
_dos['total'] = dos_ispin[:, 1]
_dos['integrated'] = dos_ispin[:, 2]
_dos['partial'] = None
dos_ispin = self._convert_array2D_f(data2, 3)
_dos2['total'] = dos_ispin[:, 1]
_dos2['integrated'] = dos_ispin[:, 2]
_dos2['partial'] = None
else:
dos_ispin = self._convert_array2D_f(data, 3)
_dos['energy'] = dos_ispin[:, 0]
_dos['total'] = dos_ispin[:, 1]
_dos['integrated'] = dos_ispin[:, 2]
_dos['partial'] = None
data = []
data2 = []
if event == 'end' and element.tag == 'partial' and extract_dos_specific:
num_atoms = 0
if self._lattice['species'] is not None:
num_atoms = self._lattice['species'].shape[0]
else:
self._logger.error(
self.ERROR_MESSAGES[self.ERROR_NO_SPECIES])
sys.exit(self.ERROR_NO_SPECIES)
if data2:
dos_ispin = self._convert_array2D_f(data, 10)
# do not need the energy term (similar to total)
_dos['partial'] = np.asarray(
np.split(dos_ispin[:, 1:10], num_atoms))
dos_ispin = self._convert_array2D_f(data2, 10)
# do not need the energy term (similar to total)
_dos2['partial'] = np.asarray(
np.split(dos_ispin[:, 1:10], num_atoms))
else:
dos_ispin = self._convert_array2D_f(data, 10)
# do not need the energy term (similar to total)
_dos['partial'] = np.asarray(
np.split(dos_ispin[:, 1:10], num_atoms))
data = []
data2 = []
if event == 'end' and element.tag == 'dos' and extract_dos_specific:
# check the Fermi level
if len(data6) == 1:
fermi_level = self._convert_f(data6[0])
elif len(data6) > 1:
self._logger.error(
self.ERROR_MESSAGES[self.ERROR_MULTIPLE_ENTRIES] +
" The tag in question is 'efermi'.")
sys.exit(self.ERROR_MULTIPLE_ENTRIES)
else:
fermi_level = None
if _dos2:
dos['up'] = _dos
dos['down'] = _dos2
dos['total'] = {
'fermi_level': fermi_level,
'energy': _dos['energy']
}
del dos['up']['energy']
else:
_dos['fermi_level'] = fermi_level
dos['total'] = _dos
self._data['dos_specific'] = dos
data = []
data2 = []
data6 = []
_dos = {}
_dos2 = {}
extract_dos_specific = False
if event == 'start' and element.tag == 'structure':
extract_latticedata = True
if event == 'end' and element.tag == 'structure':
extract_latticedata = False
if event == 'start' and element.tag == 'varray' and \
element.attrib['name'] == 'forces':
extract_forces = True
if event == 'end' and element.tag == 'varray' and \
element.attrib['name'] == 'forces':
force[calc] = self._convert_array2D_f(data, 3)
data = []
extract_forces = False
if event == 'start' and element.tag == 'varray' and \
element.attrib['name'] == 'stress':
extract_stress = True
if event == 'end' and element.tag == 'varray' and \
element.attrib['name'] == 'stress':
stress[calc] = self._convert_array2D_f(data, 3)
data = []
extract_stress = False
if event == 'start' and element.tag == 'energy' and not extract_scstep:
extract_energies = True
if event == 'end' and element.tag == 'energy' and not extract_scstep:
extract_energies = False
if event == 'start' and element.tag == 'scstep':
extract_scstep = True
if event == 'end' and element.tag == 'scstep':
extract_scstep = False
if event == 'start' and element.tag == 'eigenvalues' and not extract_eigenvelocities and element.attrib.get(
'comment'
) != 'interpolated' and not extract_eigenvalues_specific:
extract_eigenvalues = True
if event == 'end' and element.tag == 'eigenvalues' and extract_eigenvalues:
num_kpoints = len(self._lattice['kpoints'])
if not data2:
eigenvalues, occupancies = self._extract_eigenvalues(
data, None, num_kpoints)
else:
eigenvalues, occupancies = self._extract_eigenvalues(
data, data2, num_kpoints)
self._data['eigenvalues'] = eigenvalues
self._data['occupancies'] = occupancies
data = []
data2 = []
extract_eigenvalues = False
if event == 'start' and element.tag == 'eigenvalues' and element.attrib.get(
'comment') == 'interpolated':
extract_eigenvalues_specific = True
if event == 'end' and element.tag == 'eigenvalues' and extract_eigenvalues_specific:
num_kpoints = len(self._data['kpoints'])
if not data2:
eigenvalues_specific, _ = self._extract_eigenvalues(
data, None, num_kpoints)
else:
eigenvalues_specific, _ = self._extract_eigenvalues(
data, data2, num_kpoints)
self._data['eigenvalues_specific'] = eigenvalues_specific
data = []
data2 = []
extract_eigenvalues_specific = False
if event == 'start' and element.tag == 'eigenvelocities':
extract_eigenvelocities = True
if event == 'end' and element.tag == 'eigenvelocities':
num_kpoints = len(self._data['kpoints'])
if not data2:
eigenvelocities = self._extract_eigenvelocities(
data, None, num_kpoints)
else:
eigenvelocities = self._extract_eigenvelocities(
data, data2, num_kpoints)
self._data['eigenvelocities'] = eigenvelocities
data = []
data2 = []
extract_eigenvelocities = False
if event == 'start' and element.tag == 'dielectricfunction':
extract_dielectrics = True
if event == 'end' and element.tag == 'dielectricfunction':
_diel = {}
diel = np.split(self._convert_array2D_f(data, 7), 2)
_diel['energy'] = diel[0][:, 0]
_diel['imag'] = diel[0][:, 1:7]
_diel['real'] = diel[1][:, 1:7]
self._data['dielectrics'] = _diel
data = []
extract_dielectrics = False
if event == 'start' and element.tag == 'dynmat':
extract_dynmat = True
if event == 'end' and element.tag == 'dynmat':
self._data['dynmat'] = dynmat
extract_dynmat = False
if event == 'start' and element.tag == 'array':
# a bit of special treatment here, as there is
# an array element without attributes, so we get
# KeyErrors
try:
if element.attrib['name'] == 'born_charges':
extract_born = True
except KeyError:
pass
if event == 'end' and element.tag == 'array':
# again a bit special
try:
if element.attrib['name'] == 'born_charges':
num_atoms = 0
if self._lattice['species'] is not None:
num_atoms = self._lattice['species'].shape[0]
else:
self._logger.error(
self.ERROR_MESSAGES[self.ERROR_NO_SPECIES])
sys.exit(self.ERROR_NO_SPECIES)
data = self._convert_array2D_f(data, 3)
data = np.split(data, num_atoms)
self._data['born'] = np.asarray(data)
data = []
extract_born = False
except KeyError:
pass
# now extract data
if extract_scstep:
# extrapolated energy
if event == 'start' and element.tag == 'i' and \
element.attrib['name'] == 'e_0_energy':
extract_e_0_energy = True
if event == 'end' and element.tag == 'i' and \
element.attrib['name'] == 'e_0_energy':
extract_e_0_energy = False
if extract_e_0_energy:
data3.append(element)
# free energy
if event == 'start' and element.tag == 'i' and \
element.attrib['name'] == 'e_fr_energy':
extract_e_fr_energy = True
if event == 'end' and element.tag == 'i' and \
element.attrib['name'] == 'e_fr_energy':
extract_e_fr_energy = False
if extract_e_fr_energy:
data4.append(element)
# energy without entropy
if event == 'start' and element.tag == 'i' and \
element.attrib['name'] == 'e_wo_entrp':
extract_e_wo_entrp = True
if event == 'end' and element.tag == 'i' and \
element.attrib['name'] == 'e_wo_entrp':
extract_e_wo_entrp = False
if extract_e_wo_entrp:
data5.append(element)
if extract_latticedata:
if event == 'start' and element.tag == 'varray' \
and element.attrib.get('name') == 'basis':
extract_unitcell = True
if event == 'end' and element.tag == 'varray' \
and element.attrib.get('name') == 'basis':
cell[calc] = self._convert_array2D_f(data, 3)
data = []
extract_unitcell = False
if event == 'start' and element.tag == 'varray' \
and element.attrib.get('name') == 'positions':
extract_positions = True
if event == 'end' and element.tag == 'varray' \
and element.attrib.get('name') == 'positions':
pos[calc] = self._convert_array2D_f(data, 3)
data = []
extract_positions = False
if extract_unitcell:
if event == 'start' and element.tag == 'v':
data.append(element)
if extract_positions:
if event == 'start' and element.tag == 'v':
data.append(element)
if extract_forces:
if event == 'start' and element.tag == 'v':
data.append(element)
if extract_stress:
if event == 'start' and element.tag == 'v':
data.append(element)
if extract_energies:
# extrapolated energy
if event == 'start' and element.tag == 'i' and \
element.attrib['name'] == 'e_0_energy':
totens[calc].update({'energy_extrapolated_final': float(element.text)})
# free energy
if event == 'start' and element.tag == 'i' and \
element.attrib['name'] == 'e_fr_energy':
totens[calc].update({'energy_free_final': float(element.text)})
# energy without entropy
if event == 'start' and element.tag == 'i' and \
element.attrib['name'] == 'e_wo_entrp':
totens[calc].update({'energy_no_entropy_final': float(element.text)})
if extract_eigenvalues:
if event == 'start' and element.tag == 'set' \
and element.attrib.get('comment') == 'spin 1':
extract_eigenvalues_spin1 = True
if event == 'end' and element.tag == 'set' \
and element.attrib.get('comment') == 'spin 1':
extract_eigenvalues_spin1 = False
if event == 'start' and element.tag == 'set' \
and element.attrib.get('comment') == 'spin 2':
extract_eigenvalues_spin2 = True
if event == 'end' and element.tag == 'set' \
and element.attrib.get('comment') == 'spin 2':
extract_eigenvalues_spin2 = False
if extract_eigenvalues_spin1:
if event == 'start' and element.tag == 'r':
data.append(element)
if extract_eigenvalues_spin2:
if event == 'start' and element.tag == 'r':
data2.append(element)
if extract_eigenvalues_specific:
if event == 'start' and element.tag == 'varray' \
and element.attrib.get('name') == 'kpointlist':
extract_kpoints_specific = True
if event == 'end' and element.tag == 'varray' \
and element.attrib.get('name') == 'kpointlist':
self._data['kpoints'] = self._convert_array2D_f(
data, 3)
data = []
extract_kpoints_specific = False
if event == 'start' and element.tag == 'varray' \
and element.attrib.get('name') == 'weights':
extract_kpointsw_specific = True
if event == 'end' and element.tag == 'varray' \
and element.attrib.get('name') == 'weights':
self._data['kpointsw'] = self._convert_array1D_f(data)
data = []
extract_kpointsw_specific = False
if event == 'start' and element.tag == 'set' \
and element.attrib.get('comment') == 'spin 1':
extract_eigenvalues_spin1 = True
if event == 'end' and element.tag == 'set' \
and element.attrib.get('comment') == 'spin 1':
extract_eigenvalues_spin1 = False
if event == 'start' and element.tag == 'set' \
and element.attrib.get('comment') == 'spin 2':
extract_eigenvalues_spin2 = True
if event == 'end' and element.tag == 'set' \
and element.attrib.get('comment') == 'spin 2':
extract_eigenvalues_spin2 = False
if extract_kpoints_specific:
if event == 'start' and element.tag == 'v':
data.append(element)
if extract_kpointsw_specific:
if event == 'start' and element.tag == 'v':
data.append(element)
if extract_eigenvalues_spin1:
if event == 'start' and element.tag == 'r':
data.append(element)
if extract_eigenvalues_spin2:
if event == 'start' and element.tag == 'r':
data2.append(element)
if extract_eigenvelocities:
if event == 'start' and element.tag == 'varray' \
and element.attrib.get('name') == 'kpointlist':
extract_kpoints_specific = True
if event == 'end' and element.tag == 'varray' \
and element.attrib.get('name') == 'kpointlist':
self._data['kpoints'] = self._convert_array2D_f(
data, 3)
data = []
extract_kpoints_specific = False
if event == 'start' and element.tag == 'varray' \
and element.attrib.get('name') == 'weights':
extract_kpointsw_specific = True
if event == 'end' and element.tag == 'varray' \
and element.attrib.get('name') == 'weights':
self._data['kpointsw'] = self._convert_array1D_f(data)
data = []
extract_kpointsw_specific = False
if event == 'start' and element.tag == 'set' \
and element.attrib.get('comment') == 'spin 1':
extract_eigenvelocities_spin1 = True
if event == 'end' and element.tag == 'set' \
and element.attrib.get('comment') == 'spin 1':
extract_eigenvelocities_spin1 = False
if event == 'start' and element.tag == 'set' \
and element.attrib.get('comment') == 'spin 2':
extract_eigenvelocities_spin2 = True
if event == 'end' and element.tag == 'set' \
and element.attrib.get('comment') == 'spin 2':
extract_eigenvelocities_spin2 = False
if extract_kpoints_specific:
if event == 'start' and element.tag == 'v':
data.append(element)
if extract_kpointsw_specific:
if event == 'start' and element.tag == 'v':
data.append(element)
if extract_eigenvelocities_spin1:
if event == 'start' and element.tag == 'r':
data.append(element)
if extract_eigenvelocities_spin2:
if event == 'start' and element.tag == 'r':
data2.append(element)
if extract_dielectrics:
if event == 'start' and element.tag == 'r':
data.append(element)
if extract_projected:
# make sure we skip the first entry containing
# the eigenvalues (already stored at this point)
if event == 'end' and element.tag == 'eigenvalues':
extract_eig_proj = True
if event == 'end' and element.tag == 'array' and \
extract_eig_proj:
if not data2:
projectors = self._extract_projectors(data, None)
else:
projectors = self._extract_projectors(data, data2)
self._data['projectors'] = projectors
data = []
data2 = []
extract_eig_proj = False
if extract_eig_proj:
if event == 'start' and element.tag == 'set' \
and element.attrib.get('comment') == 'spin1':
extract_eig_proj_ispin1 = True
if event == 'end' and element.tag == 'set' \
and element.attrib.get('comment') == 'spin1':
extract_eig_proj_ispin1 = False
if event == 'start' and element.tag == 'set' \
and element.attrib.get('comment') == 'spin2':
extract_eig_proj_ispin2 = True
if event == 'end' and element.tag == 'set' \
and element.attrib.get('comment') == 'spin2':
extract_eig_proj_ispin2 = False
if extract_eig_proj_ispin1:
if event == 'start' and element.tag == 'r':
data.append(element)
if extract_eig_proj_ispin2:
if event == 'start' and element.tag == 'r':
data2.append(element)
if extract_dynmat:
if event == 'start' and element.tag == 'varray' \
and element.attrib.get('name') == 'hessian':
extract_hessian = True
if event == 'end' and element.tag == 'varray' \
and element.attrib.get('name') == 'hessian':
num_atoms = 0
if self._lattice['species'] is not None:
num_atoms = self._lattice['species'].shape[0]
else:
self._logger.error(
self.ERROR_MESSAGES[self.ERROR_NO_SPECIES])
sys.exit(self.ERROR_NO_SPECIES)
hessian = self._convert_array2D_f(data, num_atoms * 3)
self._data['hessian'] = hessian
data = []
extract_hessian = False
if event == 'start' and element.tag == 'varray' \
and element.attrib.get('name') == 'eigenvectors':
extract_dynmat_eigen = True
if event == 'end' and element.tag == 'varray' \
and element.attrib.get('name') == 'eigenvectors':
num_atoms = 0
if self._lattice['species'] is not None:
num_atoms = self._lattice['species'].shape[0]
else:
self._logger.error(
self.ERROR_MESSAGES[self.ERROR_NO_SPECIES])
sys.exit(self.ERROR_NO_SPECIES)
eigenvec = self._convert_array2D_f(data, num_atoms * 3)
dynmat['eigenvectors'] = eigenvec
data = []
extract_dynmat_eigen = False
if extract_hessian:
if event == 'start' and element.tag == 'v':
data.append(element)
if extract_dynmat_eigen:
if event == 'start' and element.tag == 'v':
data.append(element)
try:
if event == 'start' and \
element.attrib['name'] == 'eigenvalues':
dynmat['eigenvalues'] = self._convert_array_f(
element)
except KeyError:
pass
if extract_born:
if event == 'start' and element.tag == 'v':
data.append(element)
if extract_species:
if event == 'start' and element.tag == 'c':
data.append(element)
if extract_kpointdata:
try:
if event == 'start' and element.tag == 'v' and \
element.attrib['name'] == 'divisions':
self._lattice['kpointdiv'] = self._convert_array_i(
element)
except KeyError:
pass
if event == 'start' and element.tag == 'varray' \
and element.attrib.get('name') == 'kpointlist':
extract_kpoints = True
if event == 'end' and element.tag == 'varray' \
and element.attrib.get('name') == 'kpointlist':
self._lattice['kpoints'] = self._convert_array2D_f(data, 3)
data = []
extract_kpoints = False
if event == 'start' and element.tag == 'varray' \
and element.attrib.get('name') == 'weights':
extract_kpointsw = True
if event == 'end' and element.tag == 'varray' \
and element.attrib.get('name') == 'weights':
self._lattice['kpointsw'] = self._convert_array1D_f(data)
data = []
extract_kpointsw = False
if extract_kpoints:
if event == 'start' and element.tag == 'v':
data.append(element)
if extract_kpointsw:
if event == 'start' and element.tag == 'v':
data.append(element)
if extract_dos:
if event == 'start' and element.tag == 'i' and \
element.attrib.get('name') == 'efermi':
data6.append(element)
if event == 'start' and element.tag == 'set' \
and element.attrib.get('comment') == 'spin 1':
extract_dos_ispin1 = True
if event == 'end' and element.tag == 'set' \
and element.attrib.get('comment') == 'spin 1':
extract_dos_ispin1 = False
if event == 'start' and element.tag == 'set' \
and element.attrib.get('comment') == 'spin 2':
extract_dos_ispin2 = True
if event == 'end' and element.tag == 'set' \
and element.attrib.get('comment') == 'spin 2':
extract_dos_ispin2 = False
if extract_dos_ispin1:
if event == 'start' and element.tag == 'r':
data.append(element)
if extract_dos_ispin2:
if event == 'start' and element.tag == 'r':
data2.append(element)
if extract_dos_specific:
if event == 'start' and element.tag == 'i' and \
element.attrib.get('name') == 'efermi':
data6.append(element)
if event == 'start' and element.tag == 'set' \
and element.attrib.get('comment') == 'spin 1':
extract_dos_specific_ispin1 = True
if event == 'end' and element.tag == 'set' \
and element.attrib.get('comment') == 'spin 1':
extract_dos_specific_ispin1 = False
if event == 'start' and element.tag == 'set' \
and element.attrib.get('comment') == 'spin 2':
extract_dos_specific_ispin2 = True
if event == 'end' and element.tag == 'set' \
and element.attrib.get('comment') == 'spin 2':
extract_dos_specific_ispin2 = False
if extract_dos_specific_ispin1:
if event == 'start' and element.tag == 'r':
data.append(element)
if extract_dos_specific_ispin2:
if event == 'start' and element.tag == 'r':
data2.append(element)
# now we need to update some elements
# for static runs, initial is equal to last
# if cell:
# if len(cell) == 1:
# cell[2] = cell[1]
# if pos:
# if len(pos) == 1:
# pos[2] = pos[1]
# if force:
# if len(force) == 1:
# force[2] = force[1]
# if stress:
# if len(stress) == 1:
# stress[2] = stress[1]
# if totens:
# if len(totens) == 1:
# totens[2] = totens[1]
# if not extract_all:
# # only save initial and last
# if cell:
# cell = {key: np.asarray(cell[key]) for key in {1, 2}}
# if pos:
# pos = {key: np.asarray(pos[key]) for key in {1, 2}}
# if force:
# force = {key: force[key] for key in {1, 2}}
# if stress:
# stress = {key: stress[key] for key in {1, 2}}
# if totens:
# totens = {key: totens[key] for key in {1, 2}}
# if any dict is empty, set it to None
if not cell:
cell = None
if not pos:
pos = None
if not force:
force = None
if not stress:
stress = None
if not totens:
totens = None
# store
self._lattice['unitcell'] = cell
self._lattice['positions'] = pos
self._data['forces'] = force
self._data['stress'] = stress
self._data['totens'] = totens
return
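# Illustrative sketch (an assumption, not part of the original source):
# if this method is exposed through a public parse entry point on the
# surrounding parser class, the extracted quantities end up in
# self._data and self._lattice, e.g.
#
#   parser._data['dos']['total']   # total density of states
#   parser._lattice['kpoints']     # k-points in direct coordinates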
def _fetch_versionw(self, xml):
"""Fetch and set version using etree
Parameters
----------
xml : object
An ElementTree object to be used for parsing.
Returns
-------
version : string
If version is found it is returned.
Notes
-----
Used when detecting the VASP version.
"""
entry = self._find(
xml, './/generator/i[@name="version"]')
if entry is None:
return None
return entry.text
def _fetch_symprecw(self, xml):
"""Fetch and set symprec using etree
Parameters
----------
xml : object
An ElementTree object to be used for parsing.
Returns
-------
symprec : float
If SYMPREC is found it is returned.
Notes
-----
Used when detecting symmetry.
"""
entry = self._find(
xml, './/parameters/separator[@name="symmetry"]/'
'i[@name="SYMPREC"]')
if entry is None:
return None
symprec = self._convert_f(entry)
return symprec
def _fetch_sigmaw(self, xml):
"""Fetch and set sigma using etree
Parameters
----------
xml : object
An ElementTree object to be used for parsing.
Returns
-------
sigma : float
If SIGMA is found it is returned.
Notes
-----
Determines the smearing used etc.
"""
entry = self._find(
xml, './/parameters/separator[@name="electronic"]/'
'separator[@name="electronic smearing"]/'
'i[@name="SIGMA"]')
if entry is None:
return None
sigma = self._convert_f(entry)
return sigma
def _fetch_nelmw(self, xml):
"""Fetch and set nelm using etree
Parameters
----------
xml : object
An ElementTree object to be used for parsing.
Returns
-------
nelm : int
If NELM is found it is returned.
Notes
-----
Maximum number of electronic steps. This is needed for checking whether the
electronic structure is converged.
"""
entry = self._find(
xml, './/parameters/separator[@name="electronic"]/'
'separator[@name="electronic convergence"]/'
'i[@name="NELM"]')
if entry is None:
return None
nelm = self._convert_i(entry)
return nelm
def _fetch_nsww(self, xml):
"""Fetch and set nsw using etree
Parameters
----------
xml : object
An ElementTree object to be used for parsing.
Returns
-------
nsw : int
If NSW is found it is returned.
Notes
-----
Maximum number of ionic steps. This is needed for checking ionic convergence.
"""
entry = self._find(
xml, './/parameters/separator[@name="ionic"]/'
'i[@name="NSW"]')
if entry is None:
return None
nsw = self._convert_i(entry)
return nsw
def _fetch_ispinw(self, xml):
"""Fetch and set ispin using etree
Parameters
----------
xml : object
An ElementTree object to be used for parsing.
Returns
-------
ispin : int
If ISPIN is found it is returned.
Notes
-----
Determines if spin is included. ISPIN=2 separates the spins.
"""
entry = self._find(
xml, './/parameters/separator[@name="electronic"]/'
'separator[@name="electronic spin"]/'
'i[@name="ISPIN"]')
if entry is None:
return None
ispin = self._convert_i(entry)
return ispin
def _fetch_ismearw(self, xml):
"""Fetch and set ismear using etree
Parameters
----------
xml : object
An ElementTree object to be used for parsing.
Returns
-------
ismear : int
If ISMEAR is found it is returned.
Notes
-----
Determines which smearing factor is used on the electrons.
"""
entry = self._find(
xml, './/parameters/separator[@name="electronic"]/'
'separator[@name="electronic smearing"]/'
'i[@name="ISMEAR"]')
if entry is None:
return None
ismear = self._convert_i(entry)
return ismear
def _fetch_nbandsw(self, xml):
"""Fetch and set nbands using etree
Parameters
----------
xml : object
An ElementTree object to be used for parsing.
Returns
-------
nbands : int
If NBANDS is found it is returned.
Notes
-----
The number of bands used in the calculation.
"""
entry = self._find(
xml, './/parameters/separator[@name="electronic"]/'
'i[@name="NBANDS"]')
if entry is None:
return None
nbands = self._convert_i(entry)
return nbands
def _fetch_nelectw(self, xml):
"""Fetch and set nelect using etree.
Parameters
----------
xml : object
An ElementTree object to be used for parsing.
Returns
-------
nelect : float
If NELECT is found it is returned.
Notes
-----
The number of electrons used in the calculation.
"""
entry = self._find(
xml, './/parameters/separator[@name="electronic"]/'
'i[@name="NELECT"]')
if entry is None:
return None
nelect = self._convert_f(entry)
return nelect
def _fetch_systemw(self, xml):
"""Fetch and set system using etree.
Parameters
----------
xml : object
An ElementTree object to be used for parsing.
Returns
-------
system : string
If SYSTEM is found it is returned.
Notes
-----
A comment that can be specified in the INCAR file.
"""
entry = self._find(
xml, './/parameters/separator[@name="general"]/'
'i[@name="SYSTEM"]')
if entry is None:
return None
system = entry.text
return system
def _fetch_bornw(self, xml):
"""Fetch the Born effetive charges.
Parameters
----------
xml : object
An ElementTree object to be used for parsing.
Returns
-------
born : ndarray
An ndarray containing the Born effective charge
tensor for each atom.
"""
entry = self._findall(
xml, './/calculation/array[@name="born_charges"]/'
'set/v')
if entry is None:
return None
num_atoms = 0
species = self._lattice['species']
if species is None:
# Try to fetch species again, if still none, we cannot parse it
# and thus we cannot parse the entries in here either.
species = self._fetch_speciesw(xml)
if species is None:
return None
num_atoms = species.shape[0]
born = self._convert_array2D_f(entry, 3)
born = np.asarray(np.split(born, num_atoms))
return born
def _fetch_upfsw(self, xml, extract_all=False):
"""Fetch the unitcell, atomic positions, force and stress.
Parameters
----------
xml : object
An ElementTree object to be used for parsing.
extract_all : bool
Determines which unitcell and positions to get.
Defaults to the initial and last. If True, extract all.
Returns
-------
cell, pos, force, stress : dict
A dictionary containing ndarrays of the:
| unitcells with the vectors as rows in AA.
| positions with each position as a row in direct coordinates.
| forces where each row is the force in eV/AA on each atom.
| stress where each row is the stress matrix for the unitcell in kB.
"""
cell = {}
pos = {}
force = {}
stress = {}
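# convention used below: keys 1 and 2 hold the initial and final entry,
# respectively; with extract_all, higher keys hold intermediate ionic steps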
num_atoms = 0
species = self._lattice['species']
if species is None:
# Try to fetch species again, if still none, we cannot parse it
# and thus we cannot parse the entries in here either.
species = self._fetch_speciesw(xml)
if species is None:
return None, None, None, None
num_atoms = species.shape[0]
if not extract_all:
entry = self._findall(
xml,
'.//structure[@name="finalpos"]/crystal/varray[@name="basis"]/v'
)
if entry is not None:
cell[2] = self._convert_array2D_f(entry, 3)
else:
cell[2] = None
entry = self._findall(
xml,
'.//structure[@name="initialpos"]/crystal/varray[@name="basis"]/v'
)
if entry is not None:
cell[1] = self._convert_array2D_f(entry, 3)
else:
cell[1] = None
entry = self._findall(
xml,
'.//structure[@name="finalpos"]/varray[@name="positions"]/v')
if entry is not None:
pos[2] = self._convert_array2D_f(entry, 3)
else:
pos[2] = None
entry = self._findall(
xml,
'.//structure[@name="initialpos"]/varray[@name="positions"]/v')
if entry is not None:
pos[1] = self._convert_array2D_f(entry, 3)
else:
pos[1] = None
entry = self._findall(xml,
'.//calculation/varray[@name="stress"]/v')
if entry is not None:
stress[1] = self._convert_array2D_f(entry[0:3], 3)
stress[2] = self._convert_array2D_f(entry[-3:], 3)
else:
stress[1] = None
stress[2] = None
entry = self._findall(xml,
'.//calculation/varray[@name="forces"]/v')
if entry is not None:
force[1] = self._convert_array2D_f(entry[0:num_atoms], 3)
force[2] = self._convert_array2D_f(entry[-num_atoms:], 3)
else:
force[1] = None
force[2] = None
else:
structures = self._findall(xml, './/calculation/structure')
entrycell = self._findall(
xml,
'.//calculation/structure/crystal/varray[@name="basis"]/v')
entrypos = self._findall(
xml, './/calculation/structure/varray[@name="positions"]/v')
entryforce = self._findall(
xml, './/calculation/varray[@name="forces"]/v')
entrystress = self._findall(
xml, './/calculation/varray[@name="stress"]/v')
if structures is not None:
num_calcs = len(structures)
else:
return None
num_entrycell = 0
num_entrypos = 0
num_entryforce = 0
num_entrystress = 0
if entrycell is not None:
num_entrycell = len(entrycell)
cell[1] = self._convert_array2D_f(entrycell[0:3], 3)
if num_entrycell > 3:
cell[2] = self._convert_array2D_f(entrycell[-3:], 3)
else:
cell[2] = None
else:
cell[1] = None
cell[2] = None
if entrypos is not None:
num_entrypos = len(entrypos)
pos[1] = self._convert_array2D_f(entrypos[0:num_atoms], 3)
if num_entrypos > 3:
pos[2] = self._convert_array2D_f(entrypos[-num_atoms:], 3)
else:
pos[2] = None
else:
pos[1] = None
pos[2] = None
if entryforce is not None:
num_entryforce = len(entryforce)
force[1] = self._convert_array2D_f(entryforce[0:num_atoms], 3)
if num_entryforce > 3:
force[2] = self._convert_array2D_f(entryforce[-num_atoms:],
3)
else:
force[2] = None
else:
force[1] = None
force[2] = None
if entrystress is not None:
num_entrystress = len(entrystress)
stress[1] = self._convert_array2D_f(entrystress[0:3], 3)
if num_entrystress > 3:
stress[2] = self._convert_array2D_f(entrystress[-3:], 3)
else:
stress[2] = None
else:
stress[1] = None
stress[2] = None
max_entries = max(
num_entrystress,
max(num_entryforce, max(num_entrycell, num_entrypos)))
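# each unit cell and stress tensor contributes three rows per ionic step,
# so more than six rows implies intermediate steps beyond the initial
# and final entries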
if max_entries > 6:
for calc in range(1, num_calcs):
basecell = calc * 3
basepos = calc * num_atoms
if entrycell is not None:
cell[calc + 1] = self._convert_array2D_f(
entrycell[basecell:basecell + 3], 3)
if entrypos is not None:
pos[calc + 1] = self._convert_array2D_f(
entrypos[basepos:basepos + num_atoms], 3)
if entryforce is not None:
force[calc + 1] = self._convert_array2D_f(
entryforce[basepos:basepos + num_atoms], 3)
if entrystress is not None:
stress[calc + 1] = self._convert_array2D_f(
entrystress[basecell:basecell + 3], 3)
# If we still only have one entry, or entry two is None, the last and initial
# entries should be the same, so force them to be equal. We could do this earlier,
# but it is done here to keep things tidy, in case one would in the future like to
# utilize the returned None.
if cell:
if len(cell) == 1 or cell[2] is None:
cell[2] = cell[1]
if pos:
if len(pos) == 1 or pos[2] is None:
pos[2] = pos[1]
if force:
if len(force) == 1 or force[2] is None:
force[2] = force[1]
if stress:
if len(stress) == 1 or stress[2] is None:
stress[2] = stress[1]
return cell, pos, force, stress
def _fetch_speciesw(self, xml):
"""Fetch the atomic species
Parameters
----------
xml : object
An ElementTree object to be used for parsing.
Returns
-------
spec : ndarray
An array containing the atomic species as a number.
Organized in the same order as the atomic positions.
"""
entry = self._findall(xml, './/atominfo/'
'array[@name="atoms"]/set/rc/c')
if entry is None:
return None
spec = self._convert_species(entry[::2])
return spec
def _fetch_hessian(self, xml):
"""Fetch the hessian.
Parameters
----------
xml : object
An ElementTree object to be used for parsing.
Returns
-------
hessian : ndarray
An array containing the Hessian matrix.
"""
entry = self._findall(
xml, './/calculation/dynmat/'
'varray[@name="hessian"]/v')
if entry is None:
return None
species = self._lattice['species']
if species is None:
# Try to fetch species again, if still none, we cannot parse it
# and thus we cannot parse the entries in here either.
species = self._fetch_speciesw(xml)
if species is None:
return None
num_atoms = species.shape[0]
hessian = self._convert_array2D_f(entry, num_atoms * 3)
return hessian
def _fetch_dynmatw(self, xml):
"""Fetch the dynamical matrix data.
Parameters
----------
xml : object
An ElementTree object to be used for parsing.
Returns
-------
dynmat : dict
A dict containing the eigenvalues and eigenvectors.
"""
entry = self._find(xml, './/calculation/dynmat/'
'v[@name="eigenvalues"]')
if entry is None:
return None
species = self._lattice['species']
if species is None:
# Try to fetch species again, if still none, we cannot parse it
# and thus we cannot parse the entries in here either.
species = self._fetch_speciesw(xml)
if species is None:
return None
num_atoms = species.shape[0]
eigenvalues = self._convert_array_f(entry)
entry = self._find(
xml, './/calculation/dynmat/'
'varray[@name="eigenvectors"]')
if entry is None:
return None
eigenvectors = self._convert_array2D_f(entry, num_atoms * 3)
dynmat = {'eigenvalues': eigenvalues, 'eigenvectors': eigenvectors}
return dynmat
def _fetch_kpointsw(self, xml,
path='kpoints/varray[@name="kpointlist"]/v'):
"""Fetch the kpoints.
Parameters
----------
xml : object
An ElementTree object to be used for parsing.
path : string
The path in the XML file containing the k-point set to be extracted.
Returns
-------
kpoints : ndarray
An array containing the kpoints used in the calculation
in direct coordinates.
"""
entry = self._findall(xml, path)
if entry is None:
return None
kpoints = self._convert_array2D_f(entry, 3)
return kpoints
def _fetch_kpointsww(self, xml, path='kpoints/varray[@name="weights"]/v'):
"""Fetch the kpoint weights.
Parameters
----------
xml : object
An ElementTree object to be used for parsing.
path : string
The path in the XML file containing the k-point weights to be extracted.
Returns
-------
kpointw : ndarray
An array containing the kpoint weights used in the
calculation.
"""
entry = self._findall(xml, path)
if entry is None:
return None
kpointsw = self._convert_array1D_f(entry)
return kpointsw
def _fetch_kpointdivw(self, xml):
"""Fetch the number of kpoints in each direction.
Parameters
----------
xml : object
An ElementTree object to be used for parsing.
Returns
-------
kpointdiv : list
A list containing the kpoint divisions used in the
calculation for the full BZ.
"""
entry = self._find(xml, 'kpoints/generation/v[@name="divisions"]')
if entry is None:
return None
kpointdiv = self._convert_array_i(entry)
return kpointdiv
def _fetch_eigenvaluesw(self, xml):
"""Fetch the eigenvalues.
Parameters
----------
xml : object
An ElementTree object to be used for parsing.
Returns
-------
eigenvalues, occupancies : tuple
A tuple of dicts containing ndarrays with the
eigenvalues and occupancies for each spin, band and
kpoint index.
"""
# spin 1
entry_ispin1 = self._findall(
xml, './/calculation/eigenvalues/array/set/'
'set[@comment="spin 1"]/set/r')
# spin 2
entry_ispin2 = self._findall(
xml, './/calculation/eigenvalues/array/set/'
'set[@comment="spin 2"]/set/r')
# if we do not find spin 1 entries return right away
if entry_ispin1 is None:
return None, None
eigenvalues, occupancies = self._extract_eigenvalues(
entry_ispin1, entry_ispin2, len(self._lattice['kpoints']))
return eigenvalues, occupancies
def _fetch_eigenvalues_specificw(self, xml):
"""Fetch the eigenvalues at specific k-point grids.
Parameters
----------
xml : object
An ElementTree object to be used for parsing.
Returns
-------
eigenvalues : ndarray
An ndarray containing the
eigenvalues for each spin, band and
kpoint index.
"""
# spin 1
entry_ispin1 = self._findall(
xml, './/calculation/eigenvalues/'
'eigenvalues/array/set/'
'set[@comment="spin 1"]/set/r')
# spin 2
entry_ispin2 = self._findall(
xml, './/calculation/eigenvalues/'
'eigenvalues/array/set/'
'set[@comment="spin 2"]/set/r')
if entry_ispin1 is not None:
# Also extract the k-point grids
self._data['kpoints'] = self._fetch_kpointsw(
xml,
path='.//calculation/eigenvalues/'
'kpoints/varray[@name="kpointlist"]/v')
self._data['kpointsw'] = self._fetch_kpointsww(
xml,
path='.//calculation/eigenvalues/'
'kpoints/varray[@name="weights"]/v')
# if we do not find spin 1 entries return right away
if entry_ispin1 is None:
return None
eigenvalues, _ = self._extract_eigenvalues(entry_ispin1, entry_ispin2,
len(self._data['kpoints']))
return eigenvalues
def _fetch_eigenvelocitiesw(self, xml):
"""Fetch the eigenvelocities and eigenvalues..
Parameters
----------
xml : object
An ElementTree object to be used for parsing.
Returns
-------
eigenvelocities : dict
A dict with ndarrays containing the
eigenvalues and eigenvelocities for each spin, band and
kpoint index.
kpoints : dict
A dict with a ndarray containing the k-point in the full BZ
on which the eigenvelocities were extracted.
"""
# spin 1
entry_ispin1 = self._findall(
xml, './/calculation/eigenvelocities/'
'eigenvalues/array/set/'
'set[@comment="spin 1"]/set/r')
# spin 2
entry_ispin2 = self._findall(
xml, './/calculation/eigenvelocities/'
'eigenvalues/array/set/'
'set[@comment="spin 2"]/set/r')
# if we do not find spin 1 entries return right away
if entry_ispin1 is None:
return None
self._data['kpoints'] = self._fetch_kpointsw(
xml,
path='.//calculation/eigenvelocities/'
'kpoints/varray[@name="kpointlist"]/v')
self._data['kpointsw'] = self._fetch_kpointsww(
xml,
path='.//calculation/eigenvelocities/'
'kpoints/varray[@name="weights"]/v')
eigenvelocities = self._extract_eigenvelocities(
entry_ispin1, entry_ispin2, len(self._data['kpoints']))
return eigenvelocities
def _fetch_projectorsw(self, xml):
"""Fetch the projectors.
Parameters
----------
xml : object
An ElementTree object to be used for parsing.
Returns
-------
projectors : dict
A dict containing ndarrays of the projectors
for each atomic, spin, band and kpoint index.
"""
# projectors spin 1
entry_ispin1 = self._findall(
xml, './/calculation/projected/array/set/'
'set[@comment="spin1"]/set/set/r')
# projectors spin 2
entry_ispin2 = self._findall(
xml, './/calculation/projected/array/set/'
'set[@comment="spin2"]/set/set/r')
# if we do not find spin 1 entries return right away
if entry_ispin1 is None:
return None
projectors = self._extract_projectors(entry_ispin1, entry_ispin2)
return projectors
def _fetch_totensw(self, xml):
"""Fetch the total energies
Parameters
----------
xml : object
An ElementTree object to be used for parsing.
Returns
-------
energies : dict
A dict containing the total energies for each calculation
(ionic step), indexed from 1. Each entry holds ndarrays of the
energies per electronic step and the final corrected values.
"""
# fetch the energies for all electronic steps; since the number of
# steps is not the same between calculations we need to look at
# all the children
#
# TODO: check in the future if it is faster to fetch all scstep
# elements and then count how many scsteps there are per calculation
# and sort from there
#
entries = self._findall(xml, './/calculation')
if entries is None:
return None
# this most likely takes too long for very long fpmd calculations,
# so consider putting in a flag that only extracts the
# energies from each step in the calculation and not the scsteps as
# well
energies = {}
for index, calc in enumerate(entries):
energies_pr_calc = {}
for supported_energy, supported_key in _SUPPORTED_TOTAL_ENERGIES.items():
data = self._findall(calc,
'.//scstep/energy/i[@name="' + supported_key + '"]')
if data is None:
return None
data = self._convert_array1D_f(data)
energies_pr_calc[supported_energy] = data
# now fetch the final entry outside the sc steps for each calculation
# this term might have been corrected and scaled compared to the final sc energy
# extrapolated energy
data = self._findall(calc,
'./energy/i[@name="' + supported_key + '"]')
if data is None:
return None
data = self._convert_f(data[0])
energies_pr_calc[supported_energy+'_final'] = data
energies[index + 1] = energies_pr_calc
return energies
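# The returned layout is, e.g.:
# {1: {'energy_extrapolated': <ndarray over sc steps>,
#      'energy_extrapolated_final': <float>, ...}, 2: {...}, ...}
# with one entry per calculation (ionic step), indexed from 1.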
def _fetch_dosw(self, xml):
"""Fetch the density of states.
Parameters
----------
xml : object
An ElementTree object to be used for parsing.
Returns
-------
dos, dos_specific : dicts
Dictionaries with ndarrays containing the energies, total and
integrated density of states for the regular and specific k-point grid, respectively.
"""
# fetch the Fermi level
entry = self._find(xml, './/calculation/dos/i[@name="efermi"]')
if entry is not None:
fermi_level = self._convert_f(entry)
else:
fermi_level = None
# spin 1
entry_total_ispin1 = self._findall(
xml, './/calculation/dos/total/array/set/set[@comment="spin 1"]/r')
# spin 2
entry_total_ispin2 = self._findall(
xml, './/calculation/dos/total/array/set/set[@comment="spin 2"]/r')
# partial spin 1
entry_partial_ispin1 = self._findall(
xml,
'.//calculation/dos/partial/array/set/set/set[@comment="spin 1"]/r'
)
# partial spin 2
entry_partial_ispin2 = self._findall(
xml,
'.//calculation/dos/partial/array/set/set/set[@comment="spin 2"]/r'
)
# if no entries for spin 1, eject right away
if entry_total_ispin1 is None:
return None, None
num_atoms = 0
species = self._lattice['species']
if species is None:
# Try to fetch species again, if still none, we cannot parse it
# and thus we cannot parse the entries in here either.
species = self._fetch_speciesw(xml)
if species is None:
return None, None
num_atoms = species.shape[0]
dos = self._extract_dos(entry_total_ispin1, entry_total_ispin2,
entry_partial_ispin1, entry_partial_ispin2,
fermi_level, num_atoms)
# Now extract the density of states for specific k-point grids
# fetch the Fermi level again as this can be different
entry = self._find(
xml,
'.//calculation/dos[@comment="interpolated"]/i[@name="efermi"]')
if entry is not None:
fermi_level = self._convert_f(entry)
else:
fermi_level = None
# spin 1
entry_total_ispin1 = self._findall(
xml,
'.//calculation/dos[@comment="interpolated"]/total/array/set/set[@comment="spin 1"]/r'
)
# spin 2
entry_total_ispin2 = self._findall(
xml,
'.//calculation/dos[@comment="interpolated"]/total/array/set/set[@comment="spin 2"]/r'
)
# partial spin 1
entry_partial_ispin1 = self._findall(
xml,
'.//calculation/dos[@comment="interpolated"]/partial/array/set/set/set[@comment="spin 1"]/r'
)
# partial spin 2
entry_partial_ispin2 = self._findall(
xml,
'.//calculation/dos[@comment="interpolated"]/partial/array/set/set/set[@comment="spin 2"]/r'
)
# if no entries for spin 1, eject right away
if entry_total_ispin1 is None:
return dos, None
dos_specific = self._extract_dos(entry_total_ispin1,
entry_total_ispin2,
entry_partial_ispin1,
entry_partial_ispin2, fermi_level,
num_atoms)
return dos, dos_specific
def _extract_dos(self, entry_total_ispin1, entry_total_ispin2,
entry_partial_ispin1, entry_partial_ispin2, fermi_level,
num_atoms):
"""Extract the density of states.
Parameters
----------
entry_total_ispin1 : list
A list containing ElementTree objects of the total density of states entry for
spin channel 1.
entry_total_ispin2 : list
A list containing ElementTree objects of the total density of states entry for
spin channel 2.
entry_partial_ispin1 : list
A list containing ElementTree objects of the partial density of states entry for
spin channel 1.
entry_partial_ispin2 : list
A list containing ElementTree objects of the partial density of states entry for
spin channel 2.
fermi_level : float
The Fermi level in eV.
num_atoms : int
The number of atoms.
Returns
-------
dos : dict
A dict of ndarrays containing the energies, total and
integrated density of states.
"""
if entry_total_ispin2:
dos = {'up': None, 'down': None}
dos_ispin = self._convert_array2D_f(entry_total_ispin1, 3)
_dosup = {}
_dosdown = {}
energy = dos_ispin[:, 0]
_dosup['total'] = dos_ispin[:, 1]
_dosup['integrated'] = dos_ispin[:, 2]
# check if partial exists
if entry_partial_ispin1:
dos_ispin = self._convert_array2D_f(entry_partial_ispin1, 10)
# do not need the energy term (similar to total)
_dosup['partial'] = np.asarray(
np.split(dos_ispin[:, 1:10], num_atoms))
else:
_dosup['partial'] = None
dos['up'] = _dosup
dos_ispin = self._convert_array2D_f(entry_total_ispin2, 3)
_dosdown['total'] = dos_ispin[:, 1]
_dosdown['integrated'] = dos_ispin[:, 2]
if entry_partial_ispin2:
dos_ispin = self._convert_array2D_f(entry_partial_ispin2, 10)
# do not need the energy term (similar to total)
_dosdown['partial'] = np.asarray(
np.split(dos_ispin[:, 1:10], num_atoms))
else:
_dosdown['partial'] = None
dos['down'] = _dosdown
dos['total'] = {'fermi_level': fermi_level, 'energy': energy}
else:
dos = {'total': None}
dos_ispin = self._convert_array2D_f(entry_total_ispin1, 3)
_dos = {}
_dos['energy'] = dos_ispin[:, 0]
_dos['total'] = dos_ispin[:, 1]
_dos['integrated'] = dos_ispin[:, 2]
# check if partial exists
if entry_partial_ispin1:
dos_ispin = self._convert_array2D_f(entry_partial_ispin1, 10)
# do not need the energy term (similar to total)
_dos['partial'] = np.asarray(
np.split(dos_ispin[:, 1:10], num_atoms))
else:
_dos['partial'] = None
_dos['fermi_level'] = fermi_level
dos['total'] = _dos
return dos
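# Resulting layout (illustrative): for spin-polarized input
#   dos = {'up': {...}, 'down': {...},
#          'total': {'fermi_level': ..., 'energy': ...}}
# otherwise
#   dos = {'total': {'energy': ..., 'total': ..., 'integrated': ...,
#          'partial': ..., 'fermi_level': ...}}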
def _fetch_dielectricsw(self, xml, method='dft', transfer=None):
""" Fetch the dielectric function from the VASP XML file
Parameters
----------
xml : object
An ElementTree object to be used for parsing.
method : {'dft', 'qp', 'bse'}, optional
What method was used to obtain the dielectric function. VASP
uses different output between DFT, QP and BSE calculations
(defaults to 'dft').
transfer : {'density', 'current'}, optional
Which dielectric function do you want? Density-density or
current-current? Defaults to the density-density.
Returns
-------
diel_imag : (N,6) list of list of float
If `method` is 'dft'.
The imaginary dielectric function for N energies for the
xx, yy, zz, xy, yz and zx component, respectively.
diel_real : (N,6) list of float
If `method` is 'dft'.
The real dielectric function for N energies for the
xx, yy, zz, xy, yz and zx component, respectively.
diel_imag_mac : (N,6) list of list of float
If `method` is 'qp'.
The imaginary part of the macroscopic dielectric function.
See `diel_imag` for layout.
diel_real_mac : (N,6) list of list of float
If `method` is 'qp'.
The real part of the macroscopic dielectric function.
See `diel_imag` for layout.
diel_imag_pol : (N,6) list of list of float
If `method` is 'qp'.
The imaginary part of the polarized dielectric function.
See `diel_imag` for layout.
diel_real_pol : (N,6) list of list of float
If `method` is 'qp'.
The real part of the polarized dielectric function.
See `diel_imag` for layout.
diel_imag_invlfrpa : (N,6) list of list of float
If `method` is 'qp'.
The imaginary part of the inverse dielectric function with
local field effects on the RPA level.
See `diel_imag` for layout.
diel_real_invlfrpa : (N,6) list of list of float
If `method` is 'qp'.
The real part of the inverse dielectric function with
local field effects on the RPA level.
See `diel_imag` for layout.
diel_imag : (N,6) list of list of float
If `method` is 'bse'.
The imaginary part of the BSE dielectric function.
See `diel_imag` above for layout.
diel_real : (N,6) list of list of float
If `method` is 'bse'.
The real part of the BSE dielectric function.
See `diel_imag` at the top for layout.
epsilon : (3,3) list of list of float
The static dielectric tensor (with a corresponding
``epsilon_ion`` entry for the ionic contribution, if present).
"""
if method == 'dft':
diel = {}
if transfer == 'density':
tag = 'dielectricfunction[@comment="density-density"]'
elif transfer == 'current':
tag = 'dielectricfunction[@comment="current-current"]'
else:
tag = 'dielectricfunction'
# imaginary part
entry = self._findall(
xml, './/calculation/' + tag + '/imag/array/set/r')
if entry is None:
diel['imag'] = None
diel['energy'] = None
else:
data = self._convert_array2D_f(entry, 7)
diel['energy'] = data[:, 0]
diel['imag'] = data[:, 1:7]
# real part
entry = self._findall(
xml, './/calculation/' + tag + '/real/array/set/r')
if entry is None:
diel['real'] = None
else:
data = self._convert_array2D_f(entry, 7)
diel['real'] = data[:, 1:7]
# epsilon part
entry = self._findall(xml,
'.//calculation/varray[@name="epsilon"]/v')
if entry is not None:
diel['epsilon'] = self._convert_array2D_f(entry, 3)
else:
diel['epsilon'] = None
# ionic epsilon part
entry = self._findall(
xml, './/calculation/varray[@name="epsilon_ion"]/v')
if entry is not None:
diel['epsilon_ion'] = self._convert_array2D_f(entry, 3)
else:
diel['epsilon_ion'] = None
return diel
# if method == "qp":
# try:
# dielectric_xml = root.findall('dielectricfunction')
# except AttributeError:
# logger.error(
# "Did not find <dielectricfunction> tag in the current XML."
# "Exiting.")
# sys.exit(1)
# # first head of macroscopic
# diel_imag_xml = dielectric_xml[0].find(
# 'imag').find('array').find('set')
# diel_imag_mac = []
# # first imag part
# for energy in diel_imag_xml.iter('r'):
# diel_imag_mac.append([float(x) for x in energy.text.split()])
# diel_real_xml = dielectric_xml[0].find(
# 'real').find('array').find('set')
# diel_real_mac = []
# # then real part
# for energy in diel_real_xml.iter('r'):
# diel_real_mac.append([float(x) for x in energy.text.split()])
# # then polarized
# diel_imag_xml = dielectric_xml[1].find(
# 'imag').find('array').find('set')
# diel_imag_pol = []
# # first imag part
# for energy in diel_imag_xml.iter('r'):
# diel_imag_pol.append([float(x) for x in energy.text.split()])
# diel_real_xml = dielectric_xml[1].find(
# 'real').find('array').find('set')
# diel_real_pol = []
# # then real part
# for energy in diel_real_xml.iter('r'):
# diel_real_pol.append([float(x) for x in energy.text.split()])
# # then inverse macroscopic (including local field)
# diel_imag_xml = dielectric_xml[2].find(
# 'imag').find('array').find('set')
# diel_imag_invlfrpa = []
# # first imag part
# for energy in diel_imag_xml.iter('r'):
# diel_imag_invlfrpa.append([float(x) for x in energy.text.split()])
# diel_real_xml = dielectric_xml[2].find(
# 'real').find('array').find('set')
# diel_real_invlfrpa = []
# # then real part
# for energy in diel_real_xml.iter('r'):
# diel_real_invlfrpa.append([float(x) for x in energy.text.split()])
# return diel_imag_mac, diel_real_mac, diel_imag_pol, diel_real_pol, \
# diel_imag_invlfrpa, diel_real_invlfrpa
# if method == "bse":
# try:
# dielectric_xml = root.find('dielectricfunction')
# except AttributeError:
# logger.error(
# "Did not find <dielectricfunction> tag in the current XML."
# "Exiting.")
# sys.exit(1)
# diel_imag_xml = dielectric_xml.find('imag').find('array').find('set')
# diel_imag = []
# # first imag part
# for energy in diel_imag_xml.iter('r'):
# diel_imag.append([float(x) for x in energy.text.split()])
# diel_real_xml = dielectric_xml.find('real').find('array').find('set')
# diel_real = []
# # then real part
# for energy in diel_real_xml.iter('r'):
# diel_real.append([float(x) for x in energy.text.split()])
# return diel_imag, diel_real
def _extract_eigenvalues(self, spin1, spin2, num_kpoints):
"""Extract the eigenvalues.
Parameters
----------
spin1 : list
A list of ElementTree object to be used for parsing of the
ispin=1 entries.
spin2 : list
A list of ElementTree object to be used for parsing of the
ispin=2 entries.
num_kpoints : int
The number of k-points to extract
Returns
-------
eigenvalues, occupancies : tuple of dicts
A tuple of two dicts containing ndarrays with the eigenvalues
and occupancies for each band and kpoint index.
"""
# first check if we have assigned ispin
if self._parameters['ispin'] is None:
self._logger.error(self.ERROR_MESSAGES[self.ERROR_NO_ISPIN])
sys.exit(self.ERROR_NO_ISPIN)
# then check if we have assigned nbands
if self._parameters['nbands'] is None:
self._logger.error(self.ERROR_MESSAGES[self.ERROR_NO_NBANDS])
sys.exit(self.ERROR_NO_NBANDS)
# ispin
ispin = self._parameters['ispin']
# number of bands
num_bands = self._parameters['nbands']
# set dicts
eigenvalues = {}
occupancies = {}
data = []
if len(spin1) != num_bands * num_kpoints:
self._logger.error(
self.ERROR_MESSAGES[self.ERROR_MISMATCH_KPOINTS_NBANDS])
sys.exit(self.ERROR_MISMATCH_KPOINTS_NBANDS)
# check number of elements in first entry of spin1 (we assume all are equal)
entries = len(spin1[0].text.split())
if entries > 1:
data.append(self._convert_array2D_f(spin1, entries))
else:
data.append(self._convert_array1D_f(spin1))
data[0] = np.asarray(np.split(data[0], num_kpoints))
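# the r elements arrive as a flat list of num_bands * num_kpoints rows;
# np.split regroups them into one block of num_bands rows per k-point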
if spin2 is not None:
if len(spin2) != num_bands * num_kpoints:
self._logger.error(
self.ERROR_MESSAGES[self.ERROR_MISMATCH_KPOINTS_NBANDS])
sys.exit(self.ERROR_MISMATCH_KPOINTS_NBANDS)
if entries > 1:
data.append(self._convert_array2D_f(spin2, entries))
else:
data.append(self._convert_array1D_f(spin2))
data[1] = np.asarray(np.split(data[1], num_kpoints))
# convert to numpy arrays
data = np.asarray(data)
# swap axis if the band index should be before the kpoint index
if not self._k_before_band:
data = np.swapaxes(data, 1, 2)
if spin2 is not None:
if entries > 1:
eigenvalues['up'] = np.ascontiguousarray(data[0, :, :, 0])
eigenvalues['down'] = np.ascontiguousarray(data[1, :, :, 0])
occupancies['up'] = np.ascontiguousarray(data[0, :, :, 1])
occupancies['down'] = np.ascontiguousarray(data[1, :, :, 1])
else:
eigenvalues['up'] = np.ascontiguousarray(data[0, :, :])
eigenvalues['down'] = np.ascontiguousarray(data[1, :, :])
else:
if entries > 1:
eigenvalues['total'] = np.ascontiguousarray(data[0, :, :, 0])
occupancies['total'] = np.ascontiguousarray(data[0, :, :, 1])
else:
eigenvalues['total'] = np.ascontiguousarray(data[0, :, :])
if entries > 1:
return eigenvalues, occupancies
else:
return eigenvalues, None
def _extract_eigenvelocities(self, spin1, spin2, num_kpoints):
"""Extract the eigenvalues and eigenvelocities.
Parameters
----------
spin1 : list
A list of ElementTree object to be used for parsing of the
ispin=1 entries.
spin2 : list
A list of ElementTree object to be used for parsing of the
ispin=2 entries.
num_kpoints : int
The number of k-points in the full BZ
on which the eigenvelocities were extracted.
Returns
-------
eigenvelocities : dict
A dict containing ndarrays with the eigenvalues
and eigenvelocities for each band and kpoint index.
"""
# check if we have assigned ispin
if self._parameters['ispin'] is None:
self._logger.error(self.ERROR_MESSAGES[self.ERROR_NO_ISPIN])
sys.exit(self.ERROR_NO_ISPIN)
# then check if we have assigned nbands
if self._parameters['nbands'] is None:
self._logger.error(self.ERROR_MESSAGES[self.ERROR_NO_NBANDS])
sys.exit(self.ERROR_NO_NBANDS)
# ispin
ispin = self._parameters['ispin']
# number of bands
num_bands = self._parameters['nbands']
# set dicts
eigenvelocities = {}
data = []
if len(spin1) != num_bands * num_kpoints:
self._logger.error(
self.ERROR_MESSAGES[self.ERROR_MISMATCH_KPOINTS_NBANDS])
sys.exit(self.ERROR_MISMATCH_KPOINTS_NBANDS)
data.append(self._convert_array2D_f(spin1, 4))
data[0] = np.asarray(np.split(data[0], num_kpoints))
if spin2 is not None:
if len(spin2) != num_bands * num_kpoints:
self._logger.error(
self.ERROR_MESSAGES[self.ERROR_MISMATCH_KPOINTS_NBANDS])
sys.exit(self.ERROR_MISMATCH_KPOINTS_NBANDS)
data.append(self._convert_array2D_f(spin2, 4))
data[1] = np.asarray(np.split(data[1], num_kpoints))
# convert to numpy arrays
data = np.asarray(data)
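# NOTE: the remainder of this method is missing from the source; the
# following is a best-effort completion by analogy with
# _extract_eigenvalues above (an assumption, not verified):
if not self._k_before_band:
    data = np.swapaxes(data, 1, 2)
if spin2 is not None:
    eigenvelocities['up'] = np.ascontiguousarray(data[0])
    eigenvelocities['down'] = np.ascontiguousarray(data[1])
else:
    eigenvelocities['total'] = np.ascontiguousarray(data[0])
return eigenvelocities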
import numpy as np
import itertools
from operator import itemgetter
from scipy.linalg import lstsq
from view.camera import is_front_of_view
def get_cross(A):
return np.array([[0, -A[2], A[1]], [A[2], 0, -A[0]], [-A[1], A[0], 0]])
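# Sanity check (illustrative): get_cross(a) builds the skew-symmetric matrix [a]_x,
# so multiplying it onto b reproduces the cross product:
#   a, b = np.array([1., 2., 3.]), np.array([4., 5., 6.])
#   np.allclose(get_cross(a) @ b, np.cross(a, b))   # -> True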
def get_fundamental_matrix_from_camera_matrix(camera_matrixs):
fundamental_matrixs = {}
camera_centers = [-np.linalg.inv(camera_matrix[:,:3]) @ camera_matrix[:,3] for camera_matrix in camera_matrixs]
for i in range(len(camera_centers)):
for j in range(len(camera_centers)):
if i == j:
continue
e = camera_matrixs[j] @ np.append(camera_centers[i],1)
e = e/e[2]
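            # Standard two-view construction (background note): with e the epipole of
            # camera i in view j, F_ij = [e]_x P_j P_i^+, cf. Hartley & Zisserman,
            # "Multiple View Geometry", ch. 9.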
            fundamental_matrixs[i,j] = get_cross(e) @ camera_matrixs[j] @ np.linalg.pinv(camera_matrixs[i])
    return fundamental_matrixs
#!/usr/bin/env python
# coding: utf-8
from __future__ import division
from . import Scheme
import numpy as np
from numpy import array, sqrt
class RungeKutta(Scheme):
"""
Collection of classes containing the coefficients of various Runge-Kutta methods.
:Attributes:
tableaux : dictionary
dictionary containing a Butcher tableau for every available number of stages.
"""
@classmethod
def time_vector(cls, tableau):
return tableau[:,0]
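    # Convention assumed throughout this module: each Butcher tableau is stored with the
    # abscissae c_i in its first column, so the time vector is simply that column.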
class RKDAE(RungeKutta):
"""
Partitioned Runge-Kutta for index 2 DAEs.
"""
def __init__(self, *args, **kwargs):
super(RKDAE, self).__init__(*args, **kwargs)
tableau = kwargs.get('tableau')
if tableau is not None:
self.tableau = tableau
self.ts = self.get_ts()
self.nb_stages = len(self.tableau) - 1
self.QT = np.eye(self.nb_stages+1, self.nb_stages)
def get_ts(self):
return RungeKutta.time_vector(self.tableau)
def dynamics(self, YZT):
return np.dot(self.system.dynamics(YZT[:,:-1]), self.tableau[:,1:].T)
def get_residual(self, t, u, h):
T = t + self.ts*h
QT = self.QT
def residual(DYZ):
YZ = u.reshape(-1,1) + DYZ
YZT = np.vstack([YZ,T])
dyn = self.dynamics(YZT)
DY = self.system.state(DYZ)
Y = self.system.state(YZ)
r1 = DY - h*dyn
r2 = np.dot(self.system.constraint(YZT), QT)
# unused final versions of z
r3 = self.system.lag(YZ[:,-1])
return [r1,r2,r3]
return residual
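    # The residual above stacks three blocks: r1, the stage equations DY - h*f; r2, the
    # constraints projected through QT; and r3, the final (otherwise unused) algebraic
    # variables, which pins down the extra z values.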
def get_guess(self, t, u, h):
s = self.tableau.shape[0]
# pretty bad guess here:
guess = np.column_stack([np.zeros_like(u)]*s)
return guess
def reconstruct(self, full_result,t,u0,h):
# we keep the last Z value:
result = np.hstack([self.system.state(full_result[:,-1]), self.system.lag(full_result[:,-2])])
return h, result
class MultiRKDAE(RKDAE):
def dynamics(self, YZT):
s = self.nb_stages
dyn_dict = self.system.multi_dynamics(YZT[:,:-1])
dyn = [np.dot(vec, RK_class.tableaux[s][:,1:].T) for RK_class, vec in dyn_dict.items()]
return sum(dyn)
class Spark(MultiRKDAE):
r"""
Solver for semi-explicit index 2 DAEs by Lobatto III RK methods
as presented in [Ja03]_.
We consider the following system of DAEs:
.. math::
y' = f(t,y,z)\\
0 = g(t,y)
where t is the independent variable, y is a vector of length n containing
the differential variables and z is a vector of length m containing the
algebraic variables. Also,
.. math::
f:\ R × R^n × R^m → R^n\\
g:\ R × R^n → R^m
It is assumed that :math:`g_y f_z` exists and is invertible.
The above system is integrated from ``t0`` to ``tfinal``, where
tspan = [t0, tfinal] using constant stepsize h. The initial condition is
given by ``(y,z) = (y0,z0)`` and the number of stages in the Lobatto III RK
methods used is given by ``s``.
    The corresponding :class:`odelab.system.System` object must implement a *tensor* version of the following methods:
* :meth:`odelab.system.System.state`
* :meth:`odelab.system.System.multi_dynamics`
* :meth:`odelab.system.System.constraint`
* :meth:`odelab.system.System.lag`
    By vector, it is meant that the methods must accept vector arguments, i.e., accept an n×s matrix as input parameter.
References:
.. [Ja03] \<NAME>ay - *Solution of index 2 implicit differential-algebraic equations by Lobatto Runge-Kutta methods.* BIT 43, 1, 93-106 (2003). :doi:`10.1023/A:1023696822355`
"""
def __init__(self, *args, **kwargs):
super(Spark, self).__init__(*args, **kwargs)
self.QT = self.compute_mean_stage_constraint().T
def compute_mean_stage_constraint(self):
"""
Compute the s x (s+1) matrix defined in [Ja03]_.
"""
s = self.nb_stages
A1t = LobattoIIIA.tableaux[s][1:-1,1:]
es = np.zeros(s)
es[-1] = 1.
Q = np.zeros([s,s+1])
Q[:-1,:-1] = A1t
Q[-1,-1] = 1.
L = np.linalg.inv(np.vstack([A1t, es]))
        Q = np.dot(L, Q)
        return Q
'''Script for the automatic assessment of the intonation of monophonic singing.
Prototype for the CSP of the TROMPA project.
'''
import sys
eps = sys.float_info.epsilon
import json
import numpy as np
import mir_eval
import pandas as pd
import music21 as m21
import pretty_midi
import unidecode
from mir_eval.util import midi_to_hz, intervals_to_samples
import argparse
import configparser
def xml2midi(xmlfile, format):
try:
score = m21.converter.parseFile(xmlfile, format=format)
except:
raise RuntimeError("Can not parse the {} score.".format(format))
try:
score.write('midi', '{}.mid'.format(xmlfile))
# if xmlfile.endswith('xml'):
# score.write('midi', xmlfile.replace('xml', 'mid'))
#
# elif xmlfile.endswith('mxl'):
# score.write('midi', xmlfile.replace('mxl', 'mid'))
except:
raise RuntimeError("Could not convert {} to MIDI.".format(format))
def midi_preparation(midifile):
midi_data = dict()
midi_data['onsets'] = dict()
midi_data['offsets'] = dict()
    midi_data['midipitches'] = dict()  # MIDI note numbers
midi_data['hz'] = dict()
patt = pretty_midi.PrettyMIDI(midifile)
midi_data['downbeats'] = patt.get_downbeats()
for instrument in patt.instruments:
midi_data['onsets'][instrument.name] = []
midi_data['offsets'][instrument.name] = []
midi_data['midipitches'][instrument.name] = []
for note in instrument.notes:
midi_data['onsets'][instrument.name].append(note.start)
midi_data['offsets'][instrument.name].append(note.end)
midi_data['midipitches'][instrument.name].append(note.pitch)
p = midi_data['midipitches'][instrument.name]
midi_data['hz'][instrument.name] = midi_to_hz(np.array(p))
return midi_data
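# Resulting structure (sketch): per-instrument, index-aligned lists keyed by track name,
# e.g. midi_data['hz']['Soprano'][k] would be the frequency in Hz of the k-th note of a
# hypothetical 'Soprano' track, with midi_data['onsets']['Soprano'][k] its start time.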
def midi_to_trajectory(des_timebase, onsets, offsets, pitches):
hop = des_timebase[2] - des_timebase[1]
intervals = np.concatenate([np.array(onsets)[:, None], np.array(offsets)[:, None]], axis=1)
timebase, midipitches = intervals_to_samples(intervals, list(pitches),
offset=des_timebase[0], sample_size=hop, fill_value=0)
return np.array(timebase), np.array(midipitches)
def parse_midi(score_fname, voice_shortcut, format):
voice_shortcut = unidecode.unidecode(voice_shortcut)
try:
midi_data = midi_preparation("{}.mid".format(score_fname))
except:
raise RuntimeError("Could not parse converted MIDI file".format(score_fname))
onsets = np.array(midi_data['onsets'][voice_shortcut])
offsets = np.array(midi_data['offsets'][voice_shortcut])
pitches = np.array(midi_data['hz'][voice_shortcut])
return onsets, offsets, pitches, midi_data
def load_json_data(load_path):
with open(load_path, 'r') as fp:
data = json.load(fp)
return data
def save_json_data(data, save_path):
with open(save_path, 'w') as fp:
json.dump(data, fp, indent=2)
def load_f0_contour(pitch_json_path, starttime):
pitch = np.array(load_json_data(pitch_json_path)['pitch'])
times_ = pitch[:, 0]
freqs_ = pitch[:, 1]
times_shift = times_ - np.abs(starttime)
idxs_no = np.max(np.where(times_shift < 0)[0])
times = times_shift[idxs_no + 1:]
if not type(times[0]) == np.float64:
raise ValueError("Problem with F0 contour")
if times[0] != 0:
offs = times[0]
times -= offs
freqs = freqs_[idxs_no + 1:]
return times, freqs
def map_deviation_range(input_deviation, max_deviation=100):
'''This function takes as input the deviation between the score and the performance in cents (as a ratio),
and computes the output value mapping it into the range 0-1, (0 is bad intonation and 1 is good intonation).
By default, we limit the deviation to max_deviation cents, which is one semitone. Values outside the range +-100 cents
will be clipped and counted as intonation score = 0.
'''
score = np.clip(np.abs(input_deviation), 0, max_deviation) / float(max_deviation)
# assert score <= 1, "Score value is above 1"
# assert score >= 0, "Score value is below 0"
return 1 - score
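# Worked example (mirrors the formula above):
#   map_deviation_range(50)    # -> 0.5   (50 cents off: 1 - 50/100)
#   map_deviation_range(150)   # -> 0.0   (clipped to the 100-cent maximum)
#   map_deviation_range(0)     # -> 1.0   (perfect intonation)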
def intonation_assessment(startbar, endbar, offset, pitch_json_file, score_file, voice, output_filename, dev_thresh=100, format='xml'):
'''Automatic assessment of the intonation of singing performances from the CSP platform of the TROMPA project.
Parameters
----------
startbar : (int) indicates the first bar of the performance
endbar : (int) indicates the last bar of the performance
offset : (float) measured latency between audio and score
pitch_json_file : (string) json file with the pitch contour
score_file : (string) music score xml file
voice : (string) voice part as written in the score
output_filename : (string) output filename to use for the assessment results file
dev_thresh : (float) maximum allowed deviation in cents. Defaults to 100 cents
Returns
-------
assessment : (dictionary) the field 'pitchAssessment' contains a list of arrays with the results for each note in
in the form [note_start_time, intonation_rating]. If the process fails, the list will be empty. The field 'error'
will contain a string with an error message if the process fails, and will be None if it's successful.
overall_score : (float) overall intonation score computed as the weighted sum of note intonation scores. Can be
ignored because it's not used by the CSP.
This function stores a json file with the assessment dictionary in the file indicated by the `output_filename`
parameter.
'''
assessment = {}
assessment['pitchAssessment'] = []
assessment['error'] = None
try:
'''STEP 1: parse xml score, convert to MIDI and save
'''
# quick hack to deal with accents in the voice parts, needs to be updated
change_flag = 0
xml_data = m21.converter.parse(score_file, format=format)
for i in range(len(xml_data.parts)):
name = xml_data.parts[i].getInstrument().partName
if name != unidecode.unidecode(name):
change_flag = 1
xml_data.parts[i].getInstrument().partName = unidecode.unidecode(name)
if change_flag != 0:
try:
xml_data.write('midi', "{}.mid".format(score_file))
except:
raise RuntimeError("Could not convert modified {} to MIDI.".format(format))
else:
xml2midi(score_file, format=format)
# import pdb; pdb.set_trace()
#
# if voice == 'Baríton':
# xml_data = m21.converter.parse(score_file)
# xml_data.parts[2].getInstrument().partName = 'Bariton'
# xml_data.write('midi', score_file.replace('xml', 'mid'))
#
# else:
# xml2midi(score_file)
'''STEP 2: parse MIDI file and arrange info
'''
onsets, offsets, pitches, midi_data = parse_midi(score_file, unidecode.unidecode(voice), format)
'''STEP 3: parse the F0 contour and adjust according to latency
'''
        # if latency is larger than 1 second it's likely an error; we set it to 0.3 by default
if offset >= 1:
offset = 0.3
times, freqs = load_f0_contour(pitch_json_file, starttime=offset)
'''STEP 4: Delimiting the performance in the score and the F0 curve
'''
starting = midi_data['downbeats'][int(startbar) - 1] # bars start at 1, indices at 0
# Account for the case of last bar being the last of the piece and size mismatch
if int(endbar) >= len(midi_data['downbeats']):
ending = offsets[-1]
else:
ending = midi_data['downbeats'][int(endbar)] - 0.005
st_idx = np.where(onsets >= starting)[0][0]
end_idx = np.where(offsets >= ending)[0][0]
# getting info from notes according to the sung audio excerpt
onsets, offsets, pitches = onsets[st_idx:end_idx + 1], offsets[st_idx:end_idx + 1], pitches[st_idx:end_idx + 1]
# If all freqs are 0, there's no singing in the performance, we return 0
if sum(freqs) == 0:
            assessment['pitchAssessment'] = [np.array([onset, 0]) for onset in onsets]
from enum import Enum
from typing import Tuple
import numpy as np
import scipy
from scipy.integrate import odeint, solve_ivp
class Simulation_Algorithm(Enum):
SOLVE_IVP = (1,)
ODE_INT = 2
class Simulator:
def __init__(self, model) -> None:
self.model = model
self.ode_list = None
self.supported_sim_types = [
"S I",
"M V S E2 I3 Q3 R D",
]
self.simulation_type = None
self.simulation_algorithm = None
self.params = None
self.J = 1
self.K = 1
def run(
self,
params,
simulation_type,
simulation_algorithm: Simulation_Algorithm = Simulation_Algorithm.SOLVE_IVP,
) -> dict:
"""
Runs the simulation with the given simulation type
Parameters
----------
params : dict
Parameters for the simulation
simulation_type : str
Type of simulation to be run e.g. "S I", "S E I R", "M V S E2 I3 Q3 R D"
Returns
-------
dict
New parameters
"""
self.params = params
if simulation_type == "full":
self.simulation_type = "M V S E2 I3 Q3 R D"
else:
self.simulation_type = simulation_type
# TODO make sure attribute error doesn't crash this
self.simulation_algorithm = simulation_algorithm
self.J = params["J"]
self.K = params["K"]
self.param_count = self._count_params()
return self._run_ode_system(params)
def _count_params(self):
if self.simulation_type == "S I":
return 2
if self.simulation_type == "S E I R":
return 4
if self.simulation_type == "M V S E2 I3 Q3 R D":
return 13
def _build_dMdt(self, t) -> np.ndarray:
"""
Set up equation for M (newborn)
Parameters
----------
t : int
Current timestep
Returns
-------
np.ndarray
Calculated values of equation of M for current iteration
"""
res = []
rho_mat = self.params["rho_mat"]
M = self.params["M"]
res.append(-1 * rho_mat[int(t)] * M)
return np.array(res).sum(axis=0)
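    # In ODE form this is dM/dt = -rho_mat(t) * M: maternal (newborn) immunity wanes at
    # rate rho_mat; the matching +rho_mat*M source term appears in _build_dSdt below.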
def _build_dVdt(self, t) -> np.ndarray:
"""
Set up equation for V (vaccinated)
Parameters
----------
t : int
Current timestep
Returns
-------
np.ndarray
Calculated values of equation of V for current iteration
"""
res = []
rho_vac = self.params["rho_vac"]
V = self.params["V"]
res.append(-1 * rho_vac[int(t)] * V)
nu = self.params["nu"]
S = self.params["S"]
res.append(nu[int(t)] * S)
return np.array(res).sum(axis=0)
def _build_dSdt(self, t) -> np.ndarray:
"""
        Set up equation for S (susceptible)
Parameters
----------
t : int
Current timestep
Returns
-------
np.ndarray
Calculated values of equation of S for current iteration
"""
res = []
cls = self.simulation_type.split()
if "M" in cls:
rho_mat = self.params["rho_mat"]
M = self.params["M"]
res.append(rho_mat[int(t)] * M)
if "V" in cls:
rho_vac = self.params["rho_vac"]
nu = self.params["nu"]
S = self.params["S"]
V = self.params["V"]
res.append(rho_vac[int(t)] * V - nu[int(t)] * S)
if "R" in cls:
rho_rec = self.params["rho_rec"]
R = self.params["R"]
res.append(rho_rec[int(t)] * R)
if "I3" in cls:
S = self.params["S"]
N = self.params["N"]
beta_asym = self.params["beta_asym"]
beta_sym = self.params["beta_sym"]
beta_sev = self.params["beta_sev"]
I_asym = self.params["I_asym"]
I_sym = self.params["I_sym"]
I_sev = self.params["I_sev"]
res.append(
[
np.array(
[
-np.sum(
[
beta_asym[int(t), j, l, k] * I_asym[j, l]
+ beta_sym[int(t), j, l, k] * I_sym[j, l]
+ beta_sev[int(t), j, l, k] * I_sev[j, l]
for l in range(self.K)
]
)
* S[j, k]
/ N[j, k]
for k in range(self.K)
]
)
for j in range(self.J)
]
)
elif "I2" in cls:
pass
elif "I" in cls:
pass
return np.array(res).sum(axis=0)
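    # Collecting the appended terms (only those whose compartments appear in
    # simulation_type contribute), per age group j and contact class k:
    #   dS/dt = rho_mat*M + rho_vac*V - nu*S + rho_rec*R
    #           - S/N * sum_l (beta_asym + beta_sym + beta_sev) * I_*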
def _build_dEdt(self, t) -> Tuple:
"""
Wrap equations for E (exposed)
Parameters
----------
t : int
Current timestep
Returns
-------
Tuple
Equation for class E based on simulation_type
"""
cls = self.simulation_type.split()
if "E2" in cls:
return self._build_dE_ntdt(t), self._build_dE_trdt(t)
elif "E" in cls:
pass
def _build_dE_ntdt(self, t) -> np.ndarray:
"""
Set up equation for Ent (exposed and not tracked)
Parameters
----------
t : int
Current timestep
Returns
-------
np.ndarray
Calculated values of equation of Ent for current iteration
"""
res = []
cls = self.simulation_type.split()
if "I3" in cls:
beta_asym = self.params["beta_asym"]
S = self.params["S"]
N = self.params["N"]
I_asym = self.params["I_asym"]
res.append(
[
np.array(
[
np.sum(
[beta_asym[int(t), j, l, k] * I_asym[j, l] for l in range(self.K)]
)
* S[j, k]
/ N[j, k]
for k in range(self.K)
]
)
for j in range(self.J)
]
)
beta_sym = self.params["beta_sym"]
beta_sev = self.params["beta_sev"]
psi = self.params["psi"]
I_sym = self.params["I_sym"]
I_sev = self.params["I_sev"]
res.append(
[
np.array(
[
np.sum(
[
beta_sym[int(t), j, l, k]
* (1 - psi[int(t), j, l] * psi[int(t), j, k])
* I_sym[j, l]
+ beta_sev[int(t), j, l, k]
* (1 - psi[int(t), j, l] * psi[int(t), j, k])
* I_sev[j, l]
for l in range(self.K)
]
)
* S[j, k]
/ N[j, k]
for k in range(self.K)
]
)
for j in range(self.J)
]
)
elif "I" in cls:
pass
        # _build_dE_ntdt is only called when E2 is in simulation_type, so this is always executed
epsilon = self.params["epsilon"]
Ent = self.params["E_nt"]
res.append(-1 * epsilon[int(t)] * Ent)
return np.array(res).sum(axis=0)
def _build_dE_trdt(self, t) -> np.ndarray:
"""
Set up equation for Etr (exposed and tracked)
Parameters
----------
t : int
Current timestep
Returns
-------
np.ndarray
Calculated values of equation of Etr for current iteration
"""
res = []
cls = self.simulation_type.split()
if "I3" in cls:
beta_sym = self.params["beta_sym"]
beta_sev = self.params["beta_sev"]
psi = self.params["psi"]
I_sym = self.params["I_sym"]
I_sev = self.params["I_sev"]
S = self.params["S"]
N = self.params["N"]
res.append(
[
np.array(
[
np.sum(
[
beta_sym[int(t), j, l, k]
* psi[int(t), j, l]
* psi[int(t), j, k]
* I_sym[j, l]
+ beta_sev[int(t), j, l, k]
* psi[int(t), j, l]
* psi[int(t), j, k]
* I_sev[j, l]
for l in range(self.K)
]
)
* S[j, k]
/ N[j, k]
for k in range(self.K)
]
)
for j in range(self.J)
]
)
elif "I2" in cls:
pass
        # _build_dE_trdt is only called when E2 is in simulation_type, so this is always executed
epsilon = self.params["epsilon"]
Etr = self.params["E_tr"]
res.append(-1 * epsilon[int(t)] * Etr)
return np.array(res).sum(axis=0)
def _build_dIdt(self, t) -> Tuple:
"""
Wrap equation for I (infectious)
Parameters
----------
t : int
Current timestep
Returns
-------
Tuple
Equation for class I based on simulation_type
"""
cls = self.simulation_type.split()
if "I3" in cls and "Q3" in cls:
return (
self._build_dI_asymdt(t),
self._build_dI_symdt(t),
self._build_dI_sevdt(t),
)
elif "I2" in cls:
return self._build_dI_asymdt(t), self._build_dI_symdt(t)
elif "I" in cls:
pass
def _build_dI_asymdt(self, t) -> np.ndarray:
"""
Set up equation for Iasym (asymptomatic infectious)
Parameters
----------
t : int
Current timestep
Returns
-------
np.ndarray
Calculated values of equation of Iasym for current iteration
"""
res = []
cls = self.simulation_type.split()
if "E2" in cls:
epsilon = self.params["epsilon"]
Ent = self.params["E_nt"]
res.append(epsilon[int(t)] * Ent)
if "I3" in cls:
gamma_asym = self.params["gamma_asym"]
mu_sym = self.params["mu_sym"]
mu_sev = self.params["mu_sev"]
tau_asym = self.params["tau_asym"]
I_asym = self.params["I_asym"]
res.append(
-1
* (
gamma_asym[int(t)] * I_asym
+ mu_sym[int(t)] * I_asym
+ mu_sev[int(t)] * I_asym
+ tau_asym[int(t)] * I_asym
)
)
elif "I2" in cls:
pass
        return np.array(res).sum(axis=0)
import os
import numpy as np
import chess
import chess.uci
PATH_TO_STOCKFISH_EXE = os.path.expanduser('~/stockfish-8-mac/Mac/stockfish-8-64')
# Map from piece to layer in net input. 0-5 are white.
INDEX_TO_PIECE_MAP = {0: chess.KING, 6: chess.KING,
1: chess.QUEEN, 7: chess.QUEEN,
2: chess.ROOK, 8: chess.ROOK,
3: chess.BISHOP, 9: chess.BISHOP,
4: chess.KNIGHT, 10: chess.KNIGHT,
5: chess.PAWN, 11: chess.PAWN}
CHAR_TO_INDEX_MAP = {'K': 0, 'k': 6,
'Q': 1, 'q': 7,
'R': 2, 'r': 8,
'B': 3, 'b': 9,
'N': 4, 'n': 10,
'P': 5, 'p': 11}
FULL_CHESS_INPUT_SHAPE = (8, 8, 13)
PIECE_POSITION_ACTION_SIZE = 32 * 64
POSITION_POSITION_ACTION_SIZE = 64 * 64
def map_xy_to_square(x, y):
return int(8*y + x)
def map_square_to_xy(square):
return int(square % 8), int(square // 8)
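# Round-trip sketch: squares are indexed row-major with y as the rank, so the two
# helpers are inverses for 0 <= x, y < 8:
#   map_square_to_xy(map_xy_to_square(3, 5))   # -> (3, 5)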
class ChessEnv(object):
"""
The full chess environment.
"""
def __init__(self, input_shape=FULL_CHESS_INPUT_SHAPE, action_size=POSITION_POSITION_ACTION_SIZE):
self.action_size = action_size
self.input_shape = input_shape
def reset(self):
board = chess.Board()
state = self.map_board_to_state(board)
return state
def get_next_state(self, state, action):
board = self.map_state_to_board(state)
move = self.map_action_to_move(state, action)
board.push(move)
next_state = self.map_board_to_state(board)
return next_state
def get_legal_actions(self, state):
board = self.map_state_to_board(state)
legal_moves = board.legal_moves
legal_actions = np.zeros(len(legal_moves), dtype=int)
i = 0
for move in legal_moves:
action = self.map_move_to_action(board, move)
legal_actions[i] = action
i += 1
return legal_actions
def get_legality_mask(self, state):
board = self.map_state_to_board(state)
legal_moves = board.legal_moves
        legal_moves_as_indices = [self.map_move_to_action(board, move) for move in legal_moves]
move_legality_mask = np.zeros(self.action_size)
for index in legal_moves_as_indices:
move_legality_mask[index] = 1
return move_legality_mask
def is_game_over(self, state):
board = self.map_state_to_board(state)
return board.is_game_over()
def outcome(self, state):
board = self.map_state_to_board(state)
if board.result() == '1/2-1/2':
result = 0
elif board.result() == '1-0':
result = 1
elif board.result() == '0-1':
result = -1
return result
def board_str(self, state):
board = self.map_state_to_board(state)
return str(board)
def print_board(self, state):
board = self.map_state_to_board(state)
print(board)
def map_board_to_state(self, board):
"""
Translates a chess.Board() object to an input state according to the
full Chess representation. Should be moved to the chess env
once that exists.
Parameters
----------
board: a chess.Board
        Returns the state corresponding to the board
"""
board_string = str(board)
rows = board_string.split('\n')
state = np.zeros(shape=(8, 8, 13))
for i in range(8):
row = rows[i]
pieces = row.split(' ')
for j in range(8):
char = pieces[j]
if char == '.':
continue
state[i][j][CHAR_TO_INDEX_MAP[char]] = 1
if board.turn:
state[:, :, 12] = np.ones(shape=(8, 8))
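        # layer 12 is the side-to-move plane: all ones when it is white's turn,
        # all zeros otherwise (read back via board.turn in map_state_to_board)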
return state
def map_state_to_board(self, state):
"""
Transforms a state representation according to the full Chess board input
into its chess Board equivalent.
Parameters
----------
state: a numpy object representing the input board
Returns a chess.Board object
"""
pieces = {}
for i in range(12):
piece = INDEX_TO_PIECE_MAP[i]
if i < 6:
color = chess.WHITE
else:
color = chess.BLACK
indices = np.argwhere(state[:, :, i] == 1)
squares = []
for coords in indices:
x, y = coords
squares.append(8 * (7 - x) + y)
for square in squares:
pieces[square] = chess.Piece(piece, color)
board = chess.Board()
board.set_piece_map(pieces)
board.turn = int(state[0, 0, 12])
return board
    def map_action_to_move(self, state, action):
        # state is unused here but kept so the signature matches the call in get_next_state
index = np.where(action == 1)[0][0]
from_pos = int(index / 64)
to_pos = index % 64
return chess.Move(from_pos, to_pos)
def map_move_to_action(self, board, move):
        action = np.zeros(self.action_size)
import argparse
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as plticker
import sys
import os
import glog
from matplotlib.ticker import FormatStrFormatter
root_data_location = './figures/'
def get_allocations_and_time(ifile):
# print(ifile)
fd = open(ifile, 'r')
lines = fd.readlines()
start_read = False
execution_time = []
allocations = []
total_allocs = 0
for line in lines:
if not start_read and 'init' in line:
start_read = True
continue
if 'init' in line:
continue
if 'done' in line:
start_read = False
break
if start_read:
# stop after cumulative 3600 seconds (one hour)
cumul_time = float(line.split(' ')[2])
if cumul_time > 3600:
break
total_allocs += 1
execution_time.append(float(line.split(' ')[2]))
allocations.append(total_allocs)
fd.close()
return (allocations, execution_time)
def cactus_plot(plot_file_name, datacenterName, vdcName, datasets,
allocs_range, time_range):
print('started plotting')
# fig, ax = plt.subplots(figsize=(3.25, 3.25))
fig, ax = plt.subplots(figsize=(4, 2.25))
n_groups = len(datasets)
lines = []
axes = plt.gca()
n_of_markers = 4
#plt.title(datacenterName + " " + vdcName)
for index, (name, color, marker, msize, file) in enumerate(datasets):
if os.path.exists(file):
allocations, execution_times = get_allocations_and_time(file)
            if len(allocations) == 0:
print("Warning: No allocations found for " + file)
mspace = allocs_range[-1]/n_of_markers
line = ax.plot(execution_times, allocations, color=color,
marker=marker, markevery=mspace, markersize=msize, label=name)
else:
print("Warning: Missing file: " + file)
ax.legend(loc='upper left', bbox_to_anchor=(0, 1.8), numpoints=2, ncol=2,
frameon=False)
plt.xlabel('CPU Time (s)')
plt.ylabel('Number of VDC allocations')
plt.yticks(allocs_range)
# set time axis a bit forward, to make sure that secondnet runs (that are very close to 0) are visible
xshift = max(3, time_range[1]/10)
ax.set_xlim(-xshift)
plt.xticks(time_range)
fig.tight_layout()
plt.draw()
final_figure = plt.gcf()
final_figure.savefig(plot_file_name, bbox_inches='tight', dpi=200)
print('plotting done, see %s' % plot_file_name)
plt.close()
def fig6():
# included in the paper.pdf
# fig6_cactus_2000_16_T1_9VMs.pdf
# fig6_cactus_2000_16_T3_15VMs.pdf
servers_and_cores = [('200', '4'), #('200', '16'), ('400', '16'),
('2000', '16')]
allocs_ranges = [np.arange(0, 101, 25),
np.arange(0, 101, 25),
np.arange(0, 101, 25),
np.arange(0, 61, 20),
np.arange(0, 61, 20),
np.arange(0, 61, 20),
np.arange(0, 4001, 1000),
np.arange(0, 3601, 900),
np.arange(0, 3001, 750),
np.arange(0, 1201, 300),
np.arange(0, 1201, 300),
np.arange(0, 1201, 300)]
time_ranges = [np.arange(0, 61, 15),
np.arange(0, 81, 20),
np.arange(0, 81, 20),
np.arange(0, 141, 35),
np.arange(0, 141, 35),
np.arange(0, 141, 35),
np.arange(0, 3601, 900),
np.arange(0, 3601, 900),
np.arange(0, 3601, 900),
np.arange(0, 3601, 900),
np.arange(0, 3601, 900),
np.arange(0, 3601, 900)]
range_index = 0
for (server, core) in servers_and_cores:
# files for 4 core data do not contain numbers of cores, only 16 core does
if core == '4':
core_str = '_';
else:
core_str = core + '_'
# secondnet sample file name: fmcad13_secondnet_datacentertree_200.20_instance_vn2_3.1.log
root_dir = root_data_location + 'fmcad-instances/secondnet/timelogs/'
prefix = 'fmcad13_secondnet_datacentertree'
suffix = '.20_instance_vn2_'
secondnet_files = [
root_dir + prefix + core_str + server + suffix + '3.1.log',
root_dir + prefix + core_str + server + suffix + '3.2.log',
root_dir + prefix + core_str + server + suffix + '3.3.log',
root_dir + prefix + core_str + server + suffix + '5.1.log',
root_dir + prefix + core_str + server + suffix + '5.2.log',
root_dir + prefix + core_str + server + suffix + '5.3.log']
# netsolver sample file name: fmcad13_vdcmapper_intrinsic-edge-sets-no-rnd-theory-order-theory-order-vsids-rnd-theory-freq-0-99_datacentertree_200.20.pn_instance_vn2_3.1.vn
root_dir = root_data_location + 'fmcad-instances/netsolver-smt/monosat-master/timelogs/'
prefix = 'fmcad13_vdcmapper_intrinsic-edge-sets-no-rnd-theory-order-theory-order-vsids-rnd-theory-freq-0-99_datacentertree'
suffix = '.20.pn_instance_vn2_'
netsolver_files = [
root_dir + prefix + core_str + server + suffix + '3.1.vn.log',
root_dir + prefix + core_str + server + suffix + '3.2.vn.log',
root_dir + prefix + core_str + server + suffix + '3.3.vn.log',
root_dir + prefix + core_str + server + suffix + '5.1.vn.log',
root_dir + prefix + core_str + server + suffix + '5.2.vn.log',
root_dir + prefix + core_str + server + suffix + '5.3.vn.log']
root_dir = root_data_location + 'fmcad-instances/netsolver-ilp/'
prefix = 'results.tree'
suffix = '.20.vn'
gurobi_files = [
root_dir + prefix + core_str + server + suffix + '3.1',
root_dir + prefix + core_str + server + suffix + '3.2',
root_dir + prefix + core_str + server + suffix + '3.3',
root_dir + prefix + core_str + server + suffix + '5.1',
root_dir + prefix + core_str + server + suffix + '5.2',
root_dir + prefix + core_str + server + suffix + '5.3']
# z3 sample file name: fmcad13_z3_mode4_datacentertree_200.20_instance_vn2_3.1.log
root_dir = root_data_location + 'fmcad-instances/Z3-AR/timelogs/'
prefix = 'fmcad13_z3_mode3_datacentertree'
suffix = '.20_instance_vn2_'
z3_files = [
root_dir + prefix + core_str + server + suffix + '3.1.log',
root_dir + prefix + core_str + server + suffix + '3.2.log',
root_dir + prefix + core_str + server + suffix + '3.3.log',
root_dir + prefix + core_str + server + suffix + '5.1.log',
root_dir + prefix + core_str + server + suffix + '5.2.log',
root_dir + prefix + core_str + server + suffix + '5.3.log']
# vdc_names = ['vn3.1', 'vn3.2', 'vn3.3', 'vn5.1', 'vn5.2', 'vn5.3']
vdc_names = ['T1 9VMs', 'T2 9VMs', 'T3 9VMs',
'T1 15VMs', 'T2 15VMs', 'T3 15VMs']
for i, vdc_name in enumerate(vdc_names):
plot_file_name = "./paper-plots/cactus/fig6_cactus_" + server + "_" + str(core) + "_" + vdc_name.replace(
" ", "_") +".pdf"
cactus_plot(plot_file_name, server + core, vdc_name, [
("Z3-AR", 'y', '.', 8, z3_files[i]),
("SecondNet", 'r', '^', 8, secondnet_files[i]),
("NetSolver-SMT", 'g', 's', 8, netsolver_files[i]),
("NetSolver-ILP", 'b', '*', 8, gurobi_files[i])],
allocs_ranges[range_index], time_ranges[range_index])
range_index += 1
def fig7_bcube():
# included in the paper.pdf
# fig7_cactus_bcube512_12VMs.pdf
dc_sizes = ['8', '10', '12', '16']
vn_sizes = ['6','9','12','15']
allocs_range = np.arange(0, 721, 180)
time_range = np.arange(0, 3601, 900)
for size in dc_sizes:
if size == '8':
servers = '512'
elif size == '10':
servers = '1000'
elif size == '12':
servers = '1728'
elif size == '16':
servers = '4096'
root_dir = root_data_location + 'fattree-bcube/secondnet/timelogs/'
file_prefix = 'results.BCube_n'
file_infix = "_k2_bw10_cpu16.pn.instance_list_"
file_suffix = "vm_rnd.txt.log";
secondnet_files = [root_dir + file_prefix + size + file_infix + vn_size +file_suffix for vn_size in vn_sizes]
root_dir = root_data_location + 'fattree-bcube/netsolver-smt/monosat-master/timelogs/'
file_prefix = 'config_results_vdcmapper_bcube'
file_infix = "_"
file_suffix = "vm";
netsolver_files = [root_dir + file_prefix + size + file_infix + vn_size +file_suffix for vn_size in vn_sizes]
root_dir = root_data_location + 'fattree-bcube/netsolver-ilp/timelogs/'
file_prefix = 'results.BCube_n'
file_infix = "_k2_bw10_cpu16.pn.instance_list_"
file_suffix = "vm_rnd.txt.log";
gurobi_files = [root_dir + file_prefix + size + file_infix + vn_size +file_suffix for vn_size in vn_sizes]
vdc_names = ['6VMs', '9VMs', '12VMs', '15VMs']
for i, vdc_name in enumerate(vdc_names):
plot_file_name = "paper-plots/cactus/fig7_cactus_" + "bcube" + servers + "_" + vdc_name +".pdf"
cactus_plot(plot_file_name, "bcube" + servers + " ", vdc_name, [
("SecondNet", 'r', '^', 8, secondnet_files[i]),
("NetSolver-SMT", 'g', 's', 8, netsolver_files[i]),
("NetSolver-ILP", 'b', '*', 8, gurobi_files[i])],
allocs_range, time_range)
def fig7_fattree():
# included in the paper.pdf
# fig7_cactus_fattree432_12VMs.pdf
allocs_range = np.arange(0, 721, 180)
time_range = np.arange(0, 3601, 900)
dc_sizes = ['8', '12', '16']
vn_sizes = ['6', '9', '12', '15']
for size in dc_sizes:
if size == '8':
servers = '128'
elif size == '12':
servers = '432'
elif size == '16':
servers = '1024'
root_dir = root_data_location + 'fattree-bcube/secondnet/timelogs/'
file_prefix = 'results.Fattree_n'
file_infix = "_k2_bw10_cpu16.pn.instance_list_"
file_suffix = "vm_rnd.txt.log";
secondnet_files = [root_dir + file_prefix + size + file_infix + vn_size +file_suffix for vn_size in vn_sizes]
root_dir = root_data_location + 'fattree-bcube/netsolver-smt/monosat-master/timelogs/'
file_prefix = 'config_results_vdcmapper_fattree'
file_infix = "_"
file_suffix = "vm";
netsolver_files = [root_dir + file_prefix + size + file_infix + vn_size +file_suffix for vn_size in vn_sizes]
root_dir = root_data_location + 'fattree-bcube/netsolver-ilp/timelogs/'
file_prefix = 'results.Fattree_n'
file_infix = "_k2_bw10_cpu16.pn.instance_list_"
file_suffix = "vm_rnd.txt.log";
gurobi_files = [root_dir + file_prefix + size + file_infix + vn_size +file_suffix for vn_size in vn_sizes]
vdc_names = ['6VMs', '9VMs', '12VMs', '15VMs']
for i, vdc_name in enumerate(vdc_names):
plot_file_name = "paper-plots/cactus/fig7_cactus_" + "fattree" + servers + "_" + vdc_name +".pdf"
cactus_plot(plot_file_name, "fattree" + servers + " " , vdc_name, [
("SecondNet", 'r', '^', 8, secondnet_files[i]),
("NetSolver-SMT", 'g', 's', 8, netsolver_files[i]),
("NetSolver-ILP", 'b', '*', 8, gurobi_files[i])],
allocs_range, time_range)
def fig8():
# included in the paper.pdf
# fig8_cactus_mid1_H-10VM.pdf
# fig8_cactus_west1_H-10VM.pdf
    allocs_ranges = [np.arange(0, 101, 25)]
from astropy.io import fits
from astropy.modeling import models, fitting
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
import os
#This python script takes the 4D fits files produced by reshape_fits.py, calculates the parameters of the psf, such as
#FWHM and EE50, and plots the results as 2D grids.
#By <NAME>.
# -----PARAMETERS-----
input_directory = './fits_4D/'
output_directory = './plots/'
fit_threshold = 0.05 #all data below this fraction of the max will be masked out for the gaussian fit.
plot_slices=False
# --------------------
def main():
if not os.path.exists(output_directory):
os.makedirs(output_directory)
for fits_filename in os.listdir(input_directory):
if fits_filename[-4:] == 'fits':
process_fits(input_directory,fits_filename)
def process_fits(directory, FITSfilename):
with fits.open(directory + FITSfilename) as psfFITS:
header = psfFITS[0].header
data = psfFITS[0].data
strehl_data = psfFITS[1].data
print("Processing", FITSfilename)
sim_parameters = {'label': header['LABEL'],
'psf_spacing' : header['PSFSPACE'],
'pixel_scale' : header['PIXSCALE'],
'TT_offset' : [header['NGSX'],header['NGSY']],
'laser_radius' : header['LGSRAD'],
'psf_size' : data.shape[2],
'grid_size' : data.shape[0],
}
psf_size = sim_parameters['psf_size']
grid_size = sim_parameters['grid_size']
psf_parameters = np.zeros((grid_size,grid_size,5)) #x_fwhm, y_fwhm, theta, 50% encircled energy, avg_fwhm
y_coords, x_coords = np.mgrid[:psf_size,:psf_size]
fit = fitting.LevMarLSQFitter()
data_masked = np.ma.empty((grid_size,grid_size,psf_size,psf_size)) #mask out data in the wings
    mask_level = np.zeros((grid_size,grid_size))
import settings
import model
from utils import VOC
import os
import time
import cv2
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
config = tf.ConfigProto()
config.gpu_options.visible_device_list = "0"
sess = tf.InteractiveSession(config = config)
model = model.Model(training = False)
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver(tf.global_variables())
boundary1 = settings.cell_size * settings.cell_size * settings.num_class
boundary2 = boundary1 + settings.cell_size * settings.cell_size * settings.box_per_cell
tryOriginal = settings.try_original
try:
if tryOriginal :
saver.restore(sess, os.getcwd() + '/YOLO_small.ckpt')
print('load from YOLO small pretrained')
else :
saver.restore(sess, os.getcwd() + '/model.ckpt')
print('load from past checkpoint')
except:
try:
saver.restore(sess, os.getcwd() + '/YOLO_small.ckpt')
print('load from YOLO small pretrained')
except:
print('you must train first, exiting..')
exit(0)
def draw_result(img, result,color=(0, 255, 0)):
for i in range(len(result)):
x = int(result[i][1])
y = int(result[i][2])
w = int(result[i][3] / 2)
h = int(result[i][4] / 2)
cv2.rectangle(img, (x - w, y - h), (x + w, y + h), color, 2)
cv2.rectangle(img, (x - w, y - h - 20), (x + w, y - h), (125, 125, 125), -1)
cv2.putText(img, result[i][0] + ' : %.2f' % result[i][5], (x - w + 5, y - h - 7), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1, cv2.LINE_AA)
def detect(img):
img_h, img_w, _ = img.shape
inputs = cv2.resize(img, (settings.image_size, settings.image_size))
inputs = cv2.cvtColor(inputs, cv2.COLOR_BGR2RGB).astype(np.float32)
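    # scale pixels from [0, 255] into [-1, 1] before feeding the network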
inputs = (inputs / 255.0) * 2.0 - 1.0
inputs = np.reshape(inputs, (1, settings.image_size, settings.image_size, 3))
r1,r2 = detect_from_cvmat(inputs)
result = r1[0]#detect_from_cvmat(inputs)[0]
result2 = r2[0]
#print(result)
for i in range(len(result)):
result[i][1] *= (1.0 * img_w / settings.image_size)
result[i][2] *= (1.0 * img_h / settings.image_size)
result[i][3] *= (1.0 * img_w / settings.image_size)
result[i][4] *= (1.0 * img_h / settings.image_size)
result2[i][0] *= (1.0 * img_w / settings.image_size)
result2[i][1] *= (1.0 * img_h / settings.image_size)
result2[i][2] *= (1.0 * img_w / settings.image_size)
result2[i][3] *= (1.0 * img_h / settings.image_size)
return result,result2
def detect_from_cvmat(inputs):
net_output = sess.run(model.logits, feed_dict = {model.images: inputs})
results = []
results2 = []
for i in range(net_output.shape[0]):
r1,r2 = interpret_output(net_output[i])
results.append(r1)
results2.append(r2)
#print(' -',i)
'''print('dfc : ',results)
print('dfc2 : ',results2)'''
return results,results2
def iou(box1, box2):
tb = min(box1[0] + 0.5 * box1[2], box2[0] + 0.5 * box2[2]) - max(box1[0] - 0.5 * box1[2], box2[0] - 0.5 * box2[2])
lr = min(box1[1] + 0.5 * box1[3], box2[1] + 0.5 * box2[3]) - max(box1[1] - 0.5 * box1[3], box2[1] - 0.5 * box2[3])
if tb < 0 or lr < 0:
intersection = 0
else:
intersection = tb * lr
return intersection / (box1[2] * box1[3] + box2[2] * box2[3] - intersection)
#return [intersection, (box1[2] * box1[3] + box2[2] * box2[3] - intersection)]
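# Worked example (boxes given as [center_x, center_y, width, height]): two unit squares
# offset by 0.5 along the first axis overlap on a 0.5 x 1 strip, so
#   iou([0, 0, 1, 1], [0.5, 0, 1, 1])   # -> 0.5 / (1 + 1 - 0.5) = 1/3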
def iou_mine(box1, box2):
tb = min(box1[0] + 0.5 * box1[2], box2[0] + 0.5 * box2[2]) - max(box1[0] - 0.5 * box1[2], box2[0] - 0.5 * box2[2])
lr = min(box1[1] + 0.5 * box1[3], box2[1] + 0.5 * box2[3]) - max(box1[1] - 0.5 * box1[3], box2[1] - 0.5 * box2[3])
if tb < 0 or lr < 0:
intersection = 0
else:
intersection = tb * lr
print(intersection,box1[2] * box1[3])
return [intersection/(box1[2] * box1[3]) , (box1[2] * box1[3] + box2[2] * box2[3] - intersection)]
def interpret_output(output):
probs = np.zeros((settings.cell_size, settings.cell_size, settings.box_per_cell, len(settings.classes_name)))
class_probs = np.reshape(output[0 : boundary1], (settings.cell_size, settings.cell_size, settings.num_class))
scales = np.reshape(output[boundary1 : boundary2], (settings.cell_size, settings.cell_size, settings.box_per_cell))
boxes = np.reshape(output[boundary2 :], (settings.cell_size, settings.cell_size, settings.box_per_cell, 4))
offset = np.transpose(np.reshape(np.array([np.arange(settings.cell_size)] * settings.cell_size * settings.box_per_cell), [settings.box_per_cell, settings.cell_size, settings.cell_size]), (1, 2, 0))
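    # offset[y, x, b] == x: adding it (and its transpose, which holds the row index)
    # converts cell-relative box centres into grid coordinates, which the next lines
    # normalise to [0, 1] by dividing by cell_size.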
boxes[:, :, :, 0] += offset
boxes[:, :, :, 1] += np.transpose(offset, (1, 0, 2))
boxes[:, :, :, :2] = 1.0 * boxes[:, :, :, 0:2] / settings.cell_size
    boxes[:, :, :, 2:] = np.square(boxes[:, :, :, 2:])
# Copyright 2014 Diamond Light Source Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module:: vo_centering_iterative
:platform: Unix
:synopsis: A plugin to find the center of rotation per frame
.. moduleauthor:: <NAME> <<EMAIL>>
"""
import math
import logging
import numpy as np
import scipy.ndimage as ndi
import scipy.ndimage.filters as filter
import pyfftw.interfaces.scipy_fftpack as fft
from scipy import signal
from savu.plugins.utils import register_plugin
from savu.data.plugin_list import CitationInformation
from savu.plugins.filters.base_filter import BaseFilter
from savu.plugins.driver.iterative_plugin import IterativePlugin
# :u*param search_area: Search area in pixels from horizontal approximate \
# centre of the image. Default: (-50, 50).
@register_plugin
class VoCenteringIterative(BaseFilter, IterativePlugin):
"""
A plugin to calculate the centre of rotation using the Vo Method
:param ratio: The ratio between the size of object and FOV of \
the camera. Default: 0.5.
:param row_drop: Drop lines around vertical center of the \
mask. Default: 20.
:param search_radius: Use for fine searching. Default: 6.
:param step: Step of fine searching. Default: 0.5.
:param expand_by: The number of pixels to expand the search region by \
on each iteration. Default: 5
:param boundary_distance: Accepted distance of minima from the boundary of\
the listshift in the coarse search. Default: 3.
:u*param preview: A slice list of required frames (sinograms) to use in \
    the calculation of the centre of rotation (this will not reduce the data \
size for subsequent plugins). Default: [].
:param datasets_to_populate: A list of datasets which require this \
information. Default: [].
:param out_datasets: The default \
names. Default: ['cor_raw','cor_fit', 'reliability'].
:u*param start_pixel: The approximate centre. If value is None, take the \
value from .nxs file else set to image centre. Default: None.
"""
def __init__(self):
super(VoCenteringIterative, self).__init__("VoCenteringIterative")
self.search_area = (-20, 20)
self.peak_height_min = 50000 # arbitrary
        self.min_dist = 3  # min distance deemed acceptable from boundary
self.expand_by = 5 # expand the search region by this amount
self.list_shift = None
self.warning_level = 0
self.final = False
self.at_boundary = False
self.list_metric = []
self.expand_direction = None
def _create_mask(self, Nrow, Ncol, obj_radius):
du, dv = 1.0/Ncol, (Nrow-1.0)/(Nrow*2.0*math.pi)
cen_row, cen_col = int(np.ceil(Nrow/2)-1), int(np.ceil(Ncol/2)-1)
drop = self.parameters['row_drop']
mask = np.zeros((Nrow, Ncol), dtype=np.float32)
for i in range(Nrow):
num1 = np.round(((i-cen_row)*dv/obj_radius)/du)
p1, p2 = (np.clip(np.sort((-num1+cen_col, num1+cen_col)),
0, Ncol-1)).astype(int)
mask[i, p1:p2+1] = np.ones(p2-p1+1, dtype=np.float32)
if drop < cen_row:
mask[cen_row-drop:cen_row+drop+1, :] = \
np.zeros((2*drop + 1, Ncol), dtype=np.float32)
mask[:, cen_col-1:cen_col+2] = np.zeros((Nrow, 3), dtype=np.float32)
return mask
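    # Background note: this is the double-wedge Fourier-domain mask of Vo's centering
    # method; the CoR metric is the masked magnitude of the 2D FFT of the sinogram
    # stacked with its (shifted) mirror, minimised over candidate shifts below.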
def _get_start_shift(self, centre):
if self.parameters['start_pixel'] is not None:
shift = centre - int(self.parameters['start_pixel']/self.downlevel)
else:
in_mData = self.get_in_meta_data()[0]
shift = centre - in_mData['centre'] if 'centre' in \
in_mData.get_dictionary().keys() else 0
return int(shift)
def _coarse_search(self, sino, list_shift):
# search minsearch to maxsearch in 1 pixel steps
list_metric = np.zeros(len(list_shift), dtype=np.float32)
(Nrow, Ncol) = sino.shape
# check angles to determine if a sinogram should be chopped off.
# Copy the sinogram and flip left right, to make a full [0:2Pi] sino
sino2 = np.fliplr(sino[1:])
# This image is used for compensating the shift of sino2
compensateimage = np.zeros((Nrow-1, Ncol), dtype=np.float32)
# Start coarse search in which the shift step is 1
compensateimage[:] = np.flipud(sino)[1:]
mask = self._create_mask(2*Nrow-1, Ncol,
0.5*self.parameters['ratio']*Ncol)
count = 0
for i in list_shift:
sino2a = np.roll(sino2, i, axis=1)
if i >= 0:
sino2a[:, 0:i] = compensateimage[:, 0:i]
else:
sino2a[:, i:] = compensateimage[:, i:]
list_metric[count] = np.sum(
np.abs(fft.fftshift(fft.fft2(np.vstack((sino, sino2a)))))*mask)
count += 1
return list_metric
def _fine_search(self, sino, raw_cor):
(Nrow, Ncol) = sino.shape
centerfliplr = (Ncol + 1.0)/2.0-1.0
# Use to shift the sino2 to the raw CoR
shiftsino = np.int16(2*(raw_cor-centerfliplr))
sino2 = np.roll(np.fliplr(sino[1:]), shiftsino, axis=1)
lefttake = 0
righttake = Ncol-1
search_rad = self.parameters['search_radius']
if raw_cor <= centerfliplr:
lefttake = np.int16(np.ceil(search_rad+1))
righttake = np.int16(np.floor(2*raw_cor-search_rad-1))
else:
lefttake = np.int16(np.ceil(raw_cor-(Ncol-1-raw_cor)+search_rad+1))
righttake = np.int16(np.floor(Ncol-1-search_rad-1))
Ncol1 = righttake-lefttake + 1
mask = self._create_mask(2*Nrow-1, Ncol1,
0.5*self.parameters['ratio']*Ncol)
numshift = np.int16((2*search_rad)/self.parameters['step'])+1
listshift = np.linspace(-search_rad, search_rad, num=numshift)
listmetric = np.zeros(len(listshift), dtype=np.float32)
num1 = 0
factor1 = np.mean(sino[-1, lefttake:righttake])
for i in listshift:
sino2a = ndi.interpolation.shift(sino2, (0, i), prefilter=False)
factor2 = np.mean(sino2a[0, lefttake:righttake])
sino2a = sino2a*factor1/factor2
sinojoin = np.vstack((sino, sino2a))
listmetric[num1] = np.sum(np.abs(fft.fftshift(
fft.fft2(sinojoin[:, lefttake:righttake + 1])))*mask)
num1 = num1 + 1
minpos = np.argmin(listmetric)
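        # the /2.0 below converts a shift of the flipped half-sinogram into a CoR
        # offset: rolling sino2 by s pixels moves the implied mirror axis by s/2
        # (consistent with shiftsino = 2*(raw_cor - centerfliplr) above)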
rotcenter = raw_cor + listshift[minpos]/2.0
return rotcenter
def _get_listshift(self):
        smin, smax = self.search_area if self.get_iteration() == 0 \
else self._expand_search()
list_shift = np.arange(smin, smax+2, 2) - self.start_shift
logging.debug('list shift is %s', list_shift)
return list_shift
def _expand_search(self):
if self.expand_direction == 'left':
return self._expand_left()
elif self.expand_direction == 'right':
return self._expand_right()
else:
raise Exception('Unknown expand direction.')
def _expand_left(self):
smax = self.list_shift[0] - 2
smin = smax - self.expand_by*2
if smin <= -self.boundary:
smin = -self.boundary
self.at_boundary = True
return smin, smax
def _expand_right(self):
smin = self.list_shift[-1] + 2
smax = self.list_shift[-1] + self.expand_by*2
        if smax >= self.boundary:
smax = self.boundary
self.at_boundary = True
return smin, smax
def pre_process(self):
pData = self.get_plugin_in_datasets()[0]
label = pData.get_data_dimension_by_axis_label
Ncol = pData.get_shape()[label('detector_x')]
self.downlevel = 4 if Ncol > 1800 else 1
self.downsample = slice(0, Ncol, self.downlevel)
Ncol_downsample = len(np.arange(0, Ncol, self.downlevel))
self.centre_fliplr = (Ncol_downsample - 1.0)/2.0
self.start_shift = self._get_start_shift(self.centre_fliplr)*2
self.boundary = int(np.ceil(Ncol/4.0))
def process_frames(self, data):
if not self.final:
logging.debug('performing coarse search for iteration %s',
self.get_iteration())
sino = filter.gaussian_filter(data[0][:, self.downsample], (3, 1))
list_shift = self._get_listshift()
list_metric = self._coarse_search(sino, list_shift)
self._update_lists(list(list_shift), list(list_metric))
self.coarse_cor, dist, reliability_metrics = \
self._analyse_result(self.list_metric, self.list_shift)
            return [np.array([self.coarse_cor]), np.array([dist]),
                    np.array([reliability_metrics])]
import numpy as np
import math
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import random,pickle
# for positive butyrate dose
result = np.array(pickle.load(open('result.p', 'rb'))) # for input bI1 = 0.18
result1 = np.array(pickle.load(open('result1.p', 'rb'))) # for input bI1 = 0.25
result2 = np.array(pickle.load(open('result2.p', 'rb'))) # for input bI1 = 0.11
result3 = np.array(pickle.load(open('resulth.p', 'rb'))) # for input bI1 = 0
T = np.array(pickle.load(open('time1.p', 'rb')))
# butyrate in intestine
plt.rcParams.update({'font.size': 25,'legend.fontsize': 20})
plt.xticks(np.arange(0, 30, step=7))
plt.plot(T, result[:,0], color = 'orange',linewidth=3)
plt.fill_between(T, result1[:,0], result2[:,0], color = 'k', alpha = 0.1)
plt.legend(['butyrate in intestine'], loc='lower right')
plt.xlabel('Time (days)')
plt.ylabel('$\Delta$ Butyrate \u03bcM')
plt.ylim([0,0.42])
plt.errorbar([28], [0.18], yerr=[0.07], fmt='s',color='r', fillstyle = 'none', elinewidth=3, markersize=10, capsize=6, capthick=3, barsabove= False)
#plt.savefig("images/IECR/" + "1.png", dpi = 300, bbox_inches='tight')
plt.show()
# butyrate in blood
plt.rcParams.update({'font.size': 25,'legend.fontsize': 20})
plt.xticks(np.arange(0, 30, step=7))
plt.plot(T, result[:,1], color = 'orange',linewidth=3)
plt.fill_between(T, result1[:,1], result2[:,1], color = 'k', alpha = 0.1)
plt.legend(['butyrate in blood'], loc='lower right')
plt.xlabel('Time (days)')
plt.ylabel('$\Delta$ Butyrate \u03bcM')
plt.ylim([0,0.42])
plt.errorbar([28], [0.29], yerr=[0.09], fmt='s',color='r', fillstyle = 'none', elinewidth=3, markersize=10, capsize=6, capthick=3, barsabove= False)
#plt.savefig("images/IECR/" + "2.png", dpi = 300, bbox_inches='tight')
plt.show()
# butyrate in bone
plt.rcParams.update({'font.size': 25,'legend.fontsize': 20})
plt.xticks(np.arange(0, 30, step=7))
plt.plot(T, result[:,2], color = 'orange',linewidth=3)
plt.fill_between(T, result1[:,2], result2[:,2], color = 'k', alpha = 0.1)
plt.legend(['butyrate in bone'], loc='lower right')
plt.xlabel('Time (days)')
plt.ylabel('$\Delta$ Butyrate \u03bcM')
plt.ylim([0,0.42])
plt.errorbar([28], [0.29], yerr=[0], fmt='s',color='r', fillstyle = 'none', elinewidth=3, markersize=10, capsize=0, capthick=3, barsabove= False)
#plt.savefig("images/IECR/" + "3.png", dpi = 300, bbox_inches='tight')
plt.show()
# Tregs in blood
plt.rcParams.update({'font.size': 25,'legend.fontsize': 20})
plt.xticks(np.arange(0, 30, step=7))
plt.yticks(np.arange(13, 18, step=1))
#!/usr/bin/python3.6
""" Converts the data into a format, suitable for training. """
import multiprocessing, os, sys
from functools import partial
from glob import glob
from typing import *
import numpy as np, pandas as pd
from tqdm import tqdm
from metrics import mapk
import torch
def get_classes_list() -> List[str]:
classes = [os.path.basename(f)[:-4] for f in glob("../../data/train_raw/*.csv")]
return sorted(classes, key=lambda s: s.lower())
if __name__ == '__main__':
test_paths = sorted(glob('new/*test*.npy'))
train_paths = [s.replace('test', 'train') for s in test_paths]
train_df = pd.read_csv('level2_train.csv')
val_df = pd.read_csv('level2_val.csv')
classes = get_classes_list()
class2idx = {c.replace('_', ' '): i for i, c in enumerate(classes)}
    test_predicts = [np.load(pred) for pred in test_paths]
#!/usr/bin/env python3
"""
Script to create Figure 2 of the paper.
"""
from pathlib import Path
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from tqdm import tqdm
from utils import load_dataset
PROJECT_ROOT = Path.cwd()
def main():
"""Create elements for figure 2 of the paper"""
# ----------------------------------------------------------------------------
n_bootstrap = 1000
model_name = 'supervised_aae'
outputs_dir = PROJECT_ROOT / 'outputs'
bootstrap_dir = outputs_dir / 'bootstrap_analysis'
model_dir = bootstrap_dir / model_name
# ----------------------------------------------------------------------------
dataset_name = 'ADNI'
participants_path = PROJECT_ROOT / 'data' / dataset_name / 'participants.tsv'
freesurfer_path = PROJECT_ROOT / 'data' / dataset_name / 'freesurferData.csv'
ids_path = PROJECT_ROOT / 'outputs' / (dataset_name + '_homogeneous_ids.csv')
adni_df = load_dataset(participants_path, ids_path, freesurfer_path)
mean_adni_list = []
for i_bootstrap in tqdm(range(n_bootstrap)):
bootstrap_model_dir = model_dir / '{:03d}'.format(i_bootstrap)
output_dataset_dir = bootstrap_model_dir / dataset_name
output_dataset_dir.mkdir(exist_ok=True)
reconstruction_error_df = pd.read_csv(output_dataset_dir / 'reconstruction_error.csv')
error_hc = reconstruction_error_df.loc[adni_df['Diagn'] == 1]['Reconstruction error']
error_emci = reconstruction_error_df.loc[adni_df['Diagn'] == 27]['Reconstruction error']
error_lmci = reconstruction_error_df.loc[adni_df['Diagn'] == 28]['Reconstruction error']
error_ad = reconstruction_error_df.loc[adni_df['Diagn'] == 17]['Reconstruction error']
mean_adni_list.append([error_hc.mean(), error_emci.mean(), error_lmci.mean(), error_ad.mean()])
mean_adni_list = np.array(mean_adni_list)
plt.hlines(range(4),
np.percentile(mean_adni_list, 2.5, axis=0),
np.percentile(mean_adni_list, 97.5, axis=0))
plt.plot(np.mean(mean_adni_list, axis=0), range(4), 's', color='k')
plt.savefig(bootstrap_dir / 'ADNI.eps', format='eps')
plt.close()
plt.clf()
    results = pd.DataFrame(columns=['Measure', 'HC', 'EMCI', 'LMCI', 'AD'])
results = results.append({'Measure': 'Mean',
'HC': np.mean(mean_adni_list, axis=0)[0],
'EMCI': np.mean(mean_adni_list, axis=0)[1],
'LMCI': np.mean(mean_adni_list, axis=0)[2],
'AD': np.mean(mean_adni_list, axis=0)[3], }, ignore_index=True)
results = results.append({'Measure': 'Lower',
'HC': np.percentile(mean_adni_list, 2.5, axis=0)[0],
'EMCI': np.percentile(mean_adni_list, 2.5, axis=0)[1],
'LMCI': np.percentile(mean_adni_list, 2.5, axis=0)[2],
'AD': np.percentile(mean_adni_list, 2.5, axis=0)[3], }, ignore_index=True)
results = results.append({'Measure': 'Upper',
'HC': np.percentile(mean_adni_list, 97.5, axis=0)[0],
'EMCI': np.percentile(mean_adni_list, 97.5, axis=0)[1],
'LMCI': np.percentile(mean_adni_list, 97.5, axis=0)[2],
                              'AD': np.percentile(mean_adni_list, 97.5, axis=0)[3], }, ignore_index=True)
import unittest
import sys
import numpy as np
import os
import warnings
from pyiron.atomistics.structure.atom import Atom
from pyiron.atomistics.structure.atoms import Atoms, CrystalStructure
from pyiron.atomistics.structure.sparse_list import SparseList
from pyiron.atomistics.structure.periodic_table import PeriodicTable, ChemicalElement
from pyiron.base.generic.hdfio import FileHDFio
class TestAtoms(unittest.TestCase):
@classmethod
def tearDownClass(cls):
if sys.version_info[0] >= 3:
file_location = os.path.dirname(os.path.abspath(__file__))
if os.path.isfile(os.path.join(file_location, "../../static/atomistics/test_hdf")):
os.remove(os.path.join(file_location, "../../static/atomistics/test_hdf"))
@classmethod
def setUpClass(cls):
C = Atom('C').element
cls.C3 = Atoms([C, C, C], positions=[[0, 0, 0], [0, 0, 2], [0, 2, 0]])
cls.C2 = Atoms(2 * [Atom('C')])
def setUp(self):
# These atoms are reset before every test.
self.CO2 = Atoms("CO2", positions=[[0, 0, 0], [0, 0, 1.5], [0, 1.5, 0]])
def test__init__(self):
pos, cell = generate_fcc_lattice()
pse = PeriodicTable()
el = pse.element("Al")
basis = Atoms()
self.assertIsInstance(basis, Atoms)
self.assertIsInstance(basis.info, dict)
self.assertIsInstance(basis.arrays, dict)
self.assertIsInstance(basis.adsorbate_info, dict)
self.assertIsInstance(basis.units, dict)
self.assertIsInstance(basis.pbc, (bool, list, np.ndarray))
self.assertIsInstance(basis.indices, np.ndarray)
self.assertIsNone(basis.positions)
self.assertIsInstance(basis.species, list)
self.assertIsInstance(basis.elements, np.ndarray)
self.assertIsNone(basis.cell)
basis = Atoms(symbols='Al', positions=pos, cell=cell)
self.assertIsInstance(basis, Atoms)
self.assertEqual(basis.get_spacegroup()["Number"], 225)
basis = Atoms(elements='Al', positions=pos, cell=cell)
self.assertIsInstance(basis, Atoms)
basis = Atoms(elements=['Al'], positions=pos, cell=cell)
self.assertIsInstance(basis, Atoms)
self.assertRaises(ValueError, Atoms, symbols="Pt", elements='Al', positions=pos, cell=cell)
basis = Atoms(numbers=[13], positions=pos, cell=cell)
self.assertEqual(basis.get_majority_species()['symbol'], "Al")
basis = Atoms(species=[el], indices=[0], positions=pos, cell=cell)
self.assertEqual(basis.get_majority_species()['symbol'], "Al")
self.assertIsInstance(basis, Atoms)
self.assertIsInstance(basis.info, dict)
self.assertIsInstance(basis.arrays, dict)
self.assertIsInstance(basis.adsorbate_info, dict)
self.assertIsInstance(basis.units, dict)
self.assertIsInstance(basis.pbc, (bool, list, np.ndarray))
self.assertIsInstance(basis.indices, np.ndarray)
self.assertIsInstance(basis.species, list)
self.assertIsInstance(basis.cell, np.ndarray)
self.assertIsInstance(basis.positions, np.ndarray)
self.assertIsInstance(basis.get_scaled_positions(), np.ndarray)
self.assertIsInstance(basis.elements, np.ndarray)
def test_set_species(self):
pos, cell = generate_fcc_lattice()
pse = PeriodicTable()
el = pse.element("Pt")
basis = Atoms(symbols='Al', positions=pos, cell=cell)
self.assertEqual(basis.get_chemical_formula(), "Al")
basis.set_species([el])
self.assertEqual(basis.get_chemical_formula(), "Pt")
self.assertTrue("Al" not in [sp.Abbreviation] for sp in basis._species_to_index_dict.keys())
self.assertTrue("Pt" in [sp.Abbreviation] for sp in basis._species_to_index_dict.keys())
def test_new_array(self):
pos, cell = generate_fcc_lattice()
basis = Atoms(symbols='Al', positions=pos, cell=cell)
basis.set_repeat([10, 10, 10])
spins = np.ones(len(basis))
basis.new_array(name="spins", a=spins)
self.assertTrue(np.array_equal(basis.arrays['spins'], spins))
def test_set_array(self):
pos, cell = generate_fcc_lattice()
basis = Atoms(symbols='Al', positions=pos, cell=cell)
basis.set_repeat([10, 10, 10])
spins = np.ones(len(basis), dtype=float)
basis.set_array(name="spins", a=2*spins, dtype=int)
self.assertTrue(np.array_equal(basis.arrays['spins'], 2 * spins))
def test_get_array(self):
pos, cell = generate_fcc_lattice()
basis = Atoms(symbols='Al', positions=pos, cell=cell)
basis.set_repeat([10, 10, 10])
spins = np.ones(len(basis), dtype=float)
basis.set_array(name="spins", a=2*spins, dtype=int)
self.assertTrue(np.array_equal(basis.arrays['spins'], 2 * spins))
self.assertTrue(np.array_equal(basis.get_array(name="spins"), 2 * spins))
def test_add_tags(self):
self.CO2.add_tag(test_tag="a")
self.assertIsInstance(self.CO2.test_tag, SparseList)
self.assertEqual(self.CO2.test_tag[0], "a")
self.assertEqual(self.CO2.test_tag[0], self.CO2.test_tag[2])
self.assertIsInstance(self.CO2.test_tag.list(), list)
self.CO2.add_tag(selective_dynamics=[True, True, True])
self.CO2.selective_dynamics[1] = [True, False, True]
self.assertEqual(self.CO2.selective_dynamics[1], [True, False, True])
self.assertIsInstance(self.CO2.selective_dynamics.list(), list)
def test_get_tags(self):
self.CO2.add_tag(test_tag="a")
self.assertIsInstance(self.CO2.test_tag, SparseList)
self.assertIsInstance(self.CO2.get_tags(), type(dict().keys()))
def test_get_pbc(self):
self.assertTrue(np.array_equal(self.CO2.pbc, self.CO2.get_pbc()))
self.assertEqual(len(self.CO2.get_pbc()), 3)
def test_set_pbc(self):
self.CO2.set_pbc(value=[True, True, False])
self.assertTrue(np.array_equal(self.CO2.pbc, self.CO2.get_pbc()))
self.assertTrue(np.array_equal([True, True, False], self.CO2.get_pbc()))
self.CO2.set_pbc(value=False)
self.assertTrue(np.array_equal([False, False, False], self.CO2.get_pbc()))
self.assertTrue(np.array_equal(self.CO2.pbc, self.CO2.get_pbc()))
def test_chemical_element(self):
conv = self.CO2.convert_element('C')
self.assertIsInstance(conv, ChemicalElement)
self.assertIsInstance(self.CO2.convert_element(conv), ChemicalElement)
self.assertIsInstance(self.CO2.convert_element(self.CO2[0]), ChemicalElement)
with self.assertRaises(AssertionError):
self.assertIsInstance(self.CO2.convert_element(self.CO2), ChemicalElement)
self.assertEqual(len(self.CO2.species), 2)
def test_copy(self):
pos, cell = generate_fcc_lattice()
basis = Atoms(symbols='Al', positions=pos, cell=cell)
basis_copy = basis.copy()
self.assertEqual(basis, basis_copy)
basis_copy[:] = "Pt"
self.assertNotEqual(basis, basis_copy)
def test_numbers_to_elements(self):
num_list = [1, 12, 13, 6]
self.assertTrue(np.array_equal([el.Abbreviation for el in self.CO2.numbers_to_elements(num_list)],
['H', 'Mg', 'Al', 'C']))
def test_scaled_pos_xyz(self):
basis = Atoms(symbols='AlAl', positions=[3*[0], 3*[1]], cell=2*np.eye(3))
pos_xyz = basis.pos_xyz()
self.assertAlmostEqual(np.linalg.norm(pos_xyz[0]-np.array([0, 1])), 0)
scaled_pos_xyz = basis.scaled_pos_xyz()
self.assertAlmostEqual(np.linalg.norm(pos_xyz[0]-basis.cell[0,0]*scaled_pos_xyz[0]), 0)
def test_to_hdf(self):
if sys.version_info[0] >= 3:
filename = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../static/atomistics/test_hdf")
abs_filename = os.path.abspath(filename)
hdf_obj = FileHDFio(abs_filename)
pos, cell = generate_fcc_lattice()
basis = Atoms(symbols='Al', positions=pos, cell=cell)
basis.set_repeat([2, 2, 2])
basis.to_hdf(hdf_obj, "test_structure")
self.assertTrue(np.array_equal(hdf_obj["test_structure/positions"], basis.positions))
basis_new = Atoms().from_hdf(hdf_obj, "test_structure")
self.assertEqual(basis, basis_new)
def test_from_hdf(self):
if sys.version_info[0] >= 3:
filename = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../static/atomistics/test_hdf")
abs_filename = os.path.abspath(filename)
hdf_obj = FileHDFio(abs_filename)
pos, cell = generate_fcc_lattice()
basis_store = Atoms(symbols='Al', positions=pos, cell=cell)
basis_store.set_repeat([2, 2, 2])
basis_store.to_hdf(hdf_obj, "simple_structure")
basis = Atoms().from_hdf(hdf_obj, group_name="simple_structure")
self.assertEqual(len(basis), 8)
self.assertEqual(basis.get_majority_species()['symbol'], "Al")
self.assertEqual(basis.get_spacegroup()['Number'], 225)
def test_create_Fe_bcc(self):
self.pse = PeriodicTable()
self.pse.add_element("Fe", "Fe_up", spin="up", pseudo_name='GGA')
self.pse.add_element("Fe", "Fe_down", spin="down", pseudo_name='GGA')
Fe_up = self.pse.element("Fe_up")
Fe_down = self.pse.element("Fe_down")
self.Fe_bcc = Atoms([Fe_up, Fe_down], scaled_positions=[[0, 0, 0], [0.25, 0.25, 0.25]], cell=np.identity(3))
self.Fe_bcc.add_tag("group")
self.Fe_bcc.group[:] = 0
def test_convert_formula(self):
self.assertEqual(self.CO2.convert_formula('C'), ['C'])
self.assertEqual(self.CO2.convert_formula('C3'), ['C', 'C', 'C'])
self.assertEqual(self.CO2.convert_formula('CO2'), ['C', 'O', 'O'])
self.assertEqual(self.CO2.convert_formula('CO2Fe'), ['C', 'O', 'O', 'Fe'])
self.assertEqual(self.CO2.convert_formula('CO2FeF21'), ['C', 'O', 'O', 'Fe', 'F', 'F'])
def test__getitem__(self):
self.assertEqual(self.CO2[0].symbol, 'C')
self.assertEqual(self.C3[2].position.tolist(), [0, 2, 0])
self.assertTrue((self.C3[1:].positions == np.array([[0, 0, 2], [0, 2, 0]])).all())
short_basis = self.CO2[0]
self.assertIsInstance(short_basis, Atom)
short_basis = self.CO2[[0]]
self.assertIsInstance(short_basis, Atoms)
self.assertEqual(short_basis.indices[0], 0)
self.assertEqual(len(short_basis.species), 1)
short_basis = self.CO2[[2]]
self.assertIsInstance(short_basis, Atoms)
self.assertEqual(short_basis.indices[0], 0)
self.assertEqual(len(short_basis.species), 1)
basis_Mg = CrystalStructure("Mg", bravais_basis="fcc", lattice_constant=4.2)
basis_O = CrystalStructure("O", bravais_basis="fcc", lattice_constant=4.2)
basis_O.positions += [0., 0., 0.5]
basis = basis_Mg + basis_O
basis.center_coordinates_in_unit_cell()
basis.set_repeat([3, 3, 3])
mg_indices = basis.select_index("Mg")
o_indices = basis.select_index("O")
basis_new = basis[mg_indices] + basis[o_indices]
self.assertEqual(len(basis_new._tag_list), len(basis[mg_indices]) + len(basis[o_indices]))
self.assertEqual(basis_new.get_spacegroup()["Number"], 225)
def test_positions(self):
self.assertEqual(self.CO2[1:].positions[1:].tolist(), [[0.0, 1.5, 0.0]])
self.CO2.positions[1][0] = 5.
self.assertEqual(self.CO2.positions[1].tolist(), [5.0, 0, 1.5])
def test_set_positions(self):
pos, cell = generate_fcc_lattice()
basis = Atoms(symbols='Al', positions=pos, cell=cell)
basis.set_positions(np.array([[2.5, 2.5, 2.5]]))
self.assertTrue(np.array_equal(basis.positions, [[2.5, 2.5, 2.5]]))
def test_set_scaled_positions(self):
pos, cell = generate_fcc_lattice()
basis = Atoms(symbols='Al', positions=pos, cell=cell, a=4.2)
basis.set_scaled_positions(np.array([[0.5, 0.5, 0.5]]))
self.assertTrue(np.array_equal(basis.get_scaled_positions(), [[0.5, 0.5, 0.5]]))
self.assertTrue(np.array_equal(basis.positions, np.dot([[0.5, 0.5, 0.5]], basis.cell)))
with warnings.catch_warnings(record=True):
basis.scaled_positions = np.array([[0.5, 0.5, 0.5]])
self.assertTrue(np.array_equal(basis.scaled_positions, [[0.5, 0.5, 0.5]]))
def test_cell(self):
CO = Atoms("CO",
positions=[[0, 0, 0], [0, 0, 2]],
cell=[[1, 0, 0], [0, 1, 0], [0, 0, 1]],
pbc=[True, True, True])
self.assertTrue((CO.get_cell() == np.identity(3)).all())
self.assertTrue((CO.cell == np.identity(3)).all())
CO.cell[2][2] = 10.
self.assertTrue(CO.cell[2, 2] == 10.)
self.assertAlmostEqual(CO.get_volume(), 10)
self.assertAlmostEqual(CO.get_volume(per_atom=True), 0.5*10)
with self.assertRaises(ValueError):
CO.cell = -np.eye(3)
with self.assertRaises(ValueError):
CO.cell = [2,1]
def test_add(self):
COX = self.C2 + Atom("O", position=[0, 0, -2])
COX += Atom("O", position=[0, 0, -4])
COX += COX
n_objects = len(set(COX.get_species_objects()))
n_species = len(set(COX.get_chemical_elements()))
self.assertEqual(n_objects, n_species)
def test_pbc(self):
CO = Atoms("CO",
positions=[[0, 0, 0], [0, 0, 2]],
cell=[[1, 0, 0], [0, 1, 0], [0, 0, 1]],
pbc=[True, True, True])
self.assertTrue((CO.pbc == np.array([True, True, True])).all())
CO.set_pbc((True, True, False))
def test_get_masses_DOF(self):
self.assertEqual(len(self.CO2.get_masses_dof()), len(self.CO2.positions.flatten()))
def test_get_center_of_mass(self):
basis = Atoms(elements='AlFe', positions=[3*[0.5], 3*[1.5]], cell=2*np.eye(3))
mass = np.array(basis.get_masses())
self.assertAlmostEqual((mass[0]*0.5+mass[1]*1.5)/mass.sum(), basis.get_center_of_mass()[0])
basis.set_repeat(2)
self.assertAlmostEqual((mass[0]*0.5+mass[1]*1.5)/mass.sum()+1, basis.get_center_of_mass()[0])
def test_rotate(self):
unitcell = Atoms(elements='AlFe', positions=[3*[0], 3*[1]], cell=2*np.eye(3))
basis = unitcell.copy()
basis.rotate(vector=[0, 0, 0.1*np.pi])
self.assertAlmostEqual(np.arccos(basis.positions[1, :2].sum()/2)/np.pi, 0.1)
basis = unitcell.copy()
basis.rotate(vector=[0, 0, 1], angle=0.1*np.pi)
self.assertAlmostEqual(np.arccos(basis.positions[1, :2].sum()/2)/np.pi, 0.1)
basis = unitcell.copy()
center_of_mass = basis.get_center_of_mass()
basis.rotate(vector=[0, 0, 0.1*np.pi], center='com')
self.assertTrue(np.allclose(basis.get_center_of_mass(), center_of_mass))
basis = unitcell.copy()
center_of_positions = basis.positions.mean(axis=0)
basis.rotate(vector=[0, 0, 1], center='cop')
self.assertTrue(np.allclose(center_of_positions, basis.positions.mean(axis=0)))
basis = unitcell.copy()
position = basis.positions[1]
basis.rotate(vector=[0, 0, 1], center='cou')
self.assertTrue(np.allclose(position, basis.positions[1]))
basis = unitcell.copy()
basis.rotate(vector=np.random.random(3), rotate_cell=True)
self.assertAlmostEqual(basis.get_scaled_positions()[1,0], 0.5)
basis = unitcell.copy()
basis.rotate(vector=np.random.random(3), index_list=[0])
self.assertTrue(np.allclose(unitcell.positions.flatten(), basis.positions.flatten()))
def test_rotate_euler(self):
unitcell = Atoms(elements='AlFe', positions=[3*[0], 3*[1]], cell=2*np.eye(3))
basis = unitcell.copy()
basis.rotate_euler(phi=0.1*np.pi)
self.assertAlmostEqual(np.arccos(basis.positions[1, :2].sum()/2)/np.pi, 0.1)
basis = unitcell.copy()
center_of_mass = basis.get_center_of_mass()
basis.rotate_euler(phi=0.1*np.pi, center='com')
self.assertTrue(np.allclose(basis.get_center_of_mass(), center_of_mass))
basis = unitcell.copy()
center_of_positions = basis.positions.mean(axis=0)
basis.rotate_euler(phi=0.1*np.pi, center='cop')
self.assertTrue(np.allclose(center_of_positions, basis.positions.mean(axis=0)))
basis = unitcell.copy()
position = basis.positions[1]
basis.rotate_euler(phi=0.1*np.pi, center='cou')
self.assertTrue(np.allclose(position, basis.positions[1]))
def test_get_parent_basis(self):
periodic_table = PeriodicTable()
periodic_table.add_element(parent_element="O", new_element="O_up")
O_up = periodic_table.element("O_up")
O_basis = Atoms([O_up], cell=10.0 * np.eye(3), scaled_positions=[[0.5, 0.5, 0.5]])
O_simple = Atoms(["O"], cell=10.0 * np.eye(3), scaled_positions=[[0.5, 0.5, 0.5]])
O_parent = O_basis.get_parent_basis()
self.assertNotEqual(O_basis, O_parent)
self.assertEqual(O_simple, O_parent)
self.assertEqual(O_parent[0].symbol, "O")
periodic_table.add_element(parent_element="O", new_element="O_down")
O_down = periodic_table.element("O_down")
O_basis = Atoms([O_up, O_down], cell=10.0 * np.eye(3), scaled_positions=[[0.5, 0.5, 0.5], [0, 0, 0]])
O_simple = Atoms(["O", "O"], cell=10.0 * np.eye(3), scaled_positions=[[0.5, 0.5, 0.5]])
O_parent = O_basis.get_parent_basis()
self.assertNotEqual(O_basis, O_parent)
self.assertEqual(O_simple, O_parent)
self.assertEqual(O_parent.get_chemical_formula(), "O2")
self.assertEqual(len(O_basis.species), 2)
self.assertEqual(len(O_simple.species), 1)
self.assertEqual(len(O_parent.species), 1)
def test_profiling(self):
num = 1000
C100 = Atoms(num * ["C"], positions=[(0, 0, 0) for _ in range(num)])
self.assertEqual(len(C100), num)
def test_Au(self):
a = 4.05 # Gold lattice constant
b = a / 2.
fcc = Atoms(['Au'],
cell=[(0, b, b), (b, 0, b), (b, b, 0)],
pbc=True)
# print fcc
# print "volume: ", fcc.get_volume()
def test_set_absolute(self):
a = 4.05 # Gold lattice constant
b = a / 2.
positions = np.array([(0.5, 0.4, 0.)])
fcc = Atoms(symbols=['Au'],
scaled_positions=positions,
cell=[(0, b, b), (b, 0, b), (b, b, 0)],
pbc=True)
# fcc.set_absolute()
# print fcc.positions
# fcc.set_relative()
self.assertTrue(np.linalg.norm(fcc.get_scaled_positions() - positions) < 1e-10)
def test_set_relative(self):
lattice = CrystalStructure(element='Al', bravais_basis='fcc', lattice_constants=4)
basis_relative = lattice.copy()
basis_relative.set_relative()
basis_relative.cell[0,0] = 6
basis_absolute = lattice.copy()
basis_absolute.set_absolute()
basis_absolute.cell[0,0] = 6
self.assertAlmostEqual(basis_relative.positions[-1,0]*1.5, basis_absolute.positions[-1,0])
basis = lattice.copy()
self.assertAlmostEqual(basis.get_scaled_positions()[-1,0], basis_relative.get_scaled_positions()[-1,0])
basis.cell[0,0] = 6
self.assertAlmostEqual(basis.positions[-1,0], basis_absolute.positions[-1,0])
basis = lattice.copy()
basis_relative = lattice.copy()
basis_relative.set_relative()
basis.positions[-1,0] = 0.5
basis_relative.positions[-1,0] = 0.5
self.assertAlmostEqual(basis.positions[-1,0], basis_relative.positions[-1,0])
basis.cell = 3*np.ones(3)
self.assertAlmostEqual(basis.get_volume(), 27)
basis.cell = np.append(np.ones(3), 90-np.random.random(3)).flatten()
self.assertLess(basis.get_volume(), 1)
def test_repeat(self):
basis_Mg = CrystalStructure("Mg", bravais_basis="fcc", lattice_constant=4.2)
basis_O = CrystalStructure("O", bravais_basis="fcc", lattice_constant=4.2)
basis_O.set_scaled_positions(basis_O.get_scaled_positions()+[0., 0., 0.5])
basis = basis_Mg + basis_O
basis.center_coordinates_in_unit_cell()
basis.add_tag(selective_dynamics=[True, True, True])
basis.selective_dynamics[basis.select_index("O")] = [False, False, False]
len_before = len(basis)
sel_dyn_before = np.array(basis.selective_dynamics.list())
self.assertTrue(np.all(np.logical_not(np.all(sel_dyn_before[basis.select_index("O")], axis=1))))
self.assertTrue(np.all(np.all(sel_dyn_before[basis.select_index("Mg")], axis=1)))
basis.set_repeat([3, 3, 2])
sel_dyn_after = np.array(basis.selective_dynamics.list())
len_after = len(basis)
self.assertEqual(basis.get_spacegroup()["Number"], 225)
self.assertEqual(len_before * 18, len_after)
self.assertEqual(len(sel_dyn_before) * 18, len(sel_dyn_after))
self.assertTrue(np.all(np.logical_not(np.all(sel_dyn_after[basis.select_index("O")], axis=1))))
self.assertTrue(np.all(np.all(sel_dyn_after[basis.select_index("Mg")], axis=1)))
basis = basis_Mg + basis_O
basis.add_tag(spin=None)
basis.spin[basis.select_index("Mg")] = 1
basis.spin[basis.select_index("O")] = -1
self.assertTrue(np.array_equal(basis.spin[basis.select_index("Mg")].list(), 1 *
np.ones(len(basis.select_index("Mg")))))
self.assertTrue(np.array_equal(basis.spin[basis.select_index("O")].list(), -1 *
np.ones(len(basis.select_index("O")))))
basis.set_repeat(2)
self.assertTrue(np.array_equal(basis.spin[basis.select_index("Mg")].list(), 1 *
np.ones(len(basis.select_index("Mg")))))
self.assertTrue(np.array_equal(basis.spin[basis.select_index("O")].list(), -1 *
np.ones(len(basis.select_index("O")))))
basis = basis_Mg + basis_O
basis.add_tag(spin=None)
# Indices set as int
Mg_indices = np.array(basis.select_index("Mg"), dtype=int).tolist()
for ind in Mg_indices:
basis.spin[ind] = 1
O_indices = np.array(basis.select_index("O"), dtype=int).tolist()
for ind in O_indices:
basis.spin[ind] = -1
basis.set_repeat(2)
self.assertTrue(np.array_equal(basis.spin[basis.select_index("Mg")].list(), 1 *
np.ones(len(basis.select_index("Mg")))))
self.assertTrue(np.array_equal(basis.spin[basis.select_index("O")].list(), -1 *
np.ones(len(basis.select_index("O")))))
# Indices set as a numpy integer type (np.int64; the bare np.int alias was removed from numpy)
Mg_indices = np.array(basis.select_index("Mg"), dtype=np.int64)
for ind in Mg_indices:
basis.spin[ind] = 1
O_indices = np.array(basis.select_index("O"), dtype=np.int64)
for ind in O_indices:
basis.spin[ind] = -1
basis.set_repeat(2)
self.assertTrue(np.array_equal(basis.spin[basis.select_index("Mg")].list(), 1 *
np.ones(len(basis.select_index("Mg")))))
self.assertTrue(np.array_equal(basis.spin[basis.select_index("O")].list(), -1 *
np.ones(len(basis.select_index("O")))))
def test_boundary(self):
cell = 2.2 * np.identity(3)
NaCl = Atoms('NaCl', scaled_positions=[(0, 0, 0), (0.5, 0.5, 0.5)], cell=cell)
NaCl.set_repeat([3, 3, 3])
# NaCl.plot3d()
NaCl_bound = NaCl.get_boundary_region(0.2)
# NaCl_bound.plot3d()
def test_get_distance(self):
cell = 2.2 * np.identity(3)
NaCl = Atoms('NaCl', scaled_positions=[(0, 0, 0), (0.5, 0.5, 0.5)], cell=cell)
self.assertAlmostEqual(NaCl.get_distance(0, 1), 2.2*0.5*np.sqrt(3))
self.assertAlmostEqual(NaCl.get_distance(0, [0, 0, 0.5]), 0.5)
self.assertAlmostEqual(NaCl.get_distance([0, 0, 0], [0, 0, 0.5]), 0.5)
def test_get_neighborhood(self):
basis = Atoms('FeFe', scaled_positions=[(0, 0, 0), (0.5, 0.5, 0.5)], cell=np.identity(3))
neigh = basis.get_neighborhood([0, 0, 0.1])
self.assertEqual(neigh.distances[0], 0.1)
def test_get_neighbors(self):
cell = 2.2 * np.identity(3)
NaCl = Atoms('NaCl', scaled_positions=[(0, 0, 0), (0.5, 0.5, 0.5)], cell=cell)
# NaCl.repeat([3, 3, 3])
# NaCl.positions = [(1,1,1)]
boundary = NaCl.get_boundary_region(3.5)
extended_cell = NaCl + boundary
# extended_cell.plot3d()
nbr_dict = NaCl.get_neighbors(num_neighbors=12, t_vec=True)
basis = Atoms(symbols='FeFe', positions=[3*[0], 3*[1]], cell=2*np.eye(3))
neigh = basis.get_neighbors(include_boundary=False)
self.assertAlmostEqual(neigh.distances[0][0], np.sqrt(3))
basis.set_repeat(2)
self.assertAlmostEqual(neigh.distances[0][0], np.sqrt(3))
# print nbr_dict.distances
# print [set(s) for s in nbr_dict.shells]
def test_center_coordinates(self):
cell = 2.2 * np.identity(3)
NaCl = Atoms('NaCl', scaled_positions=[(0, 0, 0), (0.5, 0.5, 0.5)], cell=cell)
NaCl.set_repeat([3, 3, 3])
NaCl.positions += [2.2, 2.2, 2.2]
NaCl.center_coordinates_in_unit_cell(origin=-0.5)
self.assertTrue(-0.5 <= np.min(NaCl.get_scaled_positions()))
self.assertTrue(np.max(NaCl.get_scaled_positions()) < 0.5)
NaCl.center_coordinates_in_unit_cell(origin=0.)
self.assertTrue(0 <= np.min(NaCl.positions))
self.assertTrue(np.max(NaCl.get_scaled_positions()) < 1)
@unittest.skip("skip ovito because it is not installed in the test environment")
def test_analyse_ovito_cna_adaptive(self):
basis = Atoms('FeFe', scaled_positions=[(0, 0, 0), (0.5, 0.5, 0.5)], cell=np.identity(3))
self.assertEqual(basis.analyse_ovito_cna_adaptive()['CommonNeighborAnalysis.counts.BCC'], 2)
@unittest.skip("skip ovito because it is not installed in the test environment")
def test_analyse_ovito_centro_symmetry(self):
basis = Atoms('FeFe', scaled_positions=[(0, 0, 0), (0.5, 0.5, 0.5)], cell=np.identity(3))
self.assertTrue(all(basis.analyse_ovito_centro_symmetry()==np.array([0.75, 0.75])))
@unittest.skip("skip ovito because it is not installed in the test environment")
def test_analyse_ovito_voronoi_volume(self):
basis = Atoms('FeFe', scaled_positions=[(0, 0, 0), (0.5, 0.5, 0.5)], cell=np.identity(3))
self.assertTrue(all(basis.analyse_ovito_voronoi_volume()==np.array([0.5, 0.5])))
@unittest.skip("skip nglview because it is not installed in the test environment")
def test_plot3d(self):
basis = Atoms('FeFe', scaled_positions=[(0, 0, 0), (0.5, 0.5, 0.5)], cell=
|
np.identity(3)
|
numpy.identity
|
"""Classes for making pretty diagnostic plots for redmapper catalogs
"""
import os
import numpy as np
import fitsio
import esutil
import scipy.optimize
import scipy.ndimage
import copy
from .configuration import Configuration
from .utilities import gaussFunction, CubicSpline, interpol
class SpecPlot(object):
"""
Class to make plots comparing the cluster spec-z with cluster z_lambda (photo-z).
In the case of real data, the most likely central is used as a proxy for
the cluster redshift (so miscenters look like wrong redshifts). In the
case of mock data, the mean redshift of the members can be used instead.
"""
def __init__(self, conf, binsize=0.02, nsig=4.0):
"""
Instantiate a SpecPlot.
Parameters
----------
conf: `redmapper.Configuration` or `str`
Configuration object or filename
binsize: `float`, optional
Redshift smoothing bin size. Default is 0.02.
nsig: `float`, optional
Number of sigma to be considered a redshift outlier. Default is 4.0.
"""
if not isinstance(conf, Configuration):
self.config = Configuration(conf)
else:
self.config = conf
self.binsize = binsize
self.nsig = nsig
self._withversion = False
def plot_cluster_catalog(self, cat, title=None, figure_return=False, withversion=False):
"""
Plot the spectroscopic comparison for a cluster catalog, using the
default catalog values.
This plot will compare the catalog cg_spec_z (most likely central
galaxy redshift) with the cluster z_lambda.
Parameters
----------
cat: `redmapper.ClusterCatalog`
Cluster catalog to plot.
title: `str`, optional
Title string for plot. Default is None.
figure_return: `bool`, optional
Return the figure instead of saving a png. Default is False.
withversion: `bool`, optional
Plots should be saved with the version string.
Returns
-------
fig: `matplotlib.Figure` or `None`
Figure to show, if figure_return is True.
"""
return self.plot_values(cat.cg_spec_z, cat.z_lambda, cat.z_lambda_e, title=title,
figure_return=figure_return, withversion=withversion)
def plot_cluster_catalog_from_members(self, cat, mem, title=None, figure_return=False,
withversion=False):
"""
Plot the spectroscopic comparison for a cluster catalog, using the
average member redshift.
This plot will compare the catalog median redshift with the cluster
z_lambda. Generally only useful for simulated catalogs where you have
complete coverage.
Parameters
----------
cat: `redmapper.ClusterCatalog`
Cluster catalog to plot.
mem: `redmapper.Catalog`
Member catalog associated with cat.
title: `str`, optional
Title string for plot. Default is None.
figure_return: `bool`, optional
Return the figure instead of saving a png. Default is False.
withversion: `bool`, optional
Plots should be saved with the version string.
Returns
-------
fig: `matplotlib.Figure` or `None`
Figure to show, if figure_return is True.
"""
# Not sure why this is necessary on the mocks, but there are some -1s...
ok, = np.where(mem.zspec > 0.0)
mem = mem[ok]
a, b = esutil.numpy_util.match(cat.mem_match_id, mem.mem_match_id)
h, rev = esutil.stat.histogram(a, rev=True)
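# esutil's rev array follows the IDL REVERSE_INDICES convention: the entries
# that fall in histogram bin i sit at positions rev[rev[i]:rev[i + 1]] of the
# input, which is how the loop below collects each cluster's matched members.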
mem_zmed = np.zeros(cat.size)
for i in range(h.size):
if h[i] == 0:
continue
i1a = rev[rev[i]: rev[i + 1]]
mem_zmed[i] = np.median(mem.zspec[i1a])
return self.plot_values(mem_zmed, cat.z_lambda, cat.z_lambda_e, title=title,
figure_return=figure_return, withversion=withversion)
def plot_values(self, z_spec, z_phot, z_phot_e, name=r'z_\lambda', specname=r'z_{\mathrm{spec}}',
title=None, figure_return=False, calib_zrange=None, withversion=False):
"""
Make a pretty spectroscopic plot from an arbitrary list of values.
Parameters
----------
z_spec: `np.array`
Float array of spectroscopic redshifts
z_phot: `np.array`
Float array of photometric redshifts
z_phot_e: `np.array`
Float array of photometric redshift errors
name: `str`, optional
Name of the photo-z field for axis labels. Default is 'z_lambda' (rendered as LaTeX).
specname: `str`, optional
Name of the spectroscopic redshift field for axis labels. Default is 'z_spec' (rendered as LaTeX).
title: `str`, optional
Title string. Default is None
figure_return: `bool`, optional
Return the figure instead of saving a png. Default is False.
calib_zrange: `np.array` or `list`, optional
2-element array with calibration redshift range to mark. Default is None.
withversion: `bool`, optional
Plots should be saved with the version string.
Returns
-------
fig: `matplotlib.Figure` or `None`
Figure to show, if figure_return is True.
"""
# We will need to remove any underscores in name for some usage...
name_clean = name.replace('_', '')
name_clean = name_clean.replace('^', '')
name_e = rf'\delta_{{{name}}}'
use, = np.where(z_spec > 0.0)
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator
plot_xrange = np.array([0.0, self.config.zrange[1] + 0.1])
fig = plt.figure(figsize=(8, 6))
fig.clf()
ax = fig.add_subplot(211)
# Need to do the map, then need to get the levels and make a contour
# z_bins, z_map = self._make_photoz_map(z_spec[use], z_phot[use])
#@jacobic: mara prefers z_spec on x axis
z_bins, z_map = self._make_photoz_map(z_phot[use], z_spec[use])
nlevs = 5
levbinsize = (1.0 - 0.1) / nlevs
levs = np.arange(nlevs) * levbinsize + 0.1
levs = np.append(levs, 10)
levs = -levs[::-1]
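# Both the levels and the map are negated because contourf requires
# increasing levels; this way the densest regions of the photo-z map are
# filled with the darkest colors in the list below.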
colors = ['#000000', '#333333', '#666666', '#9A9A9A', '#CECECE', '#FFFFFF']
ax.contourf(z_bins, z_bins, -z_map.T, levs, colors=colors)
ax.plot(plot_xrange, plot_xrange, 'b--', linewidth=3)
ax.set_xlim(plot_xrange)
ax.set_ylim(plot_xrange)
ax.tick_params(axis='y', which='major', labelsize=14, length=5, left=True, right=True, direction='in')
ax.tick_params(axis='y', which='minor', left=True, right=True, direction='in')
ax.tick_params(axis='x', labelbottom=False, which='major', length=5, direction='in', bottom=True, top=True)
ax.tick_params(axis='x', which='minor', bottom=True, top=True, direction='in')
minorLocator = MultipleLocator(0.05)
ax.yaxis.set_minor_locator(minorLocator)
minorLocator = MultipleLocator(0.02)
ax.xaxis.set_minor_locator(minorLocator)
# ax.set_ylabel(r'$%s$' % (specname), fontsize=16)
ax.set_ylabel(fr'${name}$', fontsize=16)
if calib_zrange is not None:
if len(calib_zrange) == 2:
ylim = ax.get_ylim()
ax.plot([calib_zrange[0], calib_zrange[0]], ylim, 'k:')
ax.plot([calib_zrange[1], calib_zrange[1]], ylim, 'k:')
# Plot the outliers
bad, = np.where(np.abs(z_phot[use] - z_spec[use]) / z_phot_e[use] > self.nsig)
self.fracout = float(bad.size) / float(use.size)
if bad.size > 0:
# ax.plot(z_phot[use[bad]], z_spec[use[bad]], 'r*')
ax.plot(z_spec[use[bad]], z_phot[use[bad]], 'r*',
label=rf'${self.nsig}\sigma$ Outliers ({self.fracout * 100:.2f}%)')
ax.legend(loc=4, fontsize=14, title=f'{len(z_spec)} Spectroscopic Clusters')
# fout_label = fr'$f_\mathrm{{out}}({self.nsig:.2f} std) ={fracout:7.4f}$'
# fout_label = fr'$f_\mathrm{{out}}({specname} - {name} > {int(self.nsig):d} {name_e}) = {fracout:7.4f}$'
# ax.annotate(fout_label, (plot_xrange[0] + 0.1, self.config.zrange[1]),
# xycoords='data', ha='left', va='top', fontsize=16)
ax.set_title(title)
# Compute the bias / scatter
# h, rev = esutil.stat.histogram(z_phot[use], min=0.0, max=self.config.zrange[1]-0.001, rev=True, binsize=self.binsize)
h, rev = esutil.stat.histogram(z_spec[use], min=0.0,
max=self.config.zrange[1]-0.001, rev=True, binsize=self.binsize)
bins = np.arange(h.size) * self.binsize + 0.0 + self.binsize/2.
bias = np.zeros(h.size)
scatter = np.zeros_like(bias)
errs = np.zeros_like(bias)
gd, = np.where(h >= 3)
for ind in gd:
i1a = rev[rev[ind]: rev[ind + 1]]
bias[ind] =
|
np.median(z_spec[use[i1a]] - z_phot[use[i1a]])
|
numpy.median
|
import numpy as np
from numpy import random
from scipy.interpolate import interp1d
import pandas as pd
msun = 1.9891e30
rsun = 695500000.0
G = 6.67384e-11
AU = 149597870700.0
def component_noise(tessmag, readmod=1, zodimod=1):
sys = 59.785
star_mag_level, star_noise_level = np.array(
[
[4.3885191347753745, 12.090570910640581],
[12.023294509151416, 467.96434635620614],
[17.753743760399338, 7779.603209291808],
]
).T
star_pars = np.polyfit(star_mag_level, np.log10(star_noise_level), 1)
zodi_mag_level, zodi_noise_level = np.array(
[
[8.686356073211314, 18.112513551189224],
[13.08901830282862, 688.2812796087189],
[16.68801996672213, 19493.670323892282],
]
).T
zodi_pars = np.polyfit(zodi_mag_level, np.log10(zodi_noise_level), 1)
read_mag_level, read_noise_level = np.array(
[
[8.476705490848586, 12.31474807751376],
[13.019134775374376, 522.4985702369348],
[17.841098169717142, 46226.777232915076],
]
).T
read_pars = np.polyfit(read_mag_level, np.log10(read_noise_level), 1)
c1, c2, c3, c4 = (
10 ** (tessmag * star_pars[0] + star_pars[1]),
10 ** (tessmag * zodi_pars[0] + zodi_pars[1]),
10 ** (tessmag * read_pars[0] + read_pars[1]),
sys,
)
# quadrature sum of star, zodiacal, read, and systematic noise; each
# modifier scales its matching term (zodimod -> c2, readmod -> c3)
return np.sqrt(
c1 ** 2 + (zodimod * c2) ** 2 + (readmod * c3) ** 2 + c4 ** 2
)
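# Minimal usage sketch (illustrative; units follow the tabulated anchor noise
# levels hard-coded above). The modifiers scale the zodiacal and read-noise
# components independently:
# >>> component_noise(10.0)  # single TESS magnitude
# >>> component_noise(np.array([8.0, 10.0, 12.0]), zodimod=2.0)  # vectorized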
def rndm(a, b, g, size=1):
"""Power-law gen for pdf(x)\propto x^{g-1} for a<=x<=b"""
r = np.random.random(size=size)
ag, bg = a ** g, b ** g
return (ag + (bg - ag) * r) ** (1.0 / g)
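# rndm samples a bounded power law by inverting the CDF: for pdf(x) ~ x^(g-1)
# on [a, b], the inverse CDF is (a^g + (b^g - a^g) u)^(1/g) with u uniform on
# [0, 1), which is exactly the expression returned above. For example (values
# illustrative), rndm(6, 22, -1.7, size=1000) yields radii piled up toward
# the lower bound of 6.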
def Fressin13_select_extrap(nselect=1):
# create a pot of Fressin occurrence numbers (balls)
balls = np.array([])
# pot 1 contains rp=0.8-1.25, p=0.8-2
p1 = np.zeros(180) + 1
# pot 2 contains rp=1.25-2.0, p=0.8-2
p2 = np.zeros(170) + 2
# pot 3 contains rp=2-4, p=0.8-2
p3 = np.zeros(35) + 3
# pot 4 contains rp=4-6, p=0.8-2
p4 = np.zeros(4) + 4
# pot 5 contains rp=6-22, p=0.8-2
p5 = np.zeros(15) + 5
# pot 6 contains rp=0.8-1.25, p=2-3.4
p6 = np.zeros(610) + 6
# pot 7 contains rp=1.25-2.0, p=2-3.4
p7 = np.zeros(740) + 7
# pot 8 contains rp=2-4, p=2-3.4
p8 = np.zeros(180) + 8
# pot 9 contains rp=4-6, p=2-3.4
p9 = np.zeros(6) + 9
# pot 10 contains rp=6-22, p=2-3.4
p10 = np.zeros(67) + 10
# pot 11 contains rp=0.8-1.25, p=3.4-5.9
p11 = np.zeros(1720) + 11
# pot 12 contains rp=1.25-2.0, p=3.4-5.9
p12 = np.zeros(1490) + 12
# pot 13 contains rp=2-4, p=3.4-5.9
p13 = np.zeros(730) + 13
# pot 14 contains rp=4-6, p=3.4-5.9
p14 = np.zeros(110) + 14
# pot 15 contains rp=6-22, p=3.4-5.9
p15 = np.zeros(170) + 15
# pot 16 contains rp=0.8-1.25, p=5.9-10
p16 = np.zeros(2700) + 16
# pot 17 contains rp=1.25-2.0, p=5.9-10
p17 = np.zeros(2900) + 17
# pot 18 contains rp=2-4, p=5.9-10
p18 = np.zeros(1930) + 18
# pot 19 contains rp=4-6, p=5.9-10
p19 = np.zeros(91) + 19
# pot 20 contains rp=6-22, p=5.9-10
p20 = np.zeros(180) + 20
# pot 21 contains rp=0.8-1.25, p=10-17
p21 = np.zeros(2700) + 21
# pot 22 contains rp=1.25-2.0, p=10-17
p22 = np.zeros(4300) + 22
# pot 23 contains rp=2-4, p=10-17
p23 = np.zeros(3670) + 23
# pot 24 contains rp=4-6, p=10-17
p24 = np.zeros(290) + 24
# pot 25 contains rp=6-22, p=10-17
p25 = np.zeros(270) + 25
# pot 26 contains rp=0.8-1.25, p=17-29
p26 = np.zeros(2930) + 26
# pot 27 contains rp=1.25-2.0, p=17-29
p27 = np.zeros(4490) + 27
# pot 28 contains rp=2-4, p=17-29
p28 = np.zeros(5290) + 28
# pot 29 contains rp=4-6, p=17-29
p29 = np.zeros(320) + 29
# pot 30 contains rp=6-22, p=17-29
p30 = np.zeros(230) + 30
# pot 31 contains rp=0.8-1.25, p=29-50
p31 = np.zeros(4080) + 31
# pot 32 contains rp=1.25-2.0, p=29-50
p32 = np.zeros(5290) + 32
# pot 33 contains rp=2-4, p=29-50
p33 = np.zeros(6450) + 33
# pot 34 contains rp=4-6, p=29-50
p34 = np.zeros(490) + 34
# pot 35 contains rp=6-22, p=29-50
p35 = np.zeros(350) + 35
# pot 36 contains rp=0.8-1.25, p=50-85
p36 = np.zeros(3460) + 36
# pot 37 contains rp=1.25-2.0, p=50-85
p37 = np.zeros(3660) + 37
# pot 38 contains rp=2-4, p=50-85
p38 = np.zeros(5250) + 38
# pot 39 contains rp=4-6, p=50-85
p39 = np.zeros(660) + 39
# pot 40 contains rp=6-22, p=50-85
p40 = np.zeros(710) + 40
# pot 41 contains rp=0.8-1.25, p=50-150
p41 = np.zeros(3460) + 41
# pot 42 contains rp=1.25-2.0, p=50-150
p42 = np.zeros(3660) + 42
# pot 43 contains rp=2-4, p=50-150
p43 = np.zeros(5250) + 43
# pot 44 contains rp=4-6, p=50-150
p44 = np.zeros(660) + 44
# pot 45 contains rp=6-22, p=50-150
p45 = np.zeros(710) + 45
# pot 46 contains rp=0.8-1.25, p=150-270
p46 = np.zeros(3460) + 46
# pot 47 contains rp=1.25-2.0, p=150-270
p47 = np.zeros(3660) + 47
# pot 48 contains rp=2-4, p=150-270
p48 = np.zeros(5250) + 48
# pot 49 contains rp=4-6, p=150-270
p49 = np.zeros(660) + 49
# pot 50 contains rp=6-22, p=150-270
p50 = np.zeros(710) + 50
# pot 51 contains rp=0.8-1.25, p=270-480
p51 = np.zeros(3460) + 51
# pot 52 contains rp=1.25-2.0, p=270-480
p52 = np.zeros(3660) + 52
# pot 53 contains rp=2-4, p=270-480
p53 = np.zeros(5250) + 53
# pot 54 contains rp=4-6, p=270-480
p54 = np.zeros(660) + 54
# pot 55 contains rp=6-22, p=270-480
p55 = np.zeros(710) + 55
balls = np.r_[
balls,
p1,
p2,
p3,
p4,
p5,
p6,
p7,
p8,
p9,
p10,
p11,
p12,
p13,
p14,
p15,
p16,
p17,
p18,
p19,
p20,
p21,
p22,
p23,
p24,
p25,
p26,
p27,
p28,
p29,
p30,
p31,
p32,
p33,
p34,
p35,
p36,
p37,
p38,
p39,
p40,
p41,
p42,
p43,
p44,
p45,
p46,
p47,
p48,
p49,
p50,
p51,
p52,
p53,
p54,
p55,
]
# lookup for what the balls mean
# outputs radlow, radhigh, Plow, Phigh
ball_lookup = {
0: [0.0, 0.0, 0.0, 0.0],
1: [0.8, 1.25, 0.8, 2.0],
2: [1.25, 2.0, 0.8, 2.0],
3: [2.0, 4.0, 0.8, 2.0],
4: [4.0, 6.0, 0.8, 2.0],
5: [6.0, 22.0, 0.8, 2.0],
6: [0.8, 1.25, 2.0, 3.4],
7: [1.25, 2.0, 2.0, 3.4],
8: [2.0, 4.0, 2.0, 3.4],
9: [4.0, 6.0, 2.0, 3.4],
10: [6.0, 22.0, 2.0, 3.4],
11: [0.8, 1.25, 3.4, 5.9],
12: [1.25, 2.0, 3.4, 5.9],
13: [2.0, 4.0, 3.4, 5.9],
14: [4.0, 6.0, 3.4, 5.9],
15: [6.0, 22.0, 3.4, 5.9],
16: [0.8, 1.25, 5.9, 10.0],
17: [1.25, 2.0, 5.9, 10.0],
18: [2.0, 4.0, 5.9, 10.0],
19: [4.0, 6.0, 5.9, 10.0],
20: [6.0, 22.0, 5.9, 10.0],
21: [0.8, 1.25, 10.0, 17.0],
22: [1.25, 2.0, 10.0, 17.0],
23: [2.0, 4.0, 10.0, 17.0],
24: [4.0, 6.0, 10.0, 17.0],
25: [6.0, 22.0, 10.0, 17.0],
26: [0.8, 1.25, 17.0, 29.0],
27: [1.25, 2.0, 17.0, 29.0],
28: [2.0, 4.0, 17.0, 29.0],
29: [4.0, 6.0, 17.0, 29.0],
30: [6.0, 22.0, 17.0, 29.0],
31: [0.8, 1.25, 29.0, 50.0],
32: [1.25, 2.0, 29.0, 50.0],
33: [2.0, 4.0, 29.0, 50.0],
34: [4.0, 6.0, 29.0, 50.0],
35: [6.0, 22.0, 29.0, 50.0],
36: [0.8, 1.25, 50.0, 85.0],
37: [1.25, 2.0, 50.0, 85.0],
38: [2.0, 4.0, 50.0, 85.0],
39: [4.0, 6.0, 50.0, 85.0],
40: [6.0, 22.0, 50.0, 85.0],
41: [0.8, 1.25, 50.0, 150.0],
42: [1.25, 2.0, 50.0, 150.0],
43: [2.0, 4.0, 50.0, 150.0],
44: [4.0, 6.0, 50.0, 150.0],
45: [6.0, 22.0, 50.0, 150.0],
46: [0.8, 1.25, 150.0, 270.0],
47: [1.25, 2.0, 150.0, 270.0],
48: [2.0, 4.0, 150.0, 270.0],
49: [4.0, 6.0, 150.0, 270.0],
50: [6.0, 22.0, 150.0, 270.0],
51: [0.8, 1.25, 270.0, 480.0],
52: [1.25, 2.0, 270.0, 480.0],
53: [2.0, 4.0, 270.0, 480.0],
54: [4.0, 6.0, 270.0, 480.0],
55: [6.0, 22.0, 270.0, 480.0],
}
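# Sampling scheme: each pot label appears in `balls` a number of times
# proportional to its occurrence count, so random.choice over the labels
# reproduces the occurrence-weighted distribution without rejection; radius
# and period are then drawn within the bin looked up for each label.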
rsamps = random.choice(balls, size=nselect)
radius = np.zeros(nselect)
period = np.zeros(nselect)
for i, samp in enumerate(rsamps):
rl, rh, pl, ph = ball_lookup[samp]
if samp in [5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55]:
# check for giant planets
# if a giant planet, then draw the radius from a power law
radius[i] = rndm(6, 22, -1.7)
else:
radius[i] = random.uniform(low=rl, high=rh)
period[i] = random.uniform(low=pl, high=ph)
return radius, period
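# Hedged usage sketch (function defined above; numbers illustrative):
# >>> radius, period = Fressin13_select_extrap(nselect=100)
# >>> radius.shape, period.shape  # both (100,); radii in Earth radii and
# >>> # periods in days, per the bin definitions above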
def Dressing15_select_extrap(nselect=1):
"""
period bins = 0.5, 0.91, 1.66, 3.02, 5.49, 10.0, 18.2, 33.1, 60.3, 110., 200. (extrapolated here out to 365.)
"""
# create a pot for dressing numbers (balls)
balls = np.array([])
# pot 1 contains rp=0.5-1.0, p=0.5-0.91
p1 = np.zeros(400) + 1
# pot 2 contains rp=1.0-1.5, p=0.5-0.91
p2 = np.zeros(460) + 2
# pot 3 contains rp=1.5-2.0, p=0.5-0.91
p3 = np.zeros(61) + 3
# pot 4 contains rp=2.0-2.5, p=0.5-0.91
p4 = np.zeros(2) + 4
# pot 5 contains rp=2.5-3.0, p=0.5-0.91
p5 = np.zeros(0) + 5
# pot 6 contains rp=3.0-3.5, p=0.5-0.91
p6 = np.zeros(0) + 6
# pot 7 contains rp=3.5-4.0, p=0.5-0.91
p7 = np.zeros(0) + 7
# pot 1 contains rp=0.5-1.0, p=0.91, 1.66
p8 = np.zeros(1500) + 8
# pot 2 contains rp=1.0-1.5, p=0.91, 1.66
p9 = np.zeros(1400) + 9
# pot 3 contains rp=1.5-2.0, p=0.91, 1.66
p10 = np.zeros(270) + 10
# pot 4 contains rp=2.0-2.5, p=0.91, 1.66
p11 = np.zeros(9) + 11
# pot 5 contains rp=2.5-3.0, p=0.91, 1.66
p12 = np.zeros(4) + 12
# pot 6 contains rp=3.0-3.5, p=0.91, 1.66
p13 = np.zeros(6) + 13
# pot 7 contains rp=3.5-4.0, p=0.91, 1.66
p14 = np.zeros(8) + 14
# pot 1 contains rp=0.5-1.0, p=1.66, 3.02
p15 = np.zeros(4400) + 15
# pot 2 contains rp=1.0-1.5, p=1.66, 3.02
p16 = np.zeros(3500) + 16
# pot 3 contains rp=1.5-2.0, p=1.66, 3.02
p17 = np.zeros(1200) + 17
# pot 4 contains rp=2.0-2.5, p=1.66, 3.02
p18 = np.zeros(420) + 18
# pot 5 contains rp=2.5-3.0, p=1.66, 3.02
p19 = np.zeros(230) + 19
# pot 6 contains rp=3.0-3.5, p=1.66, 3.02
p20 = np.zeros(170) + 20
# pot 7 contains rp=3.5-4.0, p=1.66, 3.02
p21 = np.zeros(180) + 21
# pot 1 contains rp=0.5-1.0, p=3.02, 5.49
p22 = np.zeros(5500) + 22
# pot 2 contains rp=1.0-1.5, p=3.02, 5.49
p23 = np.zeros(5700) + 23
# pot 3 contains rp=1.5-2.0, p=3.02, 5.49
p24 = np.zeros(2500) + 24
# pot 4 contains rp=2.0-2.5, p=3.02, 5.49
p25 = np.zeros(1800) + 25
# pot 5 contains rp=2.5-3.0, p=3.02, 5.49
p26 = np.zeros(960) + 26
# pot 6 contains rp=3.0-3.5, p=3.02, 5.49
p27 = np.zeros(420) + 27
# pot 7 contains rp=3.5-4.0, p=3.02, 5.49
p28 = np.zeros(180) + 28
# pot 1 contains rp=0.5-1.0, p=5.49, 10.0
p29 = np.zeros(10000) + 29
# pot 2 contains rp=1.0-1.5, p=5.49, 10.0
p30 = np.zeros(10000) + 30
# pot 3 contains rp=1.5-2.0, p=5.49, 10.0
p31 = np.zeros(6700) + 31
# pot 4 contains rp=2.0-2.5, p=5.49, 10.0
p32 = np.zeros(6400) + 32
# pot 5 contains rp=2.5-3.0, p=5.49, 10.0
p33 = np.zeros(2700) + 33
# pot 6 contains rp=3.0-3.5, p=5.49, 10.0
p34 = np.zeros(1100) + 34
# pot 7 contains rp=3.5-4.0, p=5.49, 10.0
p35 = np.zeros(360) + 35
# pot 1 contains rp=0.5-1.0, p=10.0, 18.2
p36 = np.zeros(12000) + 36
# pot 2 contains rp=1.0-1.5, p=10.0, 18.2
p37 = np.zeros(13000) + 37
# pot 3 contains rp=1.5-2.0, p=10.0, 18.2
p38 = np.zeros(13000) + 38
# pot 4 contains rp=2.0-2.5, p=10.0, 18.2
p39 = np.zeros(9300) + 39
# pot 5 contains rp=2.5-3.0, p=10.0, 18.2
p40 = np.zeros(3800) + 40
# pot 6 contains rp=3.0-3.5, p=10.0, 18.2
p41 = np.zeros(1400) + 41
# pot 7 contains rp=3.5-4.0, p=10.0, 18.2
p42 = np.zeros(510) + 42
# pot 1 contains rp=0.5-1.0, p=18.2, 33.1
p43 = np.zeros(11000) + 43
# pot 2 contains rp=1.0-1.5, p=18.2, 33.1
p44 = np.zeros(16000) + 44
# pot 3 contains rp=1.5-2.0, p=18.2, 33.1
p45 = np.zeros(14000) + 45
# pot 4 contains rp=2.0-2.5, p=18.2, 33.1
p46 = np.zeros(10000) + 46
# pot 5 contains rp=2.5-3.0, p=18.2, 33.1
p47 = np.zeros(4600) + 47
# pot 6 contains rp=3.0-3.5, p=18.2, 33.1
p48 = np.zeros(810) + 48
# pot 7 contains rp=3.5-4.0, p=18.2, 33.1
p49 = np.zeros(320) + 49
# pot 1 contains rp=0.5-1.0, p=33.1, 60.3
p50 = np.zeros(6400) + 50
# pot 2 contains rp=1.0-1.5, p=33.1, 60.3
p51 = np.zeros(6400) + 51
# pot 3 contains rp=1.5-2.0, p=33.1, 60.3
p52 = np.zeros(12000) + 52
# pot 4 contains rp=2.0-2.5, p=33.1, 60.3
p53 = np.zeros(12000) + 53
# pot 5 contains rp=2.5-3.0, p=33.1, 60.3
p54 = np.zeros(5800) + 54
# pot 6 contains rp=3.0-3.5, p=33.1, 60.3
p55 = np.zeros(1600) + 55
# pot 7 contains rp=3.5-4.0, p=33.1, 60.3
p56 = np.zeros(210) + 56
# pot 1 contains rp=0.5-1.0, p=60.3, 110.
p57 = np.zeros(10000) + 57
# pot 2 contains rp=1.0-1.5, p=60.3, 110.
p58 = np.zeros(10000) + 58
# pot 3 contains rp=1.5-2.0, p=60.3, 110.
p59 = np.zeros(8300) + 59
# pot 4 contains rp=2.0-2.5, p=60.3, 110.
p60 = np.zeros(9600) + 60
# pot 5 contains rp=2.5-3.0, p=60.3, 110.
p61 = np.zeros(4200) + 61
# pot 6 contains rp=3.0-3.5, p=60.3, 110.
p62 = np.zeros(1700) + 62
# pot 7 contains rp=3.5-4.0, p=60.3, 110.
p63 = np.zeros(420) + 63
# pot 1 contains rp=0.5-1.0, p=110., 200.
p64 = np.zeros(19000) + 64
# pot 2 contains rp=1.0-1.5, p=110., 200.
p65 = np.zeros(19000) + 65
# pot 3 contains rp=1.5-2.0, p=110., 200.
p66 = np.zeros(10000) + 66
# pot 4 contains rp=2.0-2.5, p=110., 200.
p67 = np.zeros(4500) + 67
# pot 5 contains rp=2.5-3.0, p=110., 200.
p68 = np.zeros(1100) + 68
# pot 6 contains rp=3.0-3.5, p=110., 200.
p69 = np.zeros(160) + 69
# pot 7 contains rp=3.5-4.0, p=110., 200.
p70 = np.zeros(80) + 70
# pot 1 contains rp=0.5-1.0, p=200., 365.
p71 = np.zeros(19000) + 71
# pot 2 contains rp=1.0-1.5, p=200., 365.
p72 = np.zeros(19000) + 72
# pot 3 contains rp=1.5-2.0, p=200., 365.
p73 = np.zeros(10000) + 73
# pot 4 contains rp=2.0-2.5, p=200., 365.
p74 = np.zeros(4500) + 74
# pot 5 contains rp=2.5-3.0, p=200., 365.
p75 = np.zeros(1100) + 75
# pot 6 contains rp=3.0-3.5, p=200., 365.
p76 = np.zeros(160) + 76
# pot 7 contains rp=3.5-4.0, p=200., 365.
p77 = np.zeros(80) + 77
balls = np.r_[
balls,
p1,
p2,
p3,
p4,
p5,
p6,
p7,
p8,
p9,
p10,
p11,
p12,
p13,
p14,
p15,
p16,
p17,
p18,
p19,
p20,
p21,
p22,
p23,
p24,
p25,
p26,
p27,
p28,
p29,
p30,
p31,
p32,
p33,
p34,
p35,
p36,
p37,
p38,
p39,
p40,
p41,
p42,
p43,
p44,
p45,
p46,
p47,
p48,
p49,
p50,
p51,
p52,
p53,
p54,
p55,
p56,
p57,
p58,
p59,
p60,
p61,
p62,
p63,
p64,
p65,
p66,
p67,
p68,
p69,
p70,
p71,
p72,
p73,
p74,
p75,
p76,
p77,
]
# lookup for what the balls mean
# outputs radlow, radhigh, Plow, Phigh
# 0.5, 0.91, 1.66, 3.02, 5.49, 10.0, 18.2, 33.1, 60.3, 110., 200.
ball_lookup = {
1: [0.5, 1.0, 0.5, 0.91],
2: [1.0, 1.5, 0.5, 0.91],
3: [1.5, 2.0, 0.5, 0.91],
4: [2.0, 2.5, 0.5, 0.91],
5: [2.5, 3.0, 0.5, 0.91],
6: [3.0, 3.5, 0.5, 0.91],
7: [3.5, 4.0, 0.5, 0.91],
8: [0.5, 1.0, 0.91, 1.66],
9: [1.0, 1.5, 0.91, 1.66],
10: [1.5, 2.0, 0.91, 1.66],
11: [2.0, 2.5, 0.91, 1.66],
12: [2.5, 3.0, 0.91, 1.66],
13: [3.0, 3.5, 0.91, 1.66],
14: [3.5, 4.0, 0.91, 1.66],
15: [0.5, 1.0, 1.66, 3.02],
16: [1.0, 1.5, 1.66, 3.02],
17: [1.5, 2.0, 1.66, 3.02],
18: [2.0, 2.5, 1.66, 3.02],
19: [2.5, 3.0, 1.66, 3.02],
20: [3.0, 3.5, 1.66, 3.02],
21: [3.5, 4.0, 1.66, 3.02],
22: [0.5, 1.0, 3.02, 5.49],
23: [1.0, 1.5, 3.02, 5.49],
24: [1.5, 2.0, 3.02, 5.49],
25: [2.0, 2.5, 3.02, 5.49],
26: [2.5, 3.0, 3.02, 5.49],
27: [3.0, 3.5, 3.02, 5.49],
28: [3.5, 4.0, 3.02, 5.49],
29: [0.5, 1.0, 5.49, 10.0],
30: [1.0, 1.5, 5.49, 10.0],
31: [1.5, 2.0, 5.49, 10.0],
32: [2.0, 2.5, 5.49, 10.0],
33: [2.5, 3.0, 5.49, 10.0],
34: [3.0, 3.5, 5.49, 10.0],
35: [3.5, 4.0, 5.49, 10.0],
36: [0.5, 1.0, 10.0, 18.2],
37: [1.0, 1.5, 10.0, 18.2],
38: [1.5, 2.0, 10.0, 18.2],
39: [2.0, 2.5, 10.0, 18.2],
40: [2.5, 3.0, 10.0, 18.2],
41: [3.0, 3.5, 10.0, 18.2],
42: [3.5, 4.0, 10.0, 18.2],
43: [0.5, 1.0, 18.2, 33.1],
44: [1.0, 1.5, 18.2, 33.1],
45: [1.5, 2.0, 18.2, 33.1],
46: [2.0, 2.5, 18.2, 33.1],
47: [2.5, 3.0, 18.2, 33.1],
48: [3.0, 3.5, 18.2, 33.1],
49: [3.5, 4.0, 18.2, 33.1],
50: [0.5, 1.0, 33.1, 60.3],
51: [1.0, 1.5, 33.1, 60.3],
52: [1.5, 2.0, 33.1, 60.3],
53: [2.0, 2.5, 33.1, 60.3],
54: [2.5, 3.0, 33.1, 60.3],
55: [3.0, 3.5, 33.1, 60.3],
56: [3.5, 4.0, 33.1, 60.3],
57: [0.5, 1.0, 60.3, 110.0],
58: [1.0, 1.5, 60.3, 110.0],
59: [1.5, 2.0, 60.3, 110.0],
60: [2.0, 2.5, 60.3, 110.0],
61: [2.5, 3.0, 60.3, 110.0],
62: [3.0, 3.5, 60.3, 110.0],
63: [3.5, 4.0, 60.3, 110.0],
64: [0.5, 1.0, 110.0, 200.0],
65: [1.0, 1.5, 110.0, 200.0],
66: [1.5, 2.0, 110.0, 200.0],
67: [2.0, 2.5, 110.0, 200.0],
68: [2.5, 3.0, 110.0, 200.0],
69: [3.0, 3.5, 110.0, 200.0],
70: [3.5, 4.0, 110.0, 200.0],
71: [0.5, 1.0, 200.0, 365.0],
72: [1.0, 1.5, 200.0, 365.0],
73: [1.5, 2.0, 200.0, 365.0],
74: [2.0, 2.5, 200.0, 365.0],
75: [2.5, 3.0, 200.0, 365.0],
76: [3.0, 3.5, 200.0, 365.0],
77: [3.5, 4.0, 200.0, 365.0],
}
rsamps = random.choice(balls, size=nselect)
radius = np.zeros(nselect)
period = np.zeros(nselect)
for i, samp in enumerate(rsamps):
rl, rh, pl, ph = ball_lookup[samp]
radius[i] = random.uniform(low=rl, high=rh)
period[i] = random.uniform(low=pl, high=ph)
return radius, period
def Petigura18_select(nselect=1):
# create a pot of Petigura occurrence numbers (balls)
balls =
|
np.array([])
|
numpy.array
|
"""
Generate figures for the DeepCytometer paper for v7 of the pipeline.
Partly deprecated by klf14_b6ntac_exp_0110_paper_figures_v8.py:
* Some figures have been updated to use v8 of the pipeline in the paper.
Code cannibalised from:
* klf14_b6ntac_exp_0097_full_slide_pipeline_v7.py
"""
"""
This file is part of Cytometer
Copyright 2021 Medical Research Council
SPDX-License-Identifier: Apache-2.0
Author: <NAME> <<EMAIL>>
"""
# script name to identify this experiment
experiment_id = 'klf14_b6ntac_exp_0099_paper_figures'
# cross-platform home directory
from pathlib import Path
home = str(Path.home())
import os
import sys
sys.path.extend([os.path.join(home, 'Software/cytometer')])
# json_annotation_files_dict here needs to have the same files as in
# klf14_b6ntac_exp_0098_full_slide_size_analysis_v7.py
# SQWAT: list of annotation files
json_annotation_files_dict = {}
json_annotation_files_dict['sqwat'] = [
'KLF14-B6NTAC 36.1d PAT 99-16 C1 - 2016-02-11 11.48.31.json',
'KLF14-B6NTAC-MAT-16.2d 214-16 C1 - 2016-02-17 16.02.46.json',
'KLF14-B6NTAC-MAT-17.1a 44-16 C1 - 2016-02-01 11.14.17.json',
'KLF14-B6NTAC-MAT-17.1e 48-16 C1 - 2016-02-01 16.27.05.json',
'KLF14-B6NTAC-MAT-18.2a 57-16 C1 - 2016-02-03 09.10.17.json',
'KLF14-B6NTAC-PAT-37.3c 414-16 C1 - 2016-03-15 17.15.41.json',
'KLF14-B6NTAC-MAT-18.1d 53-16 C1 - 2016-02-02 14.32.03.json',
'KLF14-B6NTAC-MAT-17.2b 65-16 C1 - 2016-02-04 10.24.22.json',
'KLF14-B6NTAC-MAT-17.2g 69-16 C1 - 2016-02-04 16.15.05.json',
'KLF14-B6NTAC 37.1a PAT 106-16 C1 - 2016-02-12 16.21.00.json',
'KLF14-B6NTAC-36.1b PAT 97-16 C1 - 2016-02-10 17.38.06.json',
# 'KLF14-B6NTAC-PAT-37.2d 411-16 C1 - 2016-03-15 12.42.26.json',
'KLF14-B6NTAC-MAT-17.2a 64-16 C1 - 2016-02-04 09.17.52.json',
'KLF14-B6NTAC-MAT-16.2f 216-16 C1 - 2016-02-18 10.28.27.json',
'KLF14-B6NTAC-MAT-17.1d 47-16 C1 - 2016-02-01 15.25.53.json',
'KLF14-B6NTAC-MAT-16.2e 215-16 C1 - 2016-02-18 09.19.26.json',
'KLF14-B6NTAC 36.1g PAT 102-16 C1 - 2016-02-11 17.20.14.json',
'KLF14-B6NTAC-37.1g PAT 112-16 C1 - 2016-02-16 13.33.09.json',
'KLF14-B6NTAC-38.1e PAT 94-16 C1 - 2016-02-10 12.13.10.json',
'KLF14-B6NTAC-MAT-18.2d 60-16 C1 - 2016-02-03 13.13.57.json',
'KLF14-B6NTAC-MAT-18.2g 63-16 C1 - 2016-02-03 16.58.52.json',
'KLF14-B6NTAC-MAT-18.2f 62-16 C1 - 2016-02-03 15.46.15.json',
'KLF14-B6NTAC-MAT-18.1b 51-16 C1 - 2016-02-02 09.59.16.json',
'KLF14-B6NTAC-MAT-19.2c 220-16 C1 - 2016-02-18 17.03.38.json',
'KLF14-B6NTAC-MAT-18.1f 55-16 C1 - 2016-02-02 16.14.30.json',
'KLF14-B6NTAC-PAT-36.3b 412-16 C1 - 2016-03-15 14.37.55.json',
'KLF14-B6NTAC-MAT-16.2c 213-16 C1 - 2016-02-17 14.51.18.json',
'KLF14-B6NTAC-PAT-37.4a 417-16 C1 - 2016-03-16 15.55.32.json',
'KLF14-B6NTAC 36.1e PAT 100-16 C1 - 2016-02-11 14.06.56.json',
'KLF14-B6NTAC-MAT-18.1c 52-16 C1 - 2016-02-02 12.26.58.json',
'KLF14-B6NTAC-MAT-18.2b 58-16 C1 - 2016-02-03 11.10.52.json',
'KLF14-B6NTAC-36.1a PAT 96-16 C1 - 2016-02-10 16.12.38.json',
'KLF14-B6NTAC-PAT-39.2d 454-16 C1 - 2016-03-17 14.33.38.json',
'KLF14-B6NTAC 36.1c PAT 98-16 C1 - 2016-02-11 10.45.00.json',
'KLF14-B6NTAC-MAT-18.2e 61-16 C1 - 2016-02-03 14.19.35.json',
'KLF14-B6NTAC-MAT-19.2g 222-16 C1 - 2016-02-25 15.13.00.json',
'KLF14-B6NTAC-PAT-37.2a 406-16 C1 - 2016-03-14 12.01.56.json',
'KLF14-B6NTAC 36.1j PAT 105-16 C1 - 2016-02-12 14.33.33.json',
'KLF14-B6NTAC-37.1b PAT 107-16 C1 - 2016-02-15 11.43.31.json',
'KLF14-B6NTAC-MAT-17.1c 46-16 C1 - 2016-02-01 14.02.04.json',
'KLF14-B6NTAC-MAT-19.2f 217-16 C1 - 2016-02-18 11.48.16.json',
'KLF14-B6NTAC-MAT-17.2d 67-16 C1 - 2016-02-04 12.34.32.json',
'KLF14-B6NTAC-MAT-18.3c 218-16 C1 - 2016-02-18 13.12.09.json',
'KLF14-B6NTAC-PAT-37.3a 413-16 C1 - 2016-03-15 15.54.12.json',
'KLF14-B6NTAC-MAT-19.1a 56-16 C1 - 2016-02-02 17.23.31.json',
'KLF14-B6NTAC-37.1h PAT 113-16 C1 - 2016-02-16 15.14.09.json',
'KLF14-B6NTAC-MAT-18.3d 224-16 C1 - 2016-02-26 11.13.53.json',
'KLF14-B6NTAC-PAT-37.2g 415-16 C1 - 2016-03-16 11.47.52.json',
'KLF14-B6NTAC-37.1e PAT 110-16 C1 - 2016-02-15 17.33.11.json',
'KLF14-B6NTAC-MAT-17.2f 68-16 C1 - 2016-02-04 15.05.54.json',
'KLF14-B6NTAC 36.1h PAT 103-16 C1 - 2016-02-12 10.15.22.json',
# 'KLF14-B6NTAC-PAT-39.1h 453-16 C1 - 2016-03-17 11.38.04.json',
'KLF14-B6NTAC-MAT-16.2b 212-16 C1 - 2016-02-17 12.49.00.json',
'KLF14-B6NTAC-MAT-17.1f 49-16 C1 - 2016-02-01 17.51.46.json',
'KLF14-B6NTAC-PAT-36.3d 416-16 C1 - 2016-03-16 14.44.11.json',
'KLF14-B6NTAC-MAT-16.2a 211-16 C1 - 2016-02-17 11.46.42.json',
'KLF14-B6NTAC-38.1f PAT 95-16 C1 - 2016-02-10 14.41.44.json',
'KLF14-B6NTAC-PAT-36.3a 409-16 C1 - 2016-03-15 10.18.46.json',
'KLF14-B6NTAC-MAT-19.2b 219-16 C1 - 2016-02-18 15.41.38.json',
'KLF14-B6NTAC-MAT-17.1b 45-16 C1 - 2016-02-01 12.23.50.json',
'KLF14-B6NTAC 36.1f PAT 101-16 C1 - 2016-02-11 15.23.06.json',
'KLF14-B6NTAC-MAT-18.1e 54-16 C1 - 2016-02-02 15.26.33.json',
'KLF14-B6NTAC-37.1d PAT 109-16 C1 - 2016-02-15 15.19.08.json',
'KLF14-B6NTAC-MAT-18.2c 59-16 C1 - 2016-02-03 11.56.52.json',
'KLF14-B6NTAC-PAT-37.2f 405-16 C1 - 2016-03-14 10.58.34.json',
'KLF14-B6NTAC-PAT-37.2e 408-16 C1 - 2016-03-14 16.23.30.json',
'KLF14-B6NTAC-MAT-19.2e 221-16 C1 - 2016-02-25 14.00.14.json',
# 'KLF14-B6NTAC-PAT-37.2c 407-16 C1 - 2016-03-14 14.13.54.json',
# 'KLF14-B6NTAC-PAT-37.2b 410-16 C1 - 2016-03-15 11.24.20.json',
'KLF14-B6NTAC-PAT-37.4b 419-16 C1 - 2016-03-17 10.22.54.json',
'KLF14-B6NTAC-37.1c PAT 108-16 C1 - 2016-02-15 14.49.45.json',
'KLF14-B6NTAC-MAT-18.1a 50-16 C1 - 2016-02-02 09.12.41.json',
'KLF14-B6NTAC 36.1i PAT 104-16 C1 - 2016-02-12 12.14.38.json',
'KLF14-B6NTAC-PAT-37.2h 418-16 C1 - 2016-03-16 17.01.17.json',
'KLF14-B6NTAC-MAT-17.2c 66-16 C1 - 2016-02-04 11.46.39.json',
'KLF14-B6NTAC-MAT-18.3b 223-16 C2 - 2016-02-26 10.35.52.json',
'KLF14-B6NTAC-37.1f PAT 111-16 C2 - 2016-02-16 11.26 (1).json',
'KLF14-B6NTAC-PAT 37.2b 410-16 C4 - 2020-02-14 10.27.23.json',
'KLF14-B6NTAC-PAT 37.2c 407-16 C4 - 2020-02-14 10.15.57.json',
# 'KLF14-B6NTAC-PAT 37.2d 411-16 C4 - 2020-02-14 10.34.10.json'
]
# GWAT: list of annotation files
json_annotation_files_dict['gwat'] = [
'KLF14-B6NTAC-36.1a PAT 96-16 B1 - 2016-02-10 15.32.31.json',
'KLF14-B6NTAC-36.1b PAT 97-16 B1 - 2016-02-10 17.15.16.json',
'KLF14-B6NTAC-36.1c PAT 98-16 B1 - 2016-02-10 18.32.40.json',
'KLF14-B6NTAC 36.1d PAT 99-16 B1 - 2016-02-11 11.29.55.json',
'KLF14-B6NTAC 36.1e PAT 100-16 B1 - 2016-02-11 12.51.11.json',
'KLF14-B6NTAC 36.1f PAT 101-16 B1 - 2016-02-11 14.57.03.json',
'KLF14-B6NTAC 36.1g PAT 102-16 B1 - 2016-02-11 16.12.01.json',
'KLF14-B6NTAC 36.1h PAT 103-16 B1 - 2016-02-12 09.51.08.json',
# 'KLF14-B6NTAC 36.1i PAT 104-16 B1 - 2016-02-12 11.37.56.json',
'KLF14-B6NTAC 36.1j PAT 105-16 B1 - 2016-02-12 14.08.19.json',
'KLF14-B6NTAC 37.1a PAT 106-16 B1 - 2016-02-12 15.33.02.json',
'KLF14-B6NTAC-37.1b PAT 107-16 B1 - 2016-02-15 11.25.20.json',
'KLF14-B6NTAC-37.1c PAT 108-16 B1 - 2016-02-15 12.33.10.json',
'KLF14-B6NTAC-37.1d PAT 109-16 B1 - 2016-02-15 15.03.44.json',
'KLF14-B6NTAC-37.1e PAT 110-16 B1 - 2016-02-15 16.16.06.json',
'KLF14-B6NTAC-37.1g PAT 112-16 B1 - 2016-02-16 12.02.07.json',
'KLF14-B6NTAC-37.1h PAT 113-16 B1 - 2016-02-16 14.53.02.json',
'KLF14-B6NTAC-38.1e PAT 94-16 B1 - 2016-02-10 11.35.53.json',
'KLF14-B6NTAC-38.1f PAT 95-16 B1 - 2016-02-10 14.16.55.json',
'KLF14-B6NTAC-MAT-16.2a 211-16 B1 - 2016-02-17 11.21.54.json',
'KLF14-B6NTAC-MAT-16.2b 212-16 B1 - 2016-02-17 12.33.18.json',
'KLF14-B6NTAC-MAT-16.2c 213-16 B1 - 2016-02-17 14.01.06.json',
'KLF14-B6NTAC-MAT-16.2d 214-16 B1 - 2016-02-17 15.43.57.json',
'KLF14-B6NTAC-MAT-16.2e 215-16 B1 - 2016-02-17 17.14.16.json',
'KLF14-B6NTAC-MAT-16.2f 216-16 B1 - 2016-02-18 10.05.52.json',
# 'KLF14-B6NTAC-MAT-17.1a 44-16 B1 - 2016-02-01 09.19.20.json',
'KLF14-B6NTAC-MAT-17.1b 45-16 B1 - 2016-02-01 12.05.15.json',
'KLF14-B6NTAC-MAT-17.1c 46-16 B1 - 2016-02-01 13.01.30.json',
'KLF14-B6NTAC-MAT-17.1d 47-16 B1 - 2016-02-01 15.11.42.json',
'KLF14-B6NTAC-MAT-17.1e 48-16 B1 - 2016-02-01 16.01.09.json',
'KLF14-B6NTAC-MAT-17.1f 49-16 B1 - 2016-02-01 17.12.31.json',
'KLF14-B6NTAC-MAT-17.2a 64-16 B1 - 2016-02-04 08.57.34.json',
'KLF14-B6NTAC-MAT-17.2b 65-16 B1 - 2016-02-04 10.06.00.json',
'KLF14-B6NTAC-MAT-17.2c 66-16 B1 - 2016-02-04 11.14.28.json',
'KLF14-B6NTAC-MAT-17.2d 67-16 B1 - 2016-02-04 12.20.20.json',
'KLF14-B6NTAC-MAT-17.2f 68-16 B1 - 2016-02-04 14.01.40.json',
'KLF14-B6NTAC-MAT-17.2g 69-16 B1 - 2016-02-04 15.52.52.json',
'KLF14-B6NTAC-MAT-18.1a 50-16 B1 - 2016-02-02 08.49.06.json',
'KLF14-B6NTAC-MAT-18.1b 51-16 B1 - 2016-02-02 09.46.31.json',
'KLF14-B6NTAC-MAT-18.1c 52-16 B1 - 2016-02-02 11.24.31.json',
'KLF14-B6NTAC-MAT-18.1d 53-16 B1 - 2016-02-02 14.11.37.json',
# 'KLF14-B6NTAC-MAT-18.1e 54-16 B1 - 2016-02-02 15.06.05.json',
'KLF14-B6NTAC-MAT-18.2a 57-16 B1 - 2016-02-03 08.54.27.json',
'KLF14-B6NTAC-MAT-18.2b 58-16 B1 - 2016-02-03 09.58.06.json',
'KLF14-B6NTAC-MAT-18.2c 59-16 B1 - 2016-02-03 11.41.32.json',
'KLF14-B6NTAC-MAT-18.2d 60-16 B1 - 2016-02-03 12.56.49.json',
'KLF14-B6NTAC-MAT-18.2e 61-16 B1 - 2016-02-03 14.02.25.json',
'KLF14-B6NTAC-MAT-18.2f 62-16 B1 - 2016-02-03 15.00.17.json',
'KLF14-B6NTAC-MAT-18.2g 63-16 B1 - 2016-02-03 16.40.37.json',
'KLF14-B6NTAC-MAT-18.3b 223-16 B1 - 2016-02-25 16.53.42.json',
'KLF14-B6NTAC-MAT-18.3c 218-16 B1 - 2016-02-18 12.51.46.json',
'KLF14-B6NTAC-MAT-18.3d 224-16 B1 - 2016-02-26 10.48.56.json',
'KLF14-B6NTAC-MAT-19.1a 56-16 B1 - 2016-02-02 16.57.46.json',
'KLF14-B6NTAC-MAT-19.2b 219-16 B1 - 2016-02-18 14.21.50.json',
'KLF14-B6NTAC-MAT-19.2c 220-16 B1 - 2016-02-18 16.40.48.json',
'KLF14-B6NTAC-MAT-19.2e 221-16 B1 - 2016-02-25 13.15.27.json',
'KLF14-B6NTAC-MAT-19.2f 217-16 B1 - 2016-02-18 11.23.22.json',
'KLF14-B6NTAC-MAT-19.2g 222-16 B1 - 2016-02-25 14.51.57.json',
'KLF14-B6NTAC-PAT-36.3a 409-16 B1 - 2016-03-15 09.24.54.json',
'KLF14-B6NTAC-PAT-36.3b 412-16 B1 - 2016-03-15 14.11.47.json',
'KLF14-B6NTAC-PAT-36.3d 416-16 B1 - 2016-03-16 14.22.04.json',
# 'KLF14-B6NTAC-PAT-37.2a 406-16 B1 - 2016-03-14 11.46.47.json',
'KLF14-B6NTAC-PAT-37.2b 410-16 B1 - 2016-03-15 11.12.01.json',
'KLF14-B6NTAC-PAT-37.2c 407-16 B1 - 2016-03-14 12.54.55.json',
'KLF14-B6NTAC-PAT-37.2d 411-16 B1 - 2016-03-15 12.01.13.json',
'KLF14-B6NTAC-PAT-37.2e 408-16 B1 - 2016-03-14 16.06.43.json',
'KLF14-B6NTAC-PAT-37.2f 405-16 B1 - 2016-03-14 09.49.45.json',
'KLF14-B6NTAC-PAT-37.2g 415-16 B1 - 2016-03-16 11.04.45.json',
'KLF14-B6NTAC-PAT-37.2h 418-16 B1 - 2016-03-16 16.42.16.json',
'KLF14-B6NTAC-PAT-37.3a 413-16 B1 - 2016-03-15 15.31.26.json',
'KLF14-B6NTAC-PAT-37.3c 414-16 B1 - 2016-03-15 16.49.22.json',
'KLF14-B6NTAC-PAT-37.4a 417-16 B1 - 2016-03-16 15.25.38.json',
'KLF14-B6NTAC-PAT-37.4b 419-16 B1 - 2016-03-17 09.10.42.json',
'KLF14-B6NTAC-PAT-38.1a 90-16 B1 - 2016-02-04 17.27.42.json',
'KLF14-B6NTAC-PAT-39.1h 453-16 B1 - 2016-03-17 11.15.50.json',
'KLF14-B6NTAC-PAT-39.2d 454-16 B1 - 2016-03-17 12.16.06.json'
]
########################################################################################################################
## Explore training/test data of different folds
########################################################################################################################
import pickle
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
os.environ['KERAS_BACKEND'] = 'tensorflow'
import keras.backend as K
import cytometer
import cytometer.data
import tensorflow as tf
# limit number of GPUs
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
LIMIT_GPU_MEMORY = False
# limit GPU memory used
if LIMIT_GPU_MEMORY:
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.9
set_session(tf.Session(config=config))
# specify data format as (n, row, col, channel)
K.set_image_data_format('channels_last')
DEBUG = False
'''Directories and filenames'''
# data paths
klf14_root_data_dir = os.path.join(home, 'Data/cytometer_data/klf14')
klf14_training_dir = os.path.join(klf14_root_data_dir, 'klf14_b6ntac_training')
klf14_training_data_dir = os.path.join(klf14_root_data_dir, 'klf14_b6ntac_training')
klf14_training_non_overlap_data_dir = os.path.join(klf14_root_data_dir, 'klf14_b6ntac_training_non_overlap')
figures_dir = os.path.join(home, 'GoogleDrive/Research/20190727_cytometer_paper/figures')
saved_models_dir = os.path.join(klf14_root_data_dir, 'saved_models')
ndpi_dir = os.path.join(home, 'scan_srv2_cox/Maz Yon')
metainfo_dir = os.path.join(home, 'Data/cytometer_data/klf14')
saved_kfolds_filename = 'klf14_b6ntac_exp_0079_generate_kfolds.pickle'
saved_extra_kfolds_filename = 'klf14_b6ntac_exp_0094_generate_extra_training_images.pickle'
# original dataset used in pipelines up to v6 + extra "other" tissue images
kfold_filename = os.path.join(saved_models_dir, saved_extra_kfolds_filename)
with open(kfold_filename, 'rb') as f:
aux = pickle.load(f)
file_svg_list = aux['file_list']
idx_test_all = aux['idx_test']
idx_train_all = aux['idx_train']
# correct home directory
file_svg_list = [x.replace('/users/rittscher/rcasero', home) for x in file_svg_list]
file_svg_list = [x.replace('/home/rcasero', home) for x in file_svg_list]
# number of images
n_im = len(file_svg_list)
# CSV file with metainformation of all mice
metainfo_csv_file = os.path.join(metainfo_dir, 'klf14_b6ntac_meta_info.csv')
metainfo = pd.read_csv(metainfo_csv_file)
# HACK: if file_svg_list_extra were used above, this block would not work, but it is not
# needed for the loop below that calculates the rows of Table MICE with the breakdown of
# cells/other/background objects by mouse
#
# loop over the folds to get the .ndpi files used for testing in each fold
ndpi_files_test_list = {}
for i_fold in range(len(idx_test_all)):
# list of .svg files for testing
file_svg_test = np.array(file_svg_list)[idx_test_all[i_fold]]
# list of .ndpi files that the .svg windows came from
file_ndpi_test = [os.path.basename(x).replace('.svg', '') for x in file_svg_test]
file_ndpi_test = np.unique([x.split('_row')[0] for x in file_ndpi_test])
# add to the dictionary {file: fold}
for file in file_ndpi_test:
ndpi_files_test_list[file] = i_fold
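# After this loop, ndpi_files_test_list maps each whole-slide image basename
# to the single fold in which its windows were held out for testing, e.g.
# {'<ndpi basename>': 3} (key and fold number illustrative).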
if DEBUG:
# list of NDPI files
for key in ndpi_files_test_list.keys():
print(key)
# init dataframe to aggregate training numbers of each mouse
table = pd.DataFrame(columns=['Cells', 'Other', 'Background', 'Windows', 'Windows with cells'])
# loop over files with hand-traced contours
for i, file_svg in enumerate(file_svg_list):
print('file ' + str(i) + '/' + str(len(file_svg_list) - 1) + ': ' + os.path.basename(file_svg))
# read the ground truth cell contours in the SVG file. This produces a list [contour_0, ..., contour_N-1]
# where each contour_i = [(X_0, Y_0), ..., (X_P-1, Y_P-1)]
cell_contours = cytometer.data.read_paths_from_svg_file(file_svg, tag='Cell', add_offset_from_filename=False,
minimum_npoints=3)
other_contours = cytometer.data.read_paths_from_svg_file(file_svg, tag='Other', add_offset_from_filename=False,
minimum_npoints=3)
brown_contours = cytometer.data.read_paths_from_svg_file(file_svg, tag='Brown', add_offset_from_filename=False,
minimum_npoints=3)
background_contours = cytometer.data.read_paths_from_svg_file(file_svg, tag='Background', add_offset_from_filename=False,
minimum_npoints=3)
contours = cell_contours + other_contours + brown_contours + background_contours
# make a list with the type of cell each contour is classified as
contour_type = [np.zeros(shape=(len(cell_contours),), dtype=np.uint8), # 0: white-adipocyte
np.ones(shape=(len(other_contours),), dtype=np.uint8), # 1: other types of tissue
np.ones(shape=(len(brown_contours),), dtype=np.uint8), # 1: brown cells (treated as "other" tissue)
np.zeros(shape=(len(background_contours),), dtype=np.uint8)] # 0: background
contour_type = np.concatenate(contour_type)
print('Cells: ' + str(len(cell_contours)) + '. Other: ' + str(len(other_contours))
+ '. Brown: ' + str(len(brown_contours)) + '. Background: ' + str(len(background_contours)))
# create dataframe for this image
df_common = cytometer.data.tag_values_with_mouse_info(metainfo=metainfo, s=os.path.basename(file_svg),
values=[i,], values_tag='i',
tags_to_keep=['id', 'ko_parent', 'sex'])
# mouse ID as a string
id = df_common['id'].values[0]
sex = df_common['sex'].values[0]
ko = df_common['ko_parent'].values[0]
# row to add to the table
df = pd.DataFrame(
[(sex, ko,
len(cell_contours), len(other_contours) + len(brown_contours), len(background_contours), 1, int(len(cell_contours)>0))],
columns=['Sex', 'Genotype', 'Cells', 'Other', 'Background', 'Windows', 'Windows with cells'], index=[id])
if id in table.index:
num_cols = ['Cells', 'Other', 'Background', 'Windows', 'Windows with cells']
table.loc[id, num_cols] = (table.loc[id, num_cols] + df.loc[id, num_cols])
else:
table = table.append(df, sort=False, ignore_index=False, verify_integrity=True)
# alphabetical order by mouse IDs
table = table.sort_index()
# total number of sampled windows
print('Total number of windows = ' + str(np.sum(table['Windows'])))
print('Total number of windows with cells = ' + str(np.sum(table['Windows with cells'])))
# total number of "Other" and background areas
print('Total number of Other areas = ' + str(np.sum(table['Other'])))
print('Total number of Background areas = ' + str(np.sum(table['Background'])))
# aggregate by sex and genotype
idx_f = table['Sex'] == 'f'
idx_m = table['Sex'] == 'm'
idx_pat = table['Genotype'] == 'PAT'
idx_mat = table['Genotype'] == 'MAT'
print('f PAT: ' + str(np.sum(table.loc[idx_f & idx_pat, 'Cells'])))
print('f MAT: ' + str(np.sum(table.loc[idx_f & idx_mat, 'Cells'])))
print('m PAT: ' + str(np.sum(table.loc[idx_m & idx_pat, 'Cells'])))
print('m MAT: ' + str(np.sum(table.loc[idx_m & idx_mat, 'Cells'])))
# find folds that test images belong to
for i_file, ndpi_file_kernel in enumerate(ndpi_files_test_list):
# fold where the current .ndpi image was not used for training
i_fold = ndpi_files_test_list[ndpi_file_kernel]
print('File ' + str(i_file) + '/' + str(len(ndpi_files_test_list) - 1) + ': ' + ndpi_file_kernel
+ '. Fold = ' + str(i_fold))
# mean and std of mouse weight
weight_f_mat = [22.07, 26.39, 30.65, 24.28, 27.72]
weight_f_pat = [31.42, 29.25, 27.18, 23.69, 21.20]
weight_m_mat = [46.19, 40.87, 40.02, 41.98, 34.52, 36.08]
weight_m_pat = [36.55, 40.77, 36.98, 36.11]
print('f MAT: mean = ' + str(np.mean(weight_f_mat)) + ', std = ' + str(np.std(weight_f_mat)))
print('f PAT: mean = ' + str(np.mean(weight_f_pat)) + ', std = ' + str(np.std(weight_f_pat)))
print('m MAT: mean = ' + str(np.mean(weight_m_mat)) + ', std = ' + str(np.std(weight_m_mat)))
print('m PAT: mean = ' + str(np.mean(weight_m_pat)) + ', std = ' + str(np.std(weight_m_pat)))
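# hedged sketch (not part of the original analysis): a Welch's t-test would quantify the
# MAT vs. PAT weight difference within each sex; scipy.stats.ttest_ind is assumed acceptable here
if DEBUG:
    import scipy.stats
    t_f, p_f = scipy.stats.ttest_ind(weight_f_mat, weight_f_pat, equal_var=False)
    t_m, p_m = scipy.stats.ttest_ind(weight_m_mat, weight_m_pat, equal_var=False)
    print('f MAT vs. PAT: t = ' + str(t_f) + ', p = ' + str(p_f))
    print('m MAT vs. PAT: t = ' + str(t_m) + ', p = ' + str(p_m))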
########################################################################################################################
## Statistics of hand traced white adipocytes
########################################################################################################################
import shapely.geometry
import cytometer.utils
import scipy.stats
rectangle_sides_ratios = []
areas = []
perimeters = []
sphericities = []
# loop files with hand traced contours
for i, file_svg in enumerate(file_svg_list):
print('file ' + str(i) + '/' + str(len(file_svg_list) - 1) + ': ' + os.path.basename(file_svg))
# read the ground truth cell contours in the SVG file. This produces a list [contour_0, ..., contour_N-1]
    # where each contour_i = [(X_0, Y_0), ..., (X_P-1, Y_P-1)]
cell_contours = cytometer.data.read_paths_from_svg_file(file_svg, tag='Cell', add_offset_from_filename=False,
minimum_npoints=3)
# compute contour properties
for j, cell_contour in enumerate(cell_contours):
poly_cell = shapely.geometry.Polygon(cell_contour)
x, y = poly_cell.minimum_rotated_rectangle.exterior.coords.xy
edge_length = (shapely.geometry.Point(x[0], y[0]).distance(shapely.geometry.Point(x[1], y[1])),
shapely.geometry.Point(x[1], y[1]).distance(shapely.geometry.Point(x[2], y[2])))
rectangle_sides_ratio = np.max(edge_length) / np.min(edge_length)
area = poly_cell.area
perimeter = poly_cell.length
inv_compactness = poly_cell.length ** 2 / (4 * np.pi * area)
sphericity = cytometer.utils.sphericity(poly_cell)
# if j == 58:
# raise ValueError('foo: ' + str(j))
# if (inv_compactness) < 2.2 and (inv_compactness) > 2.15:
# raise ValueError('foo: ' + str(j))
rectangle_sides_ratios.append(rectangle_sides_ratio)
areas.append(area)
perimeters.append(perimeter)
sphericities.append(sphericity)
# compactness measure
inv_compactnesses = list(np.array(perimeters)**2 / (4 * np.pi * np.array(areas)))
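# note: perimeter^2 / (4*pi*area) = 1 for a circle, since (2*pi*r)^2 / (4*pi*(pi*r^2)) = 1,
# so inv_compactness >= 1, growing as the contour becomes more irregular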
print('Max rectangle sides ratio: ' + str(np.max(rectangle_sides_ratios)))
print('Min area: ' + str(np.min(areas)))
print('Max area: ' + str(np.max(areas)))
print('Min perimeter: ' + str(np.min(perimeters)))
print('Max perimeter: ' + str(np.max(perimeters)))
print('Min sphericity: ' + str(np.min(sphericities)))
print('Max sphericity: ' + str(np.max(sphericities)))
print('Min inv_compactness: ' + str(np.min(inv_compactnesses)))
print('Max inv_compactness: ' + str(np.max(inv_compactnesses)))
if DEBUG:
plt.clf()
plt.hist(rectangle_sides_ratios, bins=100)
plt.clf()
plt.boxplot(rectangle_sides_ratios)
plt.ylabel('Rectangle sides ratio')
q = scipy.stats.mstats.hdquantiles(rectangle_sides_ratios, prob=[0.98], axis=0)
    plt.plot([0.75, 1.25], [q, q], 'r', linewidth=2)
    plt.xlim(0.5, 1.5)
    plt.legend([r'$Q_{98\%}$ = ' + '%.2f' % q])
plt.clf()
plt.boxplot(areas)
plt.ylabel('Pixel$^2$')
q = scipy.stats.mstats.hdquantiles(areas, prob=[0.98], axis=0)
    plt.plot([0.75, 1.25], [q, q], 'r', linewidth=2)
    plt.xlim(0.5, 1.5)
    plt.legend([r'$Q_{98\%}$ = ' + '%.0f' % q])
plt.clf()
plt.boxplot(inv_compactnesses)
    plt.ylabel('Compactness$^{-1}$ ratio')
    q = scipy.stats.mstats.hdquantiles(inv_compactnesses, prob=[0.98], axis=0)
    plt.plot([0.75, 1.25], [q, q], 'r', linewidth=2)
    plt.xlim(0.5, 1.5)
    plt.legend([r'$Q_{98\%}$ = ' + '%.2f' % q])
plt.clf()
plt.boxplot(sphericities)
plt.ylabel('Sphericity')
q = scipy.stats.mstats.hdquantiles(sphericities, prob=[0.02], axis=0)
    plt.plot([0.75, 1.25], [q, q], 'r', linewidth=2)
    plt.xlim(0.5, 1.5)
    plt.legend([r'$Q_{2\%}$ = ' + '%.2f' % q])
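# hedged sketch (not in the original analysis): the Harrell-Davis estimator used above smooths
# over all order statistics; comparing it against the plain empirical quantile shows the effect
if DEBUG:
    import scipy.stats
    q_hd = scipy.stats.mstats.hdquantiles(areas, prob=[0.98], axis=0)[0]
    q_emp = np.quantile(areas, 0.98)
    print('HD Q98 = ' + str(q_hd) + ', empirical Q98 = ' + str(q_emp))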
########################################################################################################################
## Plots of get_next_roi_to_process(): Adaptive Block Algorithm
#
# Note: the quantitative comparison versus uniform tiling is provided in klf14_b6ntac_exp_0105_adaptive_blocks_analysis.py
########################################################################################################################
import pickle
import cytometer.utils
# Filter out INFO & WARNING messages
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# # limit number of GPUs
# os.environ['CUDA_VISIBLE_DEVICES'] = '0,1'
os.environ['KERAS_BACKEND'] = 'tensorflow'
import time
import openslide
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
from scipy.signal import fftconvolve
from cytometer.utils import rough_foreground_mask
import PIL
from keras import backend as K
LIMIT_GPU_MEMORY = False
# limit GPU memory used
if LIMIT_GPU_MEMORY:
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.95
set_session(tf.Session(config=config))
DEBUG = False
SAVE_FIGS = False
root_data_dir = os.path.join(home, 'Data/cytometer_data/klf14')
data_dir = os.path.join(home, 'scan_srv2_cox/Maz Yon')
training_dir = os.path.join(root_data_dir, 'klf14_b6ntac_training')
figures_dir = os.path.join(home, 'GoogleDrive/Research/20190727_cytometer_paper/figures')
saved_models_dir = os.path.join(root_data_dir, 'saved_models')
results_dir = os.path.join(root_data_dir, 'klf14_b6ntac_results')
annotations_dir = os.path.join(home, 'Data/cytometer_data/aida_data_Klf14_v7/annotations')
# k-folds file
saved_extra_kfolds_filename = 'klf14_b6ntac_exp_0094_generate_extra_training_images.pickle'
# model names
dmap_model_basename = 'klf14_b6ntac_exp_0086_cnn_dmap'
contour_model_basename = 'klf14_b6ntac_exp_0091_cnn_contour_after_dmap'
classifier_model_basename = 'klf14_b6ntac_exp_0095_cnn_tissue_classifier_fcn'
correction_model_basename = 'klf14_b6ntac_exp_0089_cnn_segmentation_correction_overlapping_scaled_contours'
# full resolution image window and network expected receptive field parameters
fullres_box_size = np.array([2751, 2751])
receptive_field = np.array([131, 131])
# rough_foreground_mask() parameters
downsample_factor = 8.0
dilation_size = 25
component_size_threshold = 1e6
hole_size_treshold = 8000
# contour parameters
contour_downsample_factor = 0.1
bspline_k = 1
# block_split() parameters in downsampled image
block_len = np.ceil((fullres_box_size - receptive_field) / downsample_factor)
block_overlap = np.ceil((receptive_field - 1) / 2 / downsample_factor).astype(int)
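# worked example with the values above: block_len = ceil((2751 - 131) / 8) = ceil(327.5) = 328
# and block_overlap = ceil((131 - 1) / 2 / 8) = ceil(8.125) = 9 pixels in the downsampled image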
# segmentation parameters
min_cell_area = 1500
max_cell_area = 100e3
min_mask_overlap = 0.8
phagocytosis = True
min_class_prop = 0.5
correction_window_len = 401
correction_smoothing = 11
batch_size = 16
# segmentation correction parameters
# load list of images, and indices for training vs. testing indices
saved_kfolds_filename = os.path.join(saved_models_dir, saved_extra_kfolds_filename)
with open(saved_kfolds_filename, 'rb') as f:
aux = pickle.load(f)
file_svg_list = aux['file_list']
idx_test_all = aux['idx_test']
idx_train_all = aux['idx_train']
# correct home directory
file_svg_list = [x.replace('/users/rittscher/rcasero', home) for x in file_svg_list]
file_svg_list = [x.replace('/home/rcasero', home) for x in file_svg_list]
# loop the folds to get the ndpi files that correspond to testing of each fold
ndpi_files_test_list = {}
for i_fold in range(len(idx_test_all)):
# list of .svg files for testing
file_svg_test = np.array(file_svg_list)[idx_test_all[i_fold]]
# list of .ndpi files that the .svg windows came from
file_ndpi_test = [os.path.basename(x).replace('.svg', '') for x in file_svg_test]
file_ndpi_test = np.unique([x.split('_row')[0] for x in file_ndpi_test])
# add to the dictionary {file: fold}
for file in file_ndpi_test:
ndpi_files_test_list[file] = i_fold
# File 4/19: KLF14-B6NTAC-MAT-17.1c 46-16 C1 - 2016-02-01 14.02.04. Fold = 2
i_file = 4
# File 10/19: KLF14-B6NTAC-36.1a PAT 96-16 C1 - 2016-02-10 16.12.38. Fold = 5
i_file = 10
ndpi_file_kernel = list(ndpi_files_test_list.keys())[i_file]
# for i_file, ndpi_file_kernel in enumerate(ndpi_files_test_list):
# fold where the current .ndpi image was not used for training
i_fold = ndpi_files_test_list[ndpi_file_kernel]
print('File ' + str(i_file) + '/' + str(len(ndpi_files_test_list) - 1) + ': ' + ndpi_file_kernel
+ '. Fold = ' + str(i_fold))
# make full path to ndpi file
ndpi_file = os.path.join(data_dir, ndpi_file_kernel + '.ndpi')
contour_model_file = os.path.join(saved_models_dir, contour_model_basename + '_model_fold_' + str(i_fold) + '.h5')
dmap_model_file = os.path.join(saved_models_dir, dmap_model_basename + '_model_fold_' + str(i_fold) + '.h5')
classifier_model_file = os.path.join(saved_models_dir,
classifier_model_basename + '_model_fold_' + str(i_fold) + '.h5')
correction_model_file = os.path.join(saved_models_dir,
correction_model_basename + '_model_fold_' + str(i_fold) + '.h5')
# name of file to save annotations
annotations_file = os.path.basename(ndpi_file)
annotations_file = os.path.splitext(annotations_file)[0]
annotations_file = os.path.join(annotations_dir, annotations_file + '_exp_0097.json')
# name of file to save areas and contours
results_file = os.path.basename(ndpi_file)
results_file = os.path.splitext(results_file)[0]
results_file = os.path.join(results_dir, results_file + '_exp_0097.npz')
# rough segmentation of the tissue in the image
lores_istissue0, im_downsampled = rough_foreground_mask(ndpi_file, downsample_factor=downsample_factor,
dilation_size=dilation_size,
component_size_threshold=component_size_threshold,
hole_size_treshold=hole_size_treshold,
return_im=True)
if DEBUG:
plt.clf()
plt.imshow(im_downsampled)
plt.axis('off')
plt.tight_layout()
plt.savefig(os.path.join(figures_dir, 'klf14_b6ntac_exp_0099_histology_i_file_' + str(i_file) + '.png'),
bbox_inches='tight')
plt.clf()
plt.imshow(im_downsampled)
plt.contour(lores_istissue0, colors='k')
plt.axis('off')
plt.tight_layout()
plt.savefig(os.path.join(figures_dir, 'klf14_b6ntac_exp_0099_rough_mask_i_file_' + str(i_file) + '.png'),
bbox_inches='tight')
# segmentation copy, to keep track of what's left to do
lores_istissue = lores_istissue0.copy()
# open full resolution histology slide
im = openslide.OpenSlide(ndpi_file)
# pixel size
assert(im.properties['tiff.ResolutionUnit'] == 'centimeter')
xres = 1e-2 / float(im.properties['tiff.XResolution'])
yres = 1e-2 / float(im.properties['tiff.YResolution'])
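# hedged example (values assumed, not read from this particular file): a scanner reporting
# 44914 pixels/cm would give xres = 1e-2 / 44914 ~ 2.2e-7 m, i.e. ~0.22 um/pixel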
# # init empty list to store area values and contour coordinates
# areas_all = []
# contours_all = []
# keep extracting histology windows until we have finished
step = -1
time_0 = time_curr = time.time()
while np.count_nonzero(lores_istissue) > 0:
# next step (it starts from 0)
step += 1
time_prev = time_curr
time_curr = time.time()
print('File ' + str(i_file) + '/' + str(len(ndpi_files_test_list) - 1) + ': step ' +
str(step) + ': ' +
str(np.count_nonzero(lores_istissue)) + '/' + str(np.count_nonzero(lores_istissue0)) + ': ' +
"{0:.1f}".format(100.0 - np.count_nonzero(lores_istissue) / np.count_nonzero(lores_istissue0) * 100) +
'% completed: ' +
'step time ' + "{0:.2f}".format(time_curr - time_prev) + ' s' +
', total time ' + "{0:.2f}".format(time_curr - time_0) + ' s')
## Code extracted from:
## get_next_roi_to_process()
# variables for get_next_roi_to_process()
seg = lores_istissue.copy()
    downsample_factor = downsample_factor  # no-op; kept to mirror get_next_roi_to_process()'s argument list
max_window_size = fullres_box_size
border = np.round((receptive_field - 1) / 2)
# convert to np.array so that we can use algebraic operators
max_window_size = np.array(max_window_size)
border = np.array(border)
# convert segmentation mask to [0, 1]
seg = (seg != 0).astype('int')
# approximate measures in the downsampled image (we don't round them)
lores_max_window_size = max_window_size / downsample_factor
lores_border = border / downsample_factor
    # kernels that, when flipped, become a top line and a left line. They are created pre-flipped
    # because the convolution operation internally flips them (the two flips cancel out)
kernel_top = np.zeros(shape=np.round(lores_max_window_size - 2 * lores_border).astype('int'))
kernel_top[int((kernel_top.shape[0] - 1) / 2), :] = 1
kernel_left = np.zeros(shape=np.round(lores_max_window_size - 2 * lores_border).astype('int'))
    kernel_left[:, int((kernel_left.shape[1] - 1) / 2)] = 1
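    # illustration (hedged, for a hypothetical 5x5 kernel): kernel_top is all zeros except a row
    # of ones in the middle, so convolving it with seg counts, at each pixel, how many tissue
    # pixels lie on a horizontal segment of the window's width centred there (kernel_left does the
    # same with a vertical segment)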
if DEBUG:
plt.clf()
plt.imshow(kernel_top)
plt.axis('off')
plt.tight_layout()
plt.savefig(os.path.join(figures_dir, 'klf14_b6ntac_exp_0099_kernel_top_i_file_' + str(i_file) + '.png'),
bbox_inches='tight')
plt.imshow(kernel_left)
plt.axis('off')
plt.tight_layout()
plt.savefig(os.path.join(figures_dir, 'klf14_b6ntac_exp_0099_kernel_left_i_file_' + str(i_file) + '.png'),
bbox_inches='tight')
seg_top = np.round(fftconvolve(seg, kernel_top, mode='same'))
seg_left = np.round(fftconvolve(seg, kernel_left, mode='same'))
# window detections
detection_idx = np.nonzero(seg_left * seg_top)
# set top-left corner of the box = top-left corner of first box detected
lores_first_row = detection_idx[0][0]
lores_first_col = detection_idx[1][0]
# first, we look within a window with the maximum size
lores_last_row = detection_idx[0][0] + lores_max_window_size[0] - 2 * lores_border[0]
lores_last_col = detection_idx[1][0] + lores_max_window_size[1] - 2 * lores_border[1]
# second, if the segmentation is smaller than the window, we reduce the window size
window = seg[lores_first_row:int(np.round(lores_last_row)), lores_first_col:int(np.round(lores_last_col))]
idx = np.any(window, axis=1) # reduce rows size
last_segmented_pixel_len = np.max(np.where(idx))
lores_last_row = detection_idx[0][0] + np.min((lores_max_window_size[0] - 2 * lores_border[0],
last_segmented_pixel_len))
idx = np.any(window, axis=0) # reduce cols size
last_segmented_pixel_len = np.max(np.where(idx))
lores_last_col = detection_idx[1][0] + np.min((lores_max_window_size[1] - 2 * lores_border[1],
last_segmented_pixel_len))
# save coordinates for plot (this is only for a figure in the paper and doesn't need to be done in the real
# implementation)
lores_first_col_bak = lores_first_col
lores_first_row_bak = lores_first_row
lores_last_col_bak = lores_last_col
lores_last_row_bak = lores_last_row
# add a border around the window
lores_first_row = np.max([0, lores_first_row - lores_border[0]])
lores_first_col = np.max([0, lores_first_col - lores_border[1]])
lores_last_row = np.min([seg.shape[0], lores_last_row + lores_border[0]])
lores_last_col = np.min([seg.shape[1], lores_last_col + lores_border[1]])
# convert low resolution indices to high resolution
    first_row = int(np.round(lores_first_row * downsample_factor))
    last_row = int(np.round(lores_last_row * downsample_factor))
    first_col = int(np.round(lores_first_col * downsample_factor))
    last_col = int(np.round(lores_last_col * downsample_factor))
# round down indices in downsampled segmentation
lores_first_row = int(lores_first_row)
lores_last_row = int(lores_last_row)
lores_first_col = int(lores_first_col)
lores_last_col = int(lores_last_col)
# load window from full resolution slide
tile = im.read_region(location=(first_col, first_row), level=0,
size=(last_col - first_col, last_row - first_row))
tile = np.array(tile)
tile = tile[:, :, 0:3]
# interpolate coarse tissue segmentation to full resolution
istissue_tile = lores_istissue[lores_first_row:lores_last_row, lores_first_col:lores_last_col]
istissue_tile = cytometer.utils.resize(istissue_tile, size=(last_col - first_col, last_row - first_row),
resample=PIL.Image.NEAREST)
if DEBUG:
plt.clf()
plt.imshow(tile)
plt.imshow(istissue_tile, alpha=0.5)
plt.contour(istissue_tile, colors='k')
plt.title('Yellow: Tissue. Purple: Background')
plt.axis('off')
# clear keras session to prevent each segmentation iteration from getting slower. Note that this forces us to
# reload the models every time
K.clear_session()
# segment histology, split into individual objects, and apply segmentation correction
labels, labels_class, todo_edge, \
window_im, window_labels, window_labels_corrected, window_labels_class, index_list, scaling_factor_list \
= cytometer.utils.segmentation_pipeline6(tile,
dmap_model=dmap_model_file,
contour_model=contour_model_file,
correction_model=correction_model_file,
classifier_model=classifier_model_file,
min_cell_area=min_cell_area,
mask=istissue_tile,
min_mask_overlap=min_mask_overlap,
phagocytosis=phagocytosis,
min_class_prop=min_class_prop,
correction_window_len=correction_window_len,
correction_smoothing=correction_smoothing,
return_bbox=True, return_bbox_coordinates='xy',
batch_size=batch_size)
# downsample "to do" mask so that the rough tissue segmentation can be updated
lores_todo_edge = PIL.Image.fromarray(todo_edge.astype(np.uint8))
lores_todo_edge = lores_todo_edge.resize((lores_last_col - lores_first_col,
lores_last_row - lores_first_row),
resample=PIL.Image.NEAREST)
lores_todo_edge = np.array(lores_todo_edge)
# update coarse tissue mask (this is only necessary here to plot figures for the paper. In the actual code,
# the coarse mask gets directly updated, without this intermediate step)
seg_updated = seg.copy()
seg_updated[lores_first_row:lores_last_row, lores_first_col:lores_last_col] = lores_todo_edge
if DEBUG:
plt.clf()
fig = plt.imshow(seg, cmap='Greys')
plt.contour(seg_left * seg_top > 0, colors='r')
rect = Rectangle((lores_first_col, lores_first_row),
lores_last_col - lores_first_col, lores_last_row - lores_first_row,
alpha=0.5, facecolor='g', edgecolor='g', zorder=2)
fig.axes.add_patch(rect)
rect2 = Rectangle((lores_first_col_bak, lores_first_row_bak),
lores_last_col_bak - lores_first_col_bak, lores_last_row_bak - lores_first_row_bak,
alpha=1.0, facecolor=None, fill=False, edgecolor='g', lw=1, zorder=3)
fig.axes.add_patch(rect2)
plt.scatter(detection_idx[1][0], detection_idx[0][0], color='k', s=5, zorder=3)
plt.axis('off')
plt.tight_layout()
plt.savefig(os.path.join(figures_dir, 'klf14_b6ntac_exp_0099_fftconvolve_i_file_' + str(i_file) +
'_step_' + str(step) + '.png'),
bbox_inches='tight')
if DEBUG:
plt.clf()
fig = plt.imshow(seg, cmap='Greys')
plt.contour(seg_left * seg_top > 0, colors='r')
plt.contour(seg_updated, colors='w', zorder=4)
rect = Rectangle((lores_first_col, lores_first_row),
lores_last_col - lores_first_col, lores_last_row - lores_first_row,
alpha=0.5, facecolor='g', edgecolor='g', zorder=2)
fig.axes.add_patch(rect)
rect2 = Rectangle((lores_first_col_bak, lores_first_row_bak),
lores_last_col_bak - lores_first_col_bak, lores_last_row_bak - lores_first_row_bak,
alpha=1.0, facecolor=None, fill=False, edgecolor='g', lw=3, zorder=3)
fig.axes.add_patch(rect2)
plt.scatter(detection_idx[1][0], detection_idx[0][0], color='k', s=5, zorder=3)
plt.axis('off')
plt.tight_layout()
plt.xlim(int(lores_first_col - 50), int(lores_last_col + 50))
plt.ylim(int(lores_last_row + 50), int(lores_first_row - 50))
plt.savefig(os.path.join(figures_dir, 'klf14_b6ntac_exp_0099_fftconvolve_detail_i_file_' + str(i_file) +
'_step_' + str(step) + '.png'),
bbox_inches='tight')
# update coarse tissue mask for next iteration
lores_istissue[lores_first_row:lores_last_row, lores_first_col:lores_last_col] = lores_todo_edge
########################################################################################################################
## Show examples of what each deep CNN does (code cannibalised from the "inspect" scripts of the networks)
########################################################################################################################
import pickle
import warnings
# other imports
import numpy as np
import cv2
import matplotlib.pyplot as plt
os.environ['KERAS_BACKEND'] = 'tensorflow'
import keras
import keras.backend as K
import cytometer.data
import cytometer.utils
import cytometer.model_checkpoint_parallel
import tensorflow as tf
import skimage
from PIL import Image, ImageDraw
import math
LIMIT_GPU_MEMORY = False
# limit GPU memory used
if LIMIT_GPU_MEMORY:
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.9
set_session(tf.Session(config=config))
# specify data format as (n, row, col, channel)
K.set_image_data_format('channels_last')
DEBUG = False
'''Directories and filenames'''
# data paths
klf14_root_data_dir = os.path.join(home, 'Data/cytometer_data/klf14')
klf14_training_dir = os.path.join(klf14_root_data_dir, 'klf14_b6ntac_training')
klf14_training_non_overlap_data_dir = os.path.join(klf14_root_data_dir, 'klf14_b6ntac_training_non_overlap')
figures_dir = os.path.join(home, 'GoogleDrive/Research/20190727_cytometer_paper/figures')
saved_models_dir = os.path.join(klf14_root_data_dir, 'saved_models')
saved_kfolds_filename = 'klf14_b6ntac_exp_0079_generate_kfolds.pickle'
# model names
dmap_model_basename = 'klf14_b6ntac_exp_0086_cnn_dmap'
contour_model_basename = 'klf14_b6ntac_exp_0091_cnn_contour_after_dmap'
classifier_model_basename = 'klf14_b6ntac_exp_0095_cnn_tissue_classifier_fcn'
correction_model_basename = 'klf14_b6ntac_exp_0089_cnn_segmentation_correction_overlapping_scaled_contours'
'''Load folds'''
# load list of images, and indices for training vs. testing indices
contour_model_kfold_filename = os.path.join(saved_models_dir, saved_kfolds_filename)
with open(contour_model_kfold_filename, 'rb') as f:
aux = pickle.load(f)
svg_file_list = aux['file_list']
idx_test_all = aux['idx_test']
idx_train_all = aux['idx_train']
if DEBUG:
for i, file in enumerate(svg_file_list):
print(str(i) + ': ' + file)
# correct home directory
svg_file_list = [x.replace('/home/rcasero', home) for x in svg_file_list]
# KLF14-B6NTAC-36.1b PAT 97-16 C1 - 2016-02-10 17.38.06_row_017204_col_019444.tif (fold 5 for testing. No .svg)
# KLF14-B6NTAC-36.1b PAT 97-16 C1 - 2016-02-10 17.38.06_row_009644_col_061660.tif (fold 5 for testing. No .svg)
# KLF14-B6NTAC 36.1c PAT 98-16 C1 - 2016-02-11 10.45.00_row_019228_col_015060.svg (fold 7 for testing. With .svg)
# find which fold the testing image belongs to
np.where(['36.1c' in x for x in svg_file_list])
idx_test_all[7]
# TIFF files that correspond to the SVG files (without augmentation)
im_orig_file_list = []
for i, file in enumerate(svg_file_list):
im_orig_file_list.append(file.replace('.svg', '.tif'))
im_orig_file_list[i] = os.path.join(os.path.dirname(im_orig_file_list[i]) + '_augmented',
'im_seed_nan_' + os.path.basename(im_orig_file_list[i]))
# check that files exist
if not os.path.isfile(file):
# warnings.warn('i = ' + str(i) + ': File does not exist: ' + os.path.basename(file))
warnings.warn('i = ' + str(i) + ': File does not exist: ' + file)
if not os.path.isfile(im_orig_file_list[i]):
# warnings.warn('i = ' + str(i) + ': File does not exist: ' + os.path.basename(im_orig_file_list[i]))
warnings.warn('i = ' + str(i) + ': File does not exist: ' + im_orig_file_list[i])
'''Inspect model results'''
# for i_fold, idx_test in enumerate(idx_test_all):
i_fold = 7; idx_test = idx_test_all[i_fold]
print('Fold ' + str(i_fold) + '/' + str(len(idx_test_all)-1))
'''Load data'''
# split the data list into training and testing lists
im_test_file_list, im_train_file_list = cytometer.data.split_list(im_orig_file_list, idx_test)
# load the test data (im, dmap, mask)
test_dataset, test_file_list, test_shuffle_idx = \
cytometer.data.load_datasets(im_test_file_list, prefix_from='im', prefix_to=['im', 'dmap', 'mask', 'contour'],
nblocks=1, shuffle_seed=None)
# fill in the little gaps in the mask
kernel = np.ones((3, 3), np.uint8)
for i in range(test_dataset['mask'].shape[0]):
test_dataset['mask'][i, :, :, 0] = cv2.dilate(test_dataset['mask'][i, :, :, 0].astype(np.uint8),
kernel=kernel, iterations=1)
# load dmap model, and adjust input size
saved_model_filename = os.path.join(saved_models_dir, dmap_model_basename + '_model_fold_' + str(i_fold) + '.h5')
dmap_model = keras.models.load_model(saved_model_filename)
if dmap_model.input_shape[1:3] != test_dataset['im'].shape[1:3]:
dmap_model = cytometer.utils.change_input_size(dmap_model, batch_shape=test_dataset['im'].shape)
# estimate dmaps
pred_dmap = dmap_model.predict(test_dataset['im'], batch_size=4)
if DEBUG:
for i in range(test_dataset['im'].shape[0]):
plt.clf()
plt.subplot(221)
plt.imshow(test_dataset['im'][i, :, :, :])
plt.axis('off')
plt.subplot(222)
plt.imshow(test_dataset['dmap'][i, :, :, 0])
plt.axis('off')
plt.subplot(223)
plt.imshow(test_dataset['mask'][i, :, :, 0])
plt.axis('off')
plt.subplot(224)
plt.imshow(pred_dmap[i, :, :, 0])
plt.axis('off')
# KLF14-B6NTAC 36.1c PAT 98-16 C1 - 2016-02-11 10.45.00_row_019228_col_015060.svg
i = 2
if DEBUG:
plt.clf()
plt.imshow(test_dataset['im'][i, :, :, :])
plt.axis('off')
plt.tight_layout()
plt.savefig(os.path.join(figures_dir, os.path.basename(im_test_file_list[i]).replace('.tif', '.png')),
bbox_inches='tight')
plt.clf()
plt.imshow(test_dataset['dmap'][i, :, :, 0])
plt.axis('off')
plt.tight_layout()
plt.savefig(os.path.join(figures_dir, 'dmap_' + os.path.basename(im_test_file_list[i]).replace('.tif', '.png')),
bbox_inches='tight')
plt.clf()
plt.imshow(pred_dmap[i, :, :, 0])
plt.axis('off')
plt.tight_layout()
plt.savefig(os.path.join(figures_dir, 'pred_dmap_' + os.path.basename(im_test_file_list[i]).replace('.tif', '.png')),
bbox_inches='tight')
# load dmap to contour model, and adjust input size
saved_model_filename = os.path.join(saved_models_dir, contour_model_basename + '_model_fold_' + str(i_fold) + '.h5')
contour_model = keras.models.load_model(saved_model_filename)
if contour_model.input_shape[1:3] != pred_dmap.shape[1:3]:
contour_model = cytometer.utils.change_input_size(contour_model, batch_shape=pred_dmap.shape)
# estimate contours
pred_contour = contour_model.predict(pred_dmap, batch_size=4)
if DEBUG:
plt.clf()
plt.imshow(test_dataset['contour'][i, :, :, 0])
plt.axis('off')
plt.tight_layout()
plt.savefig(os.path.join(figures_dir, 'contour_' + os.path.basename(im_test_file_list[i]).replace('.tif', '.png')),
bbox_inches='tight')
plt.clf()
plt.imshow(pred_contour[i, :, :, 0])
plt.axis('off')
plt.tight_layout()
plt.savefig(os.path.join(figures_dir, 'pred_contour_' + os.path.basename(im_test_file_list[i]).replace('.tif', '.png')),
bbox_inches='tight')
# load classifier model, and adjust input size
saved_model_filename = os.path.join(saved_models_dir, classifier_model_basename + '_model_fold_' + str(i_fold) + '.h5')
classifier_model = keras.models.load_model(saved_model_filename)
if classifier_model.input_shape[1:3] != test_dataset['im'].shape[1:3]:
classifier_model = cytometer.utils.change_input_size(classifier_model, batch_shape=test_dataset['im'].shape)
# estimate pixel-classification
pred_class = classifier_model.predict(test_dataset['im'], batch_size=4)
if DEBUG:
plt.clf()
plt.imshow(pred_class[i, :, :, 0])
    plt.contour(pred_class[i, :, :, 0] > 0.5, colors='r', linewidths=3)
plt.axis('off')
plt.tight_layout()
plt.savefig(os.path.join(figures_dir, 'pred_class_' + os.path.basename(im_test_file_list[i]).replace('.tif', '.png')),
bbox_inches='tight')
plt.clf()
plt.imshow(pred_class[i, :, :, 0] > 0.5)
plt.axis('off')
plt.tight_layout()
plt.savefig(os.path.join(figures_dir, 'pred_class_thresh_' + os.path.basename(im_test_file_list[i]).replace('.tif', '.png')),
bbox_inches='tight')
## plot of classifier ground truth
# print('file ' + str(i) + '/' + str(len(file_svg_list) - 1))
# init output
im_array_all = []
out_class_all = []
out_mask_all = []
contour_type_all = []
file_tif = os.path.join(klf14_training_dir, os.path.basename(im_test_file_list[i]))
file_tif = file_tif.replace('im_seed_nan_', '')
# find the matching .svg file by changing the file extension from .tif to .svg
file_svg = file_tif.replace('.tif', '.svg')
# open histology training image
im = Image.open(file_tif)
# make array copy
im_array = np.array(im)
# read the ground truth cell contours in the SVG file. This produces a list [contour_0, ..., contour_N-1]
# where each contour_i = [(X_0, Y_0), ..., (X_P-1, X_P-1)]
cell_contours = cytometer.data.read_paths_from_svg_file(file_svg, tag='Cell', add_offset_from_filename=False,
minimum_npoints=3)
other_contours = cytometer.data.read_paths_from_svg_file(file_svg, tag='Other', add_offset_from_filename=False,
minimum_npoints=3)
brown_contours = cytometer.data.read_paths_from_svg_file(file_svg, tag='Brown', add_offset_from_filename=False,
minimum_npoints=3)
background_contours = cytometer.data.read_paths_from_svg_file(file_svg, tag='Background',
add_offset_from_filename=False,
minimum_npoints=3)
contours = cell_contours + other_contours + brown_contours + background_contours
# make a list with the type of cell each contour is classified as
contour_type = [np.zeros(shape=(len(cell_contours),), dtype=np.uint8), # 0: white-adipocyte
np.ones(shape=(len(other_contours),), dtype=np.uint8), # 1: other types of tissue
np.ones(shape=(len(brown_contours),), dtype=np.uint8), # 1: brown cells (treated as "other" tissue)
np.zeros(shape=(len(background_contours),), dtype=np.uint8)] # 0: background
contour_type = np.concatenate(contour_type)
contour_type_all.append(contour_type)
print('Cells: ' + str(len(cell_contours)))
print('Other: ' + str(len(other_contours)))
print('Brown: ' + str(len(brown_contours)))
print('Background: ' + str(len(background_contours)))
# initialise arrays for training
out_class = np.zeros(shape=im_array.shape[0:2], dtype=np.uint8)
out_mask = np.zeros(shape=im_array.shape[0:2], dtype=np.uint8)
# loop ground truth cell contours
for j, contour in enumerate(contours):
    if DEBUG:
        plt.clf()
        plt.subplot(121)
        plt.imshow(im_array)
        # plot the contour with its index, so that individual contours can be identified
        plt.plot([p[0] for p in contour], [p[1] for p in contour])
        plt.text(contour[0][0], contour[0][1], str(j))
        xy_c = (np.mean([p[0] for p in contour]), np.mean([p[1] for p in contour]))
        plt.scatter(xy_c[0], xy_c[1])
# rasterise current ground truth segmentation
    cell_seg_gtruth = Image.new("1", im_array.shape[0:2][::-1], "black")  # mode "1": 1-bit binary pixels
draw = ImageDraw.Draw(cell_seg_gtruth)
draw.polygon(contour, outline="white", fill="white")
    cell_seg_gtruth = np.array(cell_seg_gtruth, dtype=bool)
# we are going to save the ground truth segmentation of the cell that we are going to later use in the figures
if j == 106:
cell_seg_gtruth_106 = cell_seg_gtruth.copy()
if DEBUG:
plt.subplot(122)
plt.cla()
plt.imshow(im_array)
plt.contour(cell_seg_gtruth.astype(np.uint8))
# add current object to training output and mask
out_mask[cell_seg_gtruth] = 1
out_class[cell_seg_gtruth] = contour_type[j]
if DEBUG:
plt.clf()
    aux = (1 - out_class).astype(np.float32)
aux = np.ma.masked_where(out_mask < 0.5, aux)
plt.imshow(aux)
plt.axis('off')
plt.tight_layout()
plt.savefig(os.path.join(figures_dir, 'class_' + os.path.basename(im_test_file_list[i]).replace('.tif', '.png')),
bbox_inches='tight')
## Segmentation correction CNN
# segmentation parameters
min_cell_area = 1500
max_cell_area = 100e3
min_mask_overlap = 0.8
phagocytosis = True
min_class_prop = 0.5
correction_window_len = 401
correction_smoothing = 11
batch_size = 2
# segment histology
labels, labels_class, _ \
= cytometer.utils.segment_dmap_contour_v6(im_array,
contour_model=contour_model, dmap_model=dmap_model,
classifier_model=classifier_model,
border_dilation=0)
labels = labels[0, :, :]
labels_class = labels_class[0, :, :, 0]
if DEBUG:
plt.clf()
plt.imshow(labels)
if DEBUG:
plt.clf()
plt.imshow(labels)
plt.clf()
aux = skimage.segmentation.find_boundaries(labels, mode='thick')
kernel = np.ones((3, 3), np.uint8)
aux = cv2.dilate(aux.astype(np.uint8), kernel=kernel, iterations=1)
plt.imshow(aux)
plt.axis('off')
plt.tight_layout()
plt.savefig(os.path.join(figures_dir, 'watershed_' + os.path.basename(im_test_file_list[i]).replace('.tif', '.png')),
bbox_inches='tight')
# remove labels that touch the edges, that are too small or too large, don't overlap enough with the tissue mask,
# are fully surrounded by another label or are not white adipose tissue
labels, todo_edge = cytometer.utils.clean_segmentation(
labels, min_cell_area=min_cell_area, max_cell_area=max_cell_area,
remove_edge_labels=True, mask=None, min_mask_overlap=min_mask_overlap,
phagocytosis=phagocytosis,
labels_class=labels_class, min_class_prop=min_class_prop)
if DEBUG:
plt.clf()
plt.imshow(im_array)
plt.contour(labels, levels=np.unique(labels), colors='k')
plt.contourf(labels == 0)
plt.clf()
aux = skimage.segmentation.find_boundaries(labels, mode='thick')
kernel = np.ones((3, 3), np.uint8)
aux = cv2.dilate(aux.astype(np.uint8), kernel=kernel, iterations=1)
plt.imshow(aux)
plt.axis('off')
plt.tight_layout()
plt.savefig(os.path.join(figures_dir, 'cleaned_' + os.path.basename(im_test_file_list[i]).replace('.tif', '.png')),
bbox_inches='tight')
# split image into individual labels
im_array = np.expand_dims(im_array, axis=0)
labels = np.expand_dims(labels, axis=0)
labels_class = np.expand_dims(labels_class, axis=0)
cell_seg_gtruth_106 = np.expand_dims(cell_seg_gtruth_106, axis=0)
window_mask = None
(window_labels, window_im, window_labels_class, window_cell_seg_gtruth_106), index_list, scaling_factor_list \
= cytometer.utils.one_image_per_label_v2((labels, im_array, labels_class, cell_seg_gtruth_106.astype(np.uint8)),
resize_to=(correction_window_len, correction_window_len),
resample=(Image.NEAREST, Image.LINEAR, Image.NEAREST, Image.NEAREST),
only_central_label=True, return_bbox=True)
# load correction model
saved_model_filename = os.path.join(saved_models_dir, correction_model_basename + '_model_fold_' + str(i_fold) + '.h5')
correction_model = keras.models.load_model(saved_model_filename)
if correction_model.input_shape[1:3] != window_im.shape[1:3]:
correction_model = cytometer.utils.change_input_size(correction_model, batch_shape=window_im.shape)
# multiply image by mask
window_im_masked = cytometer.utils.quality_model_mask(
np.expand_dims(window_labels, axis=-1), im=window_im, quality_model_type='-1_1')
# process (histology * mask) to estimate which pixels are underestimated and which overestimated in the segmentation
window_im_masked = correction_model.predict(window_im_masked, batch_size=batch_size)
# compute the correction to be applied to the segmentation
correction = (window_im[:, :, :, 0].copy() * 0).astype(np.float32)
correction[window_im_masked[:, :, :, 0] >= 0.5] = 1 # the segmentation went too far
correction[window_im_masked[:, :, :, 0] <= -0.5] = -1 # the segmentation fell short
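# worked example of the thresholding above: a correction CNN output of 0.7 -> +1 (segmentation
# went too far at this pixel), -0.8 -> -1 (segmentation fell short), 0.2 -> 0 (leave unchanged)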
if DEBUG:
j = 0
plt.clf()
plt.imshow(correction[j, :, :])
# plt.contour(window_labels[j, ...], colors='r', linewidths=1)
plt.axis('off')
plt.tight_layout()
plt.savefig(os.path.join(figures_dir, 'pred_correction_' + os.path.basename(im_test_file_list[i]).replace('.tif', '.png')),
bbox_inches='tight')
plt.clf()
plt.imshow(correction[j, :, :])
plt.contour(window_labels[j, ...], colors='r', linewidths=1)
plt.contour(window_cell_seg_gtruth_106[j, ...], colors='w', linewidths=1)
plt.axis('off')
plt.tight_layout()
plt.savefig(
os.path.join(figures_dir, 'pred_correction_gtruth_' + os.path.basename(im_test_file_list[i]).replace('.tif', '.png')),
bbox_inches='tight')
# correct segmentation (full operation)
window_im = window_im.astype(np.float32)
window_im /= 255.0
window_labels_corrected = cytometer.utils.correct_segmentation(
im=window_im, seg=window_labels,
correction_model=correction_model, model_type='-1_1',
smoothing=correction_smoothing,
batch_size=batch_size)
if DEBUG:
# plot input to and output from Correction CNN examples
for j in [13, 15, 18]:
plt.clf()
plt.imshow(window_im[j, :, :, :])
plt.contour(window_labels[j, ...], colors='g', linewidths=4)
plt.axis('off')
plt.tight_layout()
plt.savefig(
os.path.join(figures_dir, 'correction_input_j_' + str(j) + '_'
+ os.path.basename(im_test_file_list[i]).replace('.tif', '.png')),
bbox_inches='tight')
plt.savefig(
os.path.join(figures_dir, 'correction_input_j_' + str(j) + '_'
+ os.path.basename(im_test_file_list[i]).replace('.tif', '.svg')),
bbox_inches='tight')
plt.clf()
plt.imshow(window_im[j, :, :, :])
plt.contour(window_labels_corrected[j, ...], colors='r', linewidths=4)
plt.axis('off')
plt.tight_layout()
plt.savefig(
os.path.join(figures_dir, 'correction_output_j_' + str(j) + '_'
+ os.path.basename(im_test_file_list[i]).replace('.tif', '.png')),
bbox_inches='tight')
plt.savefig(
os.path.join(figures_dir, 'correction_output_j_' + str(j) + '_'
+ os.path.basename(im_test_file_list[i]).replace('.tif', '.svg')),
bbox_inches='tight')
# convert overlap labels in cropped images to contours (points), and add cropping window offset so that the
# contours are in the tile-window coordinates
offset_xy = np.array(index_list)[:, [2, 3]] # index_list: [i, lab, x0, y0, xend, yend]
contours = cytometer.utils.labels2contours(window_labels, offset_xy=offset_xy,
scaling_factor_xy=scaling_factor_list)
contours_corrected = cytometer.utils.labels2contours(window_labels_corrected, offset_xy=offset_xy,
scaling_factor_xy=scaling_factor_list)
# crop contours that overflow the edges
for j in range(len(contours_corrected)):
contours_corrected[j] = np.clip(contours_corrected[j], a_min=0, a_max=1000)
if DEBUG:
# plot corrected overlapping contours all together
    plt.clf()
plt.imshow(labels[0, :, :] * 0)
for j in range(len(contours_corrected)):
plt.fill(contours_corrected[j][:, 1], contours_corrected[j][:, 0],
edgecolor=(0.993248, 0.906157, 0.143936, 1.0), fill=False, lw=1.5)
plt.axis('off')
plt.tight_layout()
plt.savefig(
os.path.join(figures_dir,
'corrected_contours_' + os.path.basename(im_test_file_list[i]).replace('.tif', '.png')),
bbox_inches='tight')
plt.savefig(
os.path.join(figures_dir,
'corrected_contours_' + os.path.basename(im_test_file_list[i]).replace('.tif', '.svg')),
bbox_inches='tight')
if DEBUG:
j = 0
plt.clf()
plt.imshow(window_im[j, ...])
plt.contour(window_labels[j, ...], colors='r', linewidths=3)
plt.text(185, 210, '+1', fontsize=30)
plt.text(116, 320, '-1', fontsize=30)
plt.axis('off')
plt.tight_layout()
plt.savefig(os.path.join(figures_dir, 'im_for_correction_' + os.path.basename(im_test_file_list[i]).replace('.tif', '.png')),
bbox_inches='tight')
plt.clf()
plt.imshow(window_im[j, ...])
plt.contour(window_labels_corrected[j, ...], colors='g', linewidths=3)
plt.text(185, 210, '+1', fontsize=30)
plt.text(116, 320, '-1', fontsize=30)
plt.axis('off')
plt.tight_layout()
plt.savefig(os.path.join(figures_dir, 'corrected_seg_' + os.path.basename(im_test_file_list[i]).replace('.tif', '.png')),
bbox_inches='tight')
aux = np.array(contours[j])
plt.plot(aux[:, 0], aux[:, 1])
########################################################################################################################
## Check whether manual correction of pipeline results makes a difference
# For this experiment, we corrected by hand on AIDA the automatic segmentations produced by the pipeline,
# and compared the segmentation error.
########################################################################################################################
import matplotlib.pyplot as plt
import cytometer.data
from shapely.geometry import Polygon
import openslide
import numpy as np
import scipy.stats
import pandas as pd
from mlxtend.evaluate import permutation_test
from statsmodels.stats.multitest import multipletests
import math
# directories
klf14_root_data_dir = os.path.join(home, 'Data/cytometer_data/klf14')
annotations_dir = os.path.join(home, 'Data/cytometer_data/aida_data_Klf14_v7/annotations')
ndpi_dir = os.path.join(home, 'scan_srv2_cox/Maz Yon')
figures_dir = os.path.join(home, 'GoogleDrive/Research/20190727_cytometer_paper/figures')
metainfo_dir = os.path.join(home, 'Data/cytometer_data/klf14')
DEBUG = False
depot = 'sqwat'
# depot = 'gwat'
permutation_sample_size = 9 # the factorial of this number is the number of repetitions in the permutation tests
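# (9! = 362,880 possible orderings)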
# list of annotation files for this depot
json_annotation_files = json_annotation_files_dict[depot]
# modify filenames to select the particular segmentation we want (e.g. the automatic ones, or the manually refined ones)
json_pipeline_annotation_files = [x.replace('.json', '_exp_0097_corrected_monolayer_left.json') for x in json_annotation_files]
json_pipeline_annotation_files = [os.path.join(annotations_dir, x) for x in json_pipeline_annotation_files]
# modify filenames to select the particular segmentation we want (e.g. the automatic ones, or the manually refined ones)
json_refined_annotation_files = [x.replace('.json', '_exp_0097_refined_left.json') for x in json_annotation_files]
json_refined_annotation_files = [os.path.join(annotations_dir, x) for x in json_refined_annotation_files]
# CSV file with metainformation of all mice
metainfo_csv_file = os.path.join(metainfo_dir, 'klf14_b6ntac_meta_info.csv')
metainfo = pd.read_csv(metainfo_csv_file)
# make sure that in the boxplots PAT comes before MAT
metainfo['sex'] = metainfo['sex'].astype(pd.api.types.CategoricalDtype(categories=['f', 'm'], ordered=True))
metainfo['ko_parent'] = metainfo['ko_parent'].astype(pd.api.types.CategoricalDtype(categories=['PAT', 'MAT'], ordered=True))
metainfo['genotype'] = metainfo['genotype'].astype(pd.api.types.CategoricalDtype(categories=['KLF14-KO:WT', 'KLF14-KO:Het'], ordered=True))
quantiles = np.linspace(0, 1, 11)
quantiles = quantiles[1:-1]
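# i.e. quantiles = [0.1, 0.2, ..., 0.9], the nine inner deciles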
pipeline_area_q_all = []
refined_area_q_all = []
pipeline_area_mean_all = []
refined_area_mean_all = []
id_all = []
for i_file, (json_pipeline_annotation_file, json_refined_annotation_file) in enumerate(zip(json_pipeline_annotation_files, json_refined_annotation_files)):
# create dataframe for this image
df_common = cytometer.data.tag_values_with_mouse_info(metainfo=metainfo, s=os.path.basename(json_pipeline_annotation_file),
values=[i_file, ], values_tag='i_file',
tags_to_keep=['id', 'ko_parent', 'genotype', 'sex',
'BW', 'gWAT', 'SC'])
# mouse ID as a string
id = df_common['id'].values[0]
# we have only refined some of the segmentations for testing
    if id not in ['16.2a', '16.2b', '16.2c', '16.2d', '16.2e', '16.2f', '17.1a', '17.1b', '17.1c', '17.1d', '17.1e',
                  '17.1f', '17.2a', '17.2b', '17.2c', '17.2d', '17.2f', '17.2g', '18.1a', '18.1b', '18.1c', '18.1d']:
        continue
print('File ' + str(i_file) + '/' + str(len(json_annotation_files)-1) + ': ')
if os.path.isfile(json_pipeline_annotation_file):
print('\t' + os.path.basename(json_pipeline_annotation_file))
else:
print('\t' + os.path.basename(json_pipeline_annotation_file) + ' ... missing')
if os.path.isfile(json_refined_annotation_file):
print('\t' + os.path.basename(json_refined_annotation_file))
else:
print('\t' + os.path.basename(json_refined_annotation_file) + ' ... missing')
# ndpi file that corresponds to this .json file
ndpi_file = json_pipeline_annotation_file.replace('_exp_0097_corrected_monolayer_left.json', '.ndpi')
ndpi_file = ndpi_file.replace(annotations_dir, ndpi_dir)
# open full resolution histology slide
im = openslide.OpenSlide(ndpi_file)
# pixel size
assert (im.properties['tiff.ResolutionUnit'] == 'centimeter')
xres = 1e-2 / float(im.properties['tiff.XResolution']) # m
yres = 1e-2 / float(im.properties['tiff.YResolution']) # m
# ko = df_common['ko_parent'].values[0]
# genotype = df_common['genotype'].values[0]
# sex = df_common['sex'].values[0]
# bw = df_common['BW'].values[0]
# gwat = df_common['gWAT'].values[0]
# sc = df_common['SC'].values[0]
# read contours from AIDA annotations
pipeline_contours = cytometer.data.aida_get_contours(json_pipeline_annotation_file, layer_name='White adipocyte.*')
refined_contours = cytometer.data.aida_get_contours(json_refined_annotation_file, layer_name='White adipocyte.*')
# compute area of each contour
    pipeline_areas = [Polygon(c).area * xres * yres for c in pipeline_contours]  # (m^2; converted to um^2 with * 1e12 below)
    refined_areas = [Polygon(c).area * xres * yres for c in refined_contours]  # (m^2)
# compute HD quantiles
pipeline_area_q = scipy.stats.mstats.hdquantiles(pipeline_areas, prob=quantiles, axis=0)
refined_area_q = scipy.stats.mstats.hdquantiles(refined_areas, prob=quantiles, axis=0)
# compute average cell size
pipeline_area_mean = np.mean(pipeline_areas)
refined_area_mean = np.mean(refined_areas)
pipeline_area_q_all.append(pipeline_area_q)
refined_area_q_all.append(refined_area_q)
pipeline_area_mean_all.append(pipeline_area_mean)
refined_area_mean_all.append(refined_area_mean)
id_all.append(id)
print('Removed cells: %.2f' % (1 - len(refined_areas) / len(pipeline_areas)))
print((np.array(pipeline_area_q) - np.array(refined_area_q)) * 1e12)
if DEBUG:
plt.clf()
plt.plot(quantiles, pipeline_area_q * 1e12, label='Pipeline', linewidth=3)
plt.plot(quantiles, refined_area_q * 1e12, label='Refined', linewidth=3)
plt.tick_params(axis='both', which='major', labelsize=14)
plt.xlabel('Quantiles', fontsize=14)
        plt.ylabel(r'Area ($\mu m^2$)', fontsize=14)
plt.legend(fontsize=12)
# convert the list of vectors into a matrix
pipeline_area_q_all = np.vstack(pipeline_area_q_all)
refined_area_q_all = np.vstack(refined_area_q_all)
refined_area_mean_all = np.array(refined_area_mean_all)
pipeline_area_mean_all = np.array(pipeline_area_mean_all)
if DEBUG:
plt.clf()
pipeline_area_q_mean = np.mean(pipeline_area_q_all, axis=0)
for i in range(pipeline_area_q_all.shape[0]):
plt.plot(quantiles, 100 * (refined_area_q_all[i, :] - pipeline_area_q_all[i, :]) / pipeline_area_q_mean, label=id_all[i], linewidth=3)
plt.tick_params(axis='both', which='major', labelsize=14)
plt.xlabel('Quantiles', fontsize=14)
plt.ylabel('Area change with refinement (%)', fontsize=14)
plt.legend(fontsize=12)
plt.tight_layout()
if DEBUG:
plt.clf()
plt.boxplot(100 * (refined_area_mean_all - pipeline_area_mean_all) / pipeline_area_mean_all, labels=['Mean size'])
plt.tick_params(axis='both', which='major', labelsize=14)
plt.ylabel('Area change with refinement (%)', fontsize=14)
plt.tight_layout()
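# hedged sketch (not the original analysis): the imported permutation_test could compare pipeline
# vs. refined mean areas; mlxtend's default statistic 'x_mean != y_mean' is assumed here
if DEBUG:
    p = permutation_test(refined_area_mean_all, pipeline_area_mean_all,
                         method='approximate', num_rounds=10000, seed=0)
    print('permutation test p-val = ' + str(p))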
########################################################################################################################
## Plots of segmented full slides with quantile colourmaps
########################################################################################################################
# This is done in klf14_b6ntac_exp_0098_full_slide_size_analysis_v7
########################################################################################################################
## Analysis of time and blocks that took to compute full slide segmentation from the server logs
########################################################################################################################
# The results were noted down in klf14_b6ntac_exp_0097_full_slide_pipeline_v7_logs.csv. This file is in the GoogleDrive
# directory with the rest of the paper.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import openslide
import statsmodels.api as sm
import scipy.stats
DEBUG = False
'''Directories and filenames'''
# data paths
klf14_root_data_dir = os.path.join(home, 'Data/cytometer_data/klf14')
saved_models_dir = os.path.join(klf14_root_data_dir, 'saved_models')
ndpi_dir = os.path.join(home, 'scan_srv2_cox/Maz Yon')
annotations_dir = os.path.join(home, 'Data/cytometer_data/aida_data_Klf14_v7/annotations')
times_dir = os.path.join(home, 'GoogleDrive/Research/20190727_cytometer_paper')
# load filenames, number of blocks and time it took to segment them
times_file = 'klf14_b6ntac_exp_0097_full_slide_pipeline_v7_logs.csv'
times_file = os.path.join(times_dir, times_file)
times_df = pd.read_csv(times_file)
# read rough masks of the files in the dataframe, to measure the tissue area in each
for i, file in enumerate(times_df['File']):
# filename of the coarse tissue mask
coarse_mask_file = os.path.join(annotations_dir, file + '_rough_mask.npz')
# load coarse tissue mask
with np.load(coarse_mask_file) as data:
mask = data['lores_istissue0']
if DEBUG:
plt.clf()
plt.imshow(mask)
# open full resolution histology slide to get pixel size
ndpi_file = os.path.join(ndpi_dir, file + '.ndpi')
im = openslide.OpenSlide(ndpi_file)
# pixel size
assert (im.properties['tiff.ResolutionUnit'] == 'centimeter')
xres = 1e-2 / float(im.properties['tiff.XResolution'])
yres = 1e-2 / float(im.properties['tiff.YResolution'])
# scaling factor to get pixel size in the coarse mask
k = np.array(im.dimensions) / mask.shape[::-1]
    # add tissue area to the dataframe. Areas refer to the full resolution slide, even though they
    # are computed from the coarse mask
times_df.loc[i, 'tissue_area_pix'] = np.count_nonzero(mask) * k[0] * k[1]
times_df.loc[i, 'tissue_area_mm2'] = times_df.loc[i, 'tissue_area_pix'] * xres * yres * 1e6
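    # units check: coarse-mask pixel count * k[0] * k[1] gives full resolution pixels;
    # * xres * yres (m/pixel each) gives m^2; * 1e6 converts m^2 to mm^2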
if DEBUG:
# plot tissue area vs. time to compute
plt.clf()
plt.scatter(times_df['tissue_area_mm2'], times_df['Blocks Time (s)'])
# fit linear model
model = sm.formula.ols('Q("Blocks Time (s)") ~ tissue_area_mm2', data=times_df).fit()
print(model.summary())
# Pearson coefficient
rho, rho_p = scipy.stats.pearsonr(times_df['tissue_area_mm2'], times_df['Blocks Time (s)'])
print('Pearson coeff = ' + str(rho))
print('p-val = ' + str(rho_p))
# tissue area
print('Tissue area')
print('min = ' + str(np.min(times_df['tissue_area_mm2'])) + ' mm2 = ' + str(np.min(times_df['tissue_area_pix']) * 1e-6) + ' Mpix')
print('max = ' + str(np.max(times_df['tissue_area_mm2'])) + ' mm2 = ' + str(np.max(times_df['tissue_area_pix']) * 1e-6) + ' Mpix')
# corresponding time to compute
print('Time to compute')
time_pred = model.predict(times_df['tissue_area_mm2'])
print('min = ' + str(np.min(time_pred) / 3600) + ' h')
print('max = ' + str(np.max(time_pred) / 3600) + ' h')
tissue_area_mm2_q1 = scipy.stats.mstats.hdquantiles(times_df['tissue_area_mm2'], prob=0.25, axis=0)
tissue_area_mm2_q2 = scipy.stats.mstats.hdquantiles(times_df['tissue_area_mm2'], prob=0.5, axis=0)
tissue_area_mm2_q3 = scipy.stats.mstats.hdquantiles(times_df['tissue_area_mm2'], prob=0.75, axis=0)
aux_df = times_df.loc[0:2, :].copy()
aux_df.loc[0, 'tissue_area_mm2'] = tissue_area_mm2_q1
aux_df.loc[1, 'tissue_area_mm2'] = tissue_area_mm2_q2
aux_df.loc[2, 'tissue_area_mm2'] = tissue_area_mm2_q3
tissue_time_pred_q = model.predict(aux_df)
print('q1 = ' + str(tissue_area_mm2_q1) + ' mm2 -> '
+ str(tissue_time_pred_q[0] / 3600) + ' h')
print('q2 = ' + str(tissue_area_mm2_q2) + ' mm2 -> '
+ str(tissue_time_pred_q[1] / 3600) + ' h')
print('q3 = ' + str(tissue_area_mm2_q3) + ' mm2 -> '
+ str(tissue_time_pred_q[2] / 3600) + ' h')
########################################################################################################################
## Segmentation validation
########################################################################################################################
# This is done in klf14_b6ntac_exp_0096_pipeline_v7_validation.py
########################################################################################################################
## Time that it takes to do Auto vs. Corrected segmentation
########################################################################################################################
import numpy as np
import time
import pandas as pd
import pickle
import matplotlib.pyplot as plt
import cytometer.data
import cytometer.utils
from PIL import Image, ImageDraw, ImageEnhance
DEBUG = False
# data paths
klf14_root_data_dir = os.path.join(home, 'Data/cytometer_data/klf14')
saved_models_dir = os.path.join(klf14_root_data_dir, 'saved_models')
metainfo_dir = os.path.join(home, 'Data/cytometer_data/klf14')
times_dir = os.path.join(home, 'GoogleDrive/Research/20190727_cytometer_paper')
saved_kfolds_filename = 'klf14_b6ntac_exp_0079_generate_kfolds.pickle'
saved_extra_kfolds_filename = 'klf14_b6ntac_exp_0094_generate_extra_training_images.pickle'
# model names
dmap_model_basename = 'klf14_b6ntac_exp_0086_cnn_dmap'
contour_model_basename = 'klf14_b6ntac_exp_0091_cnn_contour_after_dmap'
classifier_model_basename = 'klf14_b6ntac_exp_0095_cnn_tissue_classifier_fcn'
correction_model_basename = 'klf14_b6ntac_exp_0089_cnn_segmentation_correction_overlapping_scaled_contours'
# training window length
training_window_len = 401
# segmentation parameters
min_cell_area = 1500
max_cell_area = 100e3
min_mask_overlap = 0.8
phagocytosis = True
# min_class_prop = 0.5
# correction_window_len = 401
# correction_smoothing = 11
batch_size = 2
'''Load folds'''
# load list of images, and indices for training vs. testing indices
saved_kfolds_filename = os.path.join(saved_models_dir, saved_kfolds_filename)
with open(saved_kfolds_filename, 'rb') as f:
aux = pickle.load(f)
file_svg_list = aux['file_list']
idx_test_all = aux['idx_test']
idx_train_all = aux['idx_train']
# correct home directory
file_svg_list = [x.replace('/users/rittscher/rcasero', home) for x in file_svg_list]
file_svg_list = [x.replace('/home/rcasero', home) for x in file_svg_list]
# number of images
n_im = len(file_svg_list)
# number of folds
n_folds = len(idx_test_all)
# CSV file with metainformation of all mice
metainfo_csv_file = os.path.join(metainfo_dir, 'klf14_b6ntac_meta_info.csv')
metainfo = pd.read_csv(metainfo_csv_file)
# # correct home directory in file paths
# file_svg_list = cytometer.data.change_home_directory(list(file_svg_list), '/home/rcasero', home, check_isfile=True)
# file_svg_list = cytometer.data.change_home_directory(list(file_svg_list), '/users/rittscher/rcasero', home, check_isfile=True)
## compute and save results (you can skip this section if it has been done before, and go straight
## to the section where the results are loaded)
# load data computed in 0096 validation script
data_filename = os.path.join(saved_models_dir, 'klf14_b6ntac_exp_0096_pipeline_v7_validation' + '_data.npz')
with np.load(data_filename) as data:
im_array_all = data['im_array_all']
rough_mask_all = data['rough_mask_all']
out_class_all = 1 - data['out_class_all'] # encode as 0: other, 1: WAT
out_mask_all = data['out_mask_all']
# init dataframes
df_manual_all = pd.DataFrame()
df_auto_all = pd.DataFrame()
# init time vectors
time_auto = []
time_corrected = []
for i_fold in range(len(idx_test_all)):
# start timer
t0 = time.time()
''' Get the images/masks/classification that were not used for training of this particular fold '''
print('# Fold ' + str(i_fold) + '/' + str(len(idx_test_all) - 1))
# test and training image indices. These indices refer to file_list
idx_test = idx_test_all[i_fold]
# list of test files (used later for the dataframe)
file_list_test = np.array(file_svg_list)[idx_test]
print('## len(idx_test) = ' + str(len(idx_test)))
# split data into training and testing
im_array_test = im_array_all[idx_test, :, :, :]
rough_mask_test = rough_mask_all[idx_test, :, :]
out_class_test = out_class_all[idx_test, :, :, :]
out_mask_test = out_mask_all[idx_test, :, :]
''' Segmentation into non-overlapping objects '''
# names of contour, dmap and tissue classifier models
contour_model_filename = \
os.path.join(saved_models_dir, contour_model_basename + '_model_fold_' + str(i_fold) + '.h5')
dmap_model_filename = \
os.path.join(saved_models_dir, dmap_model_basename + '_model_fold_' + str(i_fold) + '.h5')
classifier_model_filename = \
os.path.join(saved_models_dir, classifier_model_basename + '_model_fold_' + str(i_fold) + '.h5')
# segment histology
pred_seg_test, pred_class_test, _ \
= cytometer.utils.segment_dmap_contour_v6(im_array_test,
dmap_model=dmap_model_filename,
contour_model=contour_model_filename,
classifier_model=classifier_model_filename,
border_dilation=0, batch_size=batch_size)
if DEBUG:
i = 0
plt.clf()
plt.subplot(221)
plt.cla()
plt.imshow(im_array_test[i, :, :, :])
plt.axis('off')
plt.subplot(222)
plt.cla()
plt.imshow(im_array_test[i, :, :, :])
plt.contourf(pred_class_test[i, :, :, 0].astype(np.float32), alpha=0.5)
plt.axis('off')
plt.subplot(223)
plt.cla()
plt.imshow(im_array_test[i, :, :, :])
plt.contour(pred_seg_test[i, :, :], levels=np.unique(pred_seg_test[i, :, :]), colors='k')
plt.axis('off')
plt.subplot(224)
plt.cla()
plt.imshow(im_array_test[i, :, :, :])
plt.contourf(pred_class_test[i, :, :, 0].astype(np.float32), alpha=0.5)
plt.contour(pred_seg_test[i, :, :], levels=np.unique(pred_seg_test[i, :, :]), colors='k')
plt.axis('off')
plt.tight_layout()
# clean segmentation: remove labels that are too small or that don't overlap enough with
# the rough foreground mask
pred_seg_test, _ \
= cytometer.utils.clean_segmentation(pred_seg_test, min_cell_area=min_cell_area, max_cell_area=max_cell_area,
remove_edge_labels=False,
mask=rough_mask_test, min_mask_overlap=min_mask_overlap,
phagocytosis=phagocytosis, labels_class=None)
# record processing time
time_auto.append((time.time() - t0) / im_array_test.shape[0])
if DEBUG:
plt.clf()
aux = np.stack((rough_mask_test[i, :, :],) * 3, axis=2)
plt.imshow(im_array_test[i, :, :, :] * aux)
plt.contour(pred_seg_test[i, ...], levels=np.unique(pred_seg_test[i, ...]), colors='k')
plt.axis('off')
''' Split image into individual labels and correct segmentation to take overlaps into account '''
(window_seg_test, window_im_test, window_class_test, window_rough_mask_test), index_list, scaling_factor_list \
= cytometer.utils.one_image_per_label_v2((pred_seg_test, im_array_test,
pred_class_test[:, :, :, 0].astype(np.uint8),
rough_mask_test.astype(np.uint8)),
resize_to=(training_window_len, training_window_len),
                                             resample=(Image.NEAREST, Image.BILINEAR, Image.NEAREST, Image.NEAREST),
only_central_label=True)
# correct segmentations
correction_model_filename = os.path.join(saved_models_dir,
correction_model_basename + '_model_fold_' + str(i_fold) + '.h5')
window_seg_corrected_test = cytometer.utils.correct_segmentation(im=window_im_test, seg=window_seg_test,
correction_model=correction_model_filename,
model_type='-1_1', batch_size=batch_size,
smoothing=11)
# record processing time
    # elapsed time since t0 includes the Auto phase, so subtract the per-image Auto time
    time_corrected.append((time.time() - t0) / im_array_test.shape[0] - time_auto[-1])
# save for later use
times_file = os.path.join(times_dir, 'klf14_b6ntac_exp_0099_time_comparison_auto_corrected.npz')
np.savez(times_file, time_auto=time_auto, time_corrected=time_corrected)
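    # to reload the timings later (a sketch):
    #   with np.load(times_file) as aux:
    #       time_auto, time_corrected = aux['time_auto'], aux['time_corrected']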
# compute what proportion of time the algorithm spends on the Auto segmentation vs. corrected segmentation
time_auto_ratio = np.array(time_auto) / (np.array(time_auto) + np.array(time_corrected))
print('Time Auto ratio:')
print('mean = ' + str(100 * np.mean(time_auto_ratio)) + ' %')
print('std = ' + str(100 * np.std(time_auto_ratio)) + ' %')
print('Total time as a multiple of Auto-only time:')
print('mean = ' + 'x' + str(np.mean(1 / time_auto_ratio)))
print('std = ' + 'x' + str(np.std(1 / time_auto_ratio)))
time_corrected_ratio = np.array(time_corrected) / (np.array(time_auto) + np.array(time_corrected))
print('Time Corrected ratio:')
print('mean = ' + str(100 * np.mean(time_corrected_ratio)) + ' %')
print('std = ' + str(100 * np.std(time_corrected_ratio)) + ' %')
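# worked example of the ratios above: if the Auto step takes 10 s/image and the Corrected step
# 30 s/image, then time_auto_ratio = 10 / (10 + 30) = 0.25 (mean = 25 %), and the full
# pipeline costs 1 / 0.25 = x4.0 the Auto-only time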
########################################################################################################################
## Cell populations from automatically segmented images in two depots: SQWAT and GWAT.
## This section needs to be run once per depot. The results are saved, so later sections can
## load the data from both depots together.
### USED IN PAPER
########################################################################################################################
import matplotlib.pyplot as plt
import cytometer.data
from shapely.geometry import Polygon
import openslide
import numpy as np
import scipy.stats
import pandas as pd
from mlxtend.evaluate import permutation_test
from statsmodels.stats.multitest import multipletests
import math
from PIL import Image
# directories
klf14_root_data_dir = os.path.join(home, 'Data/cytometer_data/klf14')
annotations_dir = os.path.join(home, 'Data/cytometer_data/aida_data_Klf14_v8/annotations')
ndpi_dir = os.path.join(home, 'scan_srv2_cox/Maz Yon')
figures_dir = os.path.join(home, 'GoogleDrive/Research/20190727_cytometer_paper/figures')
metainfo_dir = os.path.join(home, 'Data/cytometer_data/klf14')
DEBUG = False
permutation_sample_size = 9 # the factorial of this number is the number of repetitions in the permutation tests
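# e.g. permutation_sample_size = 9 gives math.factorial(9) = 362880 rounds per test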
for depot in ['sqwat', 'gwat']:
# list of annotation files for this depot
json_annotation_files = json_annotation_files_dict[depot]
# modify filenames to select the particular segmentation we want (e.g. the automatic ones, or the manually refined ones)
json_annotation_files = [x.replace('.json', '_exp_0097_corrected.json') for x in json_annotation_files]
json_annotation_files = [os.path.join(annotations_dir, x) for x in json_annotation_files]
# CSV file with metainformation of all mice
metainfo_csv_file = os.path.join(metainfo_dir, 'klf14_b6ntac_meta_info.csv')
metainfo = pd.read_csv(metainfo_csv_file)
# make sure that in the boxplots PAT comes before MAT
metainfo['sex'] = metainfo['sex'].astype(pd.api.types.CategoricalDtype(categories=['f', 'm'], ordered=True))
metainfo['ko_parent'] = metainfo['ko_parent'].astype(pd.api.types.CategoricalDtype(categories=['PAT', 'MAT'], ordered=True))
metainfo['genotype'] = metainfo['genotype'].astype(pd.api.types.CategoricalDtype(categories=['KLF14-KO:WT', 'KLF14-KO:Het'], ordered=True))
quantiles = np.linspace(0, 1, 11)
quantiles = quantiles[1:-1]
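    # quantiles is now the 9 interior deciles, i.e. [0.1, 0.2, ..., 0.9]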
# compute areas of the rough masks
filename_rough_mask_area = os.path.join(figures_dir, 'klf14_b6ntac_exp_0099_rough_mask_area_' + depot + '.npz')
id_all = []
rough_mask_area_all = []
for i_file, json_file in enumerate(json_annotation_files):
print('File ' + str(i_file) + '/' + str(len(json_annotation_files)-1) + ': ' + os.path.basename(json_file))
if not os.path.isfile(json_file):
print('Missing file')
# continue
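            # note: no 'continue' here, presumably because the rough mask area only needs the
            # .ndpi and _rough_mask.npz files, which may exist even when the corrected .json doesn't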
# open full resolution histology slide
ndpi_file = json_file.replace('_exp_0097_corrected.json', '.ndpi')
ndpi_file = os.path.join(ndpi_dir, os.path.basename(ndpi_file))
im = openslide.OpenSlide(ndpi_file)
# pixel size
assert (im.properties['tiff.ResolutionUnit'] == 'centimeter')
xres = 1e-2 / float(im.properties['tiff.XResolution'])
yres = 1e-2 / float(im.properties['tiff.YResolution'])
# load mask
rough_mask_file = json_file.replace('_exp_0097_corrected.json', '_rough_mask.npz')
rough_mask_file = os.path.join(annotations_dir, rough_mask_file)
        if not os.path.isfile(rough_mask_file):
            print('No mask: ' + rough_mask_file)
            continue
aux = np.load(rough_mask_file)
lores_istissue0 = aux['lores_istissue0']
if DEBUG:
foo = aux['im_downsampled']
            foo = Image.fromarray(foo)
            foo = foo.resize(tuple((np.round(np.array(foo.size[0:2]) / 4)).astype(int)))
plt.imshow(foo)
plt.title(os.path.basename(ndpi_file))
# compute scaling factor between downsampled mask and original image
size_orig = np.array(im.dimensions) # width, height
size_downsampled = np.array(lores_istissue0.shape)[::-1] # width, height
downsample_factor = size_orig / size_downsampled # width, height
# create dataframe for this image
rough_mask_area = np.count_nonzero(lores_istissue0) * (xres * downsample_factor[0]) * (yres * downsample_factor[1]) # m^2
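        # worked example of the conversion above: each pixel of the downsampled mask covers
        # (xres * downsample_factor[0]) x (yres * downsample_factor[1]) m^2 of full-resolution
        # tissue, so multiplying by the foreground pixel count gives the area in m^2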
df_common = cytometer.data.tag_values_with_mouse_info(metainfo=metainfo, s=os.path.basename(json_file),
values=[rough_mask_area,], values_tag='SC_rough_mask_area',
tags_to_keep=['id', 'ko_parent', 'sex'])
# mouse ID as a string
id = df_common['id'].values[0]
# correct area because most slides contain two slices, but some don't
if depot == 'sqwat' and not (id in ['16.2d', '17.1e', '17.2g', '16.2e', '18.1f', '37.4a', '37.2e']):
# two slices in the slide, so slice area is approx. one half
rough_mask_area /= 2
elif depot == 'gwat' and not (id in ['36.1d', '16.2a', '16.2b', '16.2c', '16.2d', '16.2e', '17.1b', '17.1d',
'17.1e', '17.1f', '17.2c', '17.2d', '17.2f', '17.2g', '18.1b', '18.1c',
'18.1d', '18.2a', '18.2c', '18.2d', '18.2f', '18.2g', '18.3c', '19.1a',
'19.2e', '19.2f', '19.2g', '36.3d', '37.2e', '37.2f', '37.2g', '37.2h',
'37.3a', '37.4a', '37.4b', '39.2d']):
# two slices in the slide, so slice area is approx. one half
rough_mask_area /= 2
# add to output
id_all.append(id)
rough_mask_area_all.append(rough_mask_area)
# save results
np.savez_compressed(filename_rough_mask_area, id_all=id_all, rough_mask_area_all=rough_mask_area_all)
# load or compute area quantiles
filename_quantiles = os.path.join(figures_dir, 'klf14_b6ntac_exp_0099_area_quantiles_' + depot + '.npz')
if os.path.isfile(filename_quantiles):
with np.load(filename_quantiles) as aux:
area_mean_all = aux['area_mean_all']
area_q_all = aux['area_q_all']
id_all = aux['id_all']
ko_all = aux['ko_all']
genotype_all = aux['genotype_all']
            sex_all = aux['sex_all']
            # also load the covariates saved alongside the quantiles (see np.savez_compressed below)
            bw_all = aux['bw_all']
            gwat_all = aux['gwat_all']
            sc_all = aux['sc_all']
else:
area_mean_all = []
area_q_all = []
id_all = []
ko_all = []
genotype_all = []
sex_all = []
bw_all = []
gwat_all = []
sc_all = []
for i_file, json_file in enumerate(json_annotation_files):
print('File ' + str(i_file) + '/' + str(len(json_annotation_files)-1) + ': ' + os.path.basename(json_file))
if not os.path.isfile(json_file):
print('Missing file')
continue
# ndpi file that corresponds to this .json file
ndpi_file = json_file.replace('_exp_0097_corrected.json', '.ndpi')
ndpi_file = ndpi_file.replace(annotations_dir, ndpi_dir)
# open full resolution histology slide
im = openslide.OpenSlide(ndpi_file)
# pixel size
assert (im.properties['tiff.ResolutionUnit'] == 'centimeter')
xres = 1e-2 / float(im.properties['tiff.XResolution']) # m
yres = 1e-2 / float(im.properties['tiff.YResolution']) # m
# create dataframe for this image
df_common = cytometer.data.tag_values_with_mouse_info(metainfo=metainfo, s=os.path.basename(json_file),
values=[i_file,], values_tag='i_file',
tags_to_keep=['id', 'ko_parent', 'genotype', 'sex',
'BW', 'gWAT', 'SC'])
# mouse ID as a string
id = df_common['id'].values[0]
ko = df_common['ko_parent'].values[0]
genotype = df_common['genotype'].values[0]
sex = df_common['sex'].values[0]
bw = df_common['BW'].values[0]
gwat = df_common['gWAT'].values[0]
sc = df_common['SC'].values[0]
# read contours from AIDA annotations
contours = cytometer.data.aida_get_contours(os.path.join(annotations_dir, json_file), layer_name='White adipocyte.*')
# compute area of each contour
            areas = [Polygon(c).area * xres * yres for c in contours]  # (m^2)
# compute average area of all contours
area_mean = np.mean(areas)
# compute HD quantiles
area_q = scipy.stats.mstats.hdquantiles(areas, prob=quantiles, axis=0)
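            if DEBUG:
                # sanity check (a minimal sketch, not part of the analysis): for a reasonably
                # large number of contours, the Harrell-Davis estimates should be close to the
                # plain sample quantiles
                print(np.c_[area_q, np.quantile(areas, quantiles)])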
# append to totals
area_mean_all.append(area_mean)
area_q_all.append(area_q)
id_all.append(id)
ko_all.append(ko)
genotype_all.append(genotype)
sex_all.append(sex)
bw_all.append(bw)
gwat_all.append(gwat)
sc_all.append(sc)
# reorder from largest to smallest final area value
area_mean_all = np.array(area_mean_all)
area_q_all = np.array(area_q_all)
id_all = np.array(id_all)
ko_all = np.array(ko_all)
genotype_all = np.array(genotype_all)
sex_all = np.array(sex_all)
bw_all = np.array(bw_all)
gwat_all = np.array(gwat_all)
sc_all = np.array(sc_all)
idx = np.argsort(area_q_all[:, -1])
idx = idx[::-1] # sort from larger to smaller
area_mean_all = area_mean_all[idx]
area_q_all = area_q_all[idx, :]
id_all = id_all[idx]
ko_all = ko_all[idx]
genotype_all = genotype_all[idx]
sex_all = sex_all[idx]
bw_all = bw_all[idx]
gwat_all = gwat_all[idx]
sc_all = sc_all[idx]
np.savez_compressed(filename_quantiles, area_mean_all=area_mean_all, area_q_all=area_q_all, id_all=id_all,
ko_all=ko_all, genotype_all=genotype_all, sex_all=sex_all,
bw_all=bw_all, gwat_all=gwat_all, sc_all=sc_all)
if DEBUG:
plt.clf()
for i in range(len(area_q_all)):
# plot
if ko_all[i] == 'PAT':
color = 'g'
elif ko_all[i] == 'MAT':
color = 'r'
else:
            raise ValueError('Unknown ko value: ' + ko_all[i])
if sex_all[i] == 'f':
plt.subplot(121)
plt.plot(quantiles, area_q_all[i] * 1e12 * 1e-3, color=color)
elif sex_all[i] == 'm':
plt.subplot(122)
plt.plot(quantiles, area_q_all[i] * 1e12 * 1e-3, color=color)
else:
            raise ValueError('Unknown sex value: ' + sex_all[i])
legend_f = [i + ' ' + j.replace('KLF14-KO:', '') for i, j
in zip(id_all[sex_all == 'f'], genotype_all[sex_all == 'f'])]
legend_m = [i + ' ' + j.replace('KLF14-KO:', '') for i, j
in zip(id_all[sex_all == 'm'], genotype_all[sex_all == 'm'])]
plt.subplot(121)
plt.title('Female', fontsize=14)
plt.tick_params(labelsize=14)
plt.xlabel('Quantile', fontsize=14)
    plt.ylabel(r'Area ($10^{3}\ \mu m^2$)', fontsize=14)
plt.legend(legend_f, fontsize=12)
plt.subplot(122)
plt.title('Male', fontsize=14)
plt.tick_params(labelsize=14)
plt.xlabel('Quantile', fontsize=14)
plt.legend(legend_m, fontsize=12)
# DEBUG:
# area_q_all = np.vstack((area_q_all, area_q_all))
# id_all = np.hstack((id_all, id_all))
# ko_all = np.hstack((ko_all, ko_all))
# genotype_all = np.hstack((genotype_all, genotype_all))
# sex_all = np.hstack((sex_all, sex_all))
# compute variability of area values for each quantile
area_q_f_pat = area_q_all[(sex_all == 'f') * (ko_all == 'PAT'), :]
area_q_m_pat = area_q_all[(sex_all == 'm') * (ko_all == 'PAT'), :]
area_q_f_mat = area_q_all[(sex_all == 'f') * (ko_all == 'MAT'), :]
area_q_m_mat = area_q_all[(sex_all == 'm') * (ko_all == 'MAT'), :]
area_interval_f_pat = scipy.stats.mstats.hdquantiles(area_q_f_pat, prob=[0.025, 0.25, 0.5, 0.75, 0.975], axis=0)
area_interval_m_pat = scipy.stats.mstats.hdquantiles(area_q_m_pat, prob=[0.025, 0.25, 0.5, 0.75, 0.975], axis=0)
area_interval_f_mat = scipy.stats.mstats.hdquantiles(area_q_f_mat, prob=[0.025, 0.25, 0.5, 0.75, 0.975], axis=0)
area_interval_m_mat = scipy.stats.mstats.hdquantiles(area_q_m_mat, prob=[0.025, 0.25, 0.5, 0.75, 0.975], axis=0)
area_q_f_pat_wt = area_q_all[(sex_all == 'f') * (ko_all == 'PAT') * (genotype_all == 'KLF14-KO:WT'), :]
area_q_m_pat_wt = area_q_all[(sex_all == 'm') * (ko_all == 'PAT') * (genotype_all == 'KLF14-KO:WT'), :]
area_q_f_mat_wt = area_q_all[(sex_all == 'f') * (ko_all == 'MAT') * (genotype_all == 'KLF14-KO:WT'), :]
area_q_m_mat_wt = area_q_all[(sex_all == 'm') * (ko_all == 'MAT') * (genotype_all == 'KLF14-KO:WT'), :]
area_interval_f_pat_wt = scipy.stats.mstats.hdquantiles(area_q_f_pat_wt, prob=[0.025, 0.25, 0.5, 0.75, 0.975], axis=0)
area_interval_m_pat_wt = scipy.stats.mstats.hdquantiles(area_q_m_pat_wt, prob=[0.025, 0.25, 0.5, 0.75, 0.975], axis=0)
area_interval_f_mat_wt = scipy.stats.mstats.hdquantiles(area_q_f_mat_wt, prob=[0.025, 0.25, 0.5, 0.75, 0.975], axis=0)
area_interval_m_mat_wt = scipy.stats.mstats.hdquantiles(area_q_m_mat_wt, prob=[0.025, 0.25, 0.5, 0.75, 0.975], axis=0)
area_q_f_pat_het = area_q_all[(sex_all == 'f') * (ko_all == 'PAT') * (genotype_all == 'KLF14-KO:Het'), :]
area_q_m_pat_het = area_q_all[(sex_all == 'm') * (ko_all == 'PAT') * (genotype_all == 'KLF14-KO:Het'), :]
area_q_f_mat_het = area_q_all[(sex_all == 'f') * (ko_all == 'MAT') * (genotype_all == 'KLF14-KO:Het'), :]
area_q_m_mat_het = area_q_all[(sex_all == 'm') * (ko_all == 'MAT') * (genotype_all == 'KLF14-KO:Het'), :]
area_interval_f_pat_het = scipy.stats.mstats.hdquantiles(area_q_f_pat_het, prob=[0.025, 0.25, 0.5, 0.75, 0.975], axis=0)
area_interval_m_pat_het = scipy.stats.mstats.hdquantiles(area_q_m_pat_het, prob=[0.025, 0.25, 0.5, 0.75, 0.975], axis=0)
area_interval_f_mat_het = scipy.stats.mstats.hdquantiles(area_q_f_mat_het, prob=[0.025, 0.25, 0.5, 0.75, 0.975], axis=0)
area_interval_m_mat_het = scipy.stats.mstats.hdquantiles(area_q_m_mat_het, prob=[0.025, 0.25, 0.5, 0.75, 0.975], axis=0)
n_f_pat_wt = area_q_f_pat_wt.shape[0]
n_m_pat_wt = area_q_m_pat_wt.shape[0]
n_f_mat_wt = area_q_f_mat_wt.shape[0]
n_m_mat_wt = area_q_m_mat_wt.shape[0]
n_f_pat_het = area_q_f_pat_het.shape[0]
n_m_pat_het = area_q_m_pat_het.shape[0]
n_f_mat_het = area_q_f_mat_het.shape[0]
n_m_mat_het = area_q_m_mat_het.shape[0]
if DEBUG:
# plots of female median ECDF^-1 with quartile shaded area
plt.clf()
plt.subplot(121)
plt.plot(quantiles, area_interval_f_pat_wt[2, :] * 1e12 * 1e-3, 'C0', linewidth=3, label=str(n_f_pat_wt) + ' WT')
plt.fill_between(quantiles, area_interval_f_pat_wt[1, :] * 1e12 * 1e-3, area_interval_f_pat_wt[3, :] * 1e12 * 1e-3,
facecolor='C0', alpha=0.3)
plt.plot(quantiles, area_interval_f_pat_het[2, :] * 1e12 * 1e-3, 'k', linewidth=3, label=str(n_f_pat_het) + ' Het')
plt.fill_between(quantiles, area_interval_f_pat_het[1, :] * 1e12 * 1e-3, area_interval_f_pat_het[3, :] * 1e12 * 1e-3,
facecolor='k', alpha=0.3)
plt.title('Female PAT', fontsize=14)
plt.xlabel('Quantile', fontsize=14)
    plt.ylabel(r'White adipocyte area ($\cdot 10^3 \mu$m$^2$)', fontsize=14)
plt.tick_params(axis='both', which='major', labelsize=14)
plt.legend(loc='upper left', prop={'size': 12})
plt.ylim(0, 15)
plt.tight_layout()
plt.subplot(122)
plt.plot(quantiles, area_interval_f_mat_wt[2, :] * 1e12 * 1e-3, 'C0', linewidth=3, label=str(n_f_mat_wt) + ' WT')
plt.fill_between(quantiles, area_interval_f_mat_wt[1, :] * 1e12 * 1e-3, area_interval_f_mat_wt[3, :] * 1e12 * 1e-3,
facecolor='C0', alpha=0.3)
plt.plot(quantiles, area_interval_f_mat_het[2, :] * 1e12 * 1e-3, 'k', linewidth=3, label=str(n_f_mat_het) + ' Het')
plt.fill_between(quantiles, area_interval_f_mat_het[1, :] * 1e12 * 1e-3, area_interval_f_mat_het[3, :] * 1e12 * 1e-3,
facecolor='k', alpha=0.3)
plt.title('Female MAT', fontsize=14)
plt.xlabel('Quantile', fontsize=14)
plt.tick_params(axis='both', which='major', labelsize=14)
plt.legend(loc='upper left', prop={'size': 12})
plt.ylim(0, 15)
plt.tight_layout()
plt.savefig(os.path.join(figures_dir, 'exp_0099_' + depot + '_cell_area_female_pat_vs_mat_bands.svg'))
plt.savefig(os.path.join(figures_dir, 'exp_0099_' + depot + '_cell_area_female_pat_vs_mat_bands.png'))
if DEBUG:
# plots of male median ECDF^-1 with quartile shaded area
plt.clf()
plt.subplot(121)
plt.plot(quantiles, area_interval_m_pat_wt[2, :] * 1e12 * 1e-3, 'C0', linewidth=3, label=str(n_m_pat_wt) + ' WT')
plt.fill_between(quantiles, area_interval_m_pat_wt[1, :] * 1e12 * 1e-3, area_interval_m_pat_wt[3, :] * 1e12 * 1e-3,
facecolor='C0', alpha=0.3)
plt.plot(quantiles, area_interval_m_pat_het[2, :] * 1e12 * 1e-3, 'k', linewidth=3, label=str(n_m_pat_het) + ' Het')
plt.fill_between(quantiles, area_interval_m_pat_het[1, :] * 1e12 * 1e-3, area_interval_m_pat_het[3, :] * 1e12 * 1e-3,
facecolor='k', alpha=0.3)
plt.title('Male PAT', fontsize=14)
plt.xlabel('Quantile', fontsize=14)
    plt.ylabel(r'White adipocyte area ($\cdot 10^3 \mu$m$^2$)', fontsize=14)
plt.tick_params(axis='both', which='major', labelsize=14)
plt.legend(loc='upper left', prop={'size': 12})
plt.ylim(0, 16)
plt.tight_layout()
plt.subplot(122)
plt.plot(quantiles, area_interval_m_mat_wt[2, :] * 1e12 * 1e-3, 'C0', linewidth=3, label=str(n_m_mat_wt) + ' WT')
plt.fill_between(quantiles, area_interval_m_mat_wt[1, :] * 1e12 * 1e-3, area_interval_m_mat_wt[3, :] * 1e12 * 1e-3,
facecolor='C0', alpha=0.3)
plt.plot(quantiles, area_interval_m_mat_het[2, :] * 1e12 * 1e-3, 'k', linewidth=3, label=str(n_m_mat_het) + ' Het')
plt.fill_between(quantiles, area_interval_m_mat_het[1, :] * 1e12 * 1e-3, area_interval_m_mat_het[3, :] * 1e12 * 1e-3,
facecolor='k', alpha=0.3)
plt.title('Male MAT', fontsize=14)
plt.xlabel('Quantile', fontsize=14)
plt.tick_params(axis='both', which='major', labelsize=14)
plt.legend(loc='upper left', prop={'size': 12})
plt.ylim(0, 16)
plt.tight_layout()
plt.savefig(os.path.join(figures_dir, 'exp_0099_' + depot + '_cell_area_male_pat_vs_mat_bands.svg'))
plt.savefig(os.path.join(figures_dir, 'exp_0099_' + depot + '_cell_area_male_pat_vs_mat_bands.png'))
filename_pvals = os.path.join(figures_dir, 'klf14_b6ntac_exp_0099_pvals_' + depot + '.npz')
if os.path.isfile(filename_pvals):
with np.load(filename_pvals) as aux:
pval_perc_f_pat2mat = aux['pval_perc_f_pat2mat']
pval_perc_m_pat2mat = aux['pval_perc_m_pat2mat']
pval_perc_f_pat_wt2het = aux['pval_perc_f_pat_wt2het']
pval_perc_f_mat_wt2het = aux['pval_perc_f_mat_wt2het']
pval_perc_m_pat_wt2het = aux['pval_perc_m_pat_wt2het']
pval_perc_m_mat_wt2het = aux['pval_perc_m_mat_wt2het']
permutation_sample_size = aux['permutation_sample_size']
else:
# test whether the median values are different enough between two groups
func = lambda x, y: np.abs(scipy.stats.mstats.hdquantiles(x, prob=0.5, axis=0).data[0]
- scipy.stats.mstats.hdquantiles(y, prob=0.5, axis=0).data[0])
# func = lambda x, y: np.abs(np.mean(x) - np.mean(y))
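        if DEBUG:
            # toy example of the permutation test above (a sketch, not part of the analysis):
            # with a median difference of only 0.1 between the groups, the p-value should be large
            aux_p = permutation_test(x=np.array([1.0, 2.0, 3.0]), y=np.array([1.1, 2.1, 3.1]),
                                     func=func, seed=0, method='approximate', num_rounds=1000)
            print('toy p-value = ' + str(aux_p))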
## PAT vs. MAT
# test whether the median values are different enough between PAT vs. MAT
pval_perc_f_pat2mat = np.zeros(shape=(len(quantiles),))
for i, q in enumerate(quantiles):
pval_perc_f_pat2mat[i] = permutation_test(x=area_q_f_pat[:, i], y=area_q_f_mat[:, i],
func=func, seed=None,
method='approximate', num_rounds=math.factorial(permutation_sample_size))
pval_perc_m_pat2mat = np.zeros(shape=(len(quantiles),))
for i, q in enumerate(quantiles):
pval_perc_m_pat2mat[i] = permutation_test(x=area_q_m_pat[:, i], y=area_q_m_mat[:, i],
func=func, seed=None,
method='approximate', num_rounds=math.factorial(permutation_sample_size))
## WT vs. Het
# PAT Females
pval_perc_f_pat_wt2het = np.zeros(shape=(len(quantiles),))
for i, q in enumerate(quantiles):
pval_perc_f_pat_wt2het[i] = permutation_test(x=area_q_f_pat_wt[:, i], y=area_q_f_pat_het[:, i],
func=func, seed=None,
method='approximate',
num_rounds=math.factorial(permutation_sample_size))
# MAT Females
pval_perc_f_mat_wt2het = np.zeros(shape=(len(quantiles),))
for i, q in enumerate(quantiles):
pval_perc_f_mat_wt2het[i] = permutation_test(x=area_q_f_mat_wt[:, i], y=area_q_f_mat_het[:, i],
func=func, seed=None,
method='approximate',
num_rounds=math.factorial(permutation_sample_size))
# PAT Males
pval_perc_m_pat_wt2het = np.zeros(shape=(len(quantiles),))
for i, q in enumerate(quantiles):
pval_perc_m_pat_wt2het[i] = permutation_test(x=area_q_m_pat_wt[:, i], y=area_q_m_pat_het[:, i],
func=func, seed=None,
method='approximate',
num_rounds=math.factorial(permutation_sample_size))
# MAT Males
pval_perc_m_mat_wt2het = np.zeros(shape=(len(quantiles),))
for i, q in enumerate(quantiles):
pval_perc_m_mat_wt2het[i] = permutation_test(x=area_q_m_mat_wt[:, i], y=area_q_m_mat_het[:, i],
func=func, seed=None,
method='approximate',
num_rounds=math.factorial(permutation_sample_size))
np.savez_compressed(filename_pvals, permutation_sample_size=permutation_sample_size,
pval_perc_f_pat2mat=pval_perc_f_pat2mat, pval_perc_m_pat2mat=pval_perc_m_pat2mat,
pval_perc_f_pat_wt2het=pval_perc_f_pat_wt2het, pval_perc_f_mat_wt2het=pval_perc_f_mat_wt2het,
pval_perc_m_pat_wt2het=pval_perc_m_pat_wt2het, pval_perc_m_mat_wt2het=pval_perc_m_mat_wt2het)
# data has been loaded or computed
np.set_printoptions(precision=2)
print('PAT vs. MAT before multitest correction')
print('Female:')
print(pval_perc_f_pat2mat)
print('Male:')
print(pval_perc_m_pat2mat)
np.set_printoptions(precision=8)
# multitest correction using Benjamini-Hochberg
_, pval_perc_f_pat2mat, _, _ = multipletests(pval_perc_f_pat2mat, method='fdr_bh', alpha=0.05, returnsorted=False)
_, pval_perc_m_pat2mat, _, _ = multipletests(pval_perc_m_pat2mat, method='fdr_bh', alpha=0.05, returnsorted=False)
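    if DEBUG:
        # toy Benjamini-Hochberg example (a sketch, not part of the analysis): corrected
        # p-values are roughly p_(i) * n / i, made monotone and capped at 1
        print(multipletests(np.array([0.01, 0.04, 0.03, 0.20]), method='fdr_bh', alpha=0.05)[1])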
np.set_printoptions(precision=2)
print('PAT vs. MAT with multitest correction')
print('Female:')
print(pval_perc_f_pat2mat)
print('Male:')
print(pval_perc_m_pat2mat)
np.set_printoptions(precision=8)
np.set_printoptions(precision=3)
print('WT vs. Het before multitest correction')
print('Female:')
print(pval_perc_f_pat_wt2het)
print(pval_perc_f_mat_wt2het)
print('Male:')
print(pval_perc_m_pat_wt2het)
print(pval_perc_m_mat_wt2het)
np.set_printoptions(precision=8)
# multitest correction using Benjamini-Hochberg
_, pval_perc_f_pat_wt2het, _, _ = multipletests(pval_perc_f_pat_wt2het, method='fdr_bh', alpha=0.05, returnsorted=False)
_, pval_perc_f_mat_wt2het, _, _ = multipletests(pval_perc_f_mat_wt2het, method='fdr_bh', alpha=0.05, returnsorted=False)
_, pval_perc_m_pat_wt2het, _, _ = multipletests(pval_perc_m_pat_wt2het, method='fdr_bh', alpha=0.05, returnsorted=False)
_, pval_perc_m_mat_wt2het, _, _ = multipletests(pval_perc_m_mat_wt2het, method='fdr_bh', alpha=0.05, returnsorted=False)
np.set_printoptions(precision=3)
print('WT vs. Het with multitest correction')
print('Female:')
print(pval_perc_f_pat_wt2het)
print(pval_perc_f_mat_wt2het)
print('Male:')
print(pval_perc_m_pat_wt2het)
print(pval_perc_m_mat_wt2het)
np.set_printoptions(precision=8)
# # plot the median difference and the population quantiles at which the difference is significant
# if DEBUG:
# plt.clf()
# idx = pval_perc_f_pat2mat < 0.05
# delta_a_f_pat2mat = (area_interval_f_mat[1, :] - area_interval_f_pat[1, :]) / area_interval_f_pat[1, :]
# if np.any(idx):
# plt.stem(quantiles[idx], 100 * delta_a_f_pat2mat[idx],
# markerfmt='.', linefmt='C6-', basefmt='C6',
# label='p-val$_{\mathrm{PAT}}$ < 0.05')
#
# idx = pval_perc_m_pat2mat < 0.05
# delta_a_m_pat2mat = (area_interval_m_mat[1, :] - area_interval_m_pat[1, :]) / area_interval_m_pat[1, :]
# if np.any(idx):
# plt.stem(quantiles[idx], 100 * delta_a_m_pat2mat[idx],
# markerfmt='.', linefmt='C7-', basefmt='C7', bottom=250,
# label='p-val$_{\mathrm{MAT}}$ < 0.05')
#
# plt.plot(quantiles, 100 * delta_a_f_pat2mat, 'C6', linewidth=3, label='Female PAT to MAT')
# plt.plot(quantiles, 100 * delta_a_m_pat2mat, 'C7', linewidth=3, label='Male PAT to MAT')
#
# plt.xlabel('Cell population quantile', fontsize=14)
# plt.ylabel('Area change (%)', fontsize=14)
# plt.tick_params(axis='both', which='major', labelsize=14)
# plt.legend(loc='lower right', prop={'size': 12})
# plt.tight_layout()
#
# plt.savefig(os.path.join(figures_dir, 'exp_0099_' + depot + '_cell_area_change_pat_2_mat.svg'))
# plt.savefig(os.path.join(figures_dir, 'exp_0099_' + depot + '_cell_area_change_pat_2_mat.png'))
########################################################################################################################
## Linear models of body weight (BW), fat depots weight (SC and gWAT), and categorical variables (sex, ko, genotype)
########################################################################################################################
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats
import pandas as pd
import statsmodels.api as sm
import re
# directories
klf14_root_data_dir = os.path.join(home, 'Data/cytometer_data/klf14')
annotations_dir = os.path.join(home, 'Data/cytometer_data/aida_data_Klf14_v7/annotations')
ndpi_dir = os.path.join(home, 'scan_srv2_cox/Maz Yon')
figures_dir = os.path.join(home, 'GoogleDrive/Research/20190727_cytometer_paper/figures')
metainfo_dir = os.path.join(home, 'Data/cytometer_data/klf14')
DEBUG = False
quantiles = np.linspace(0, 1, 11)
quantiles = quantiles[1:-1]
# load metainfo file
metainfo_csv_file = os.path.join(metainfo_dir, 'klf14_b6ntac_meta_info.csv')
metainfo = pd.read_csv(metainfo_csv_file)
# make sure that in the boxplots PAT comes before MAT
metainfo['sex'] = metainfo['sex'].astype(pd.api.types.CategoricalDtype(categories=['f', 'm'], ordered=True))
metainfo['ko_parent'] = metainfo['ko_parent'].astype(pd.api.types.CategoricalDtype(categories=['PAT', 'MAT'], ordered=True))
metainfo['genotype'] = metainfo['genotype'].astype(pd.api.types.CategoricalDtype(categories=['KLF14-KO:WT', 'KLF14-KO:Het'], ordered=True))
# add litter and mother ID columns to the data
for i, id in enumerate(metainfo['id']):
litter = re.sub('[a-zA-Z]', '', id)
metainfo.loc[i, 'litter'] = litter
metainfo.loc[i, 'mother_id'] = litter.split('.')[0]
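# e.g. id = '17.2g' gives litter = '17.2' and mother_id = '17'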
# add rough mask area column to the data
depot = 'sqwat'
filename_rough_mask_area = os.path.join(figures_dir, 'klf14_b6ntac_exp_0099_rough_mask_area_' + depot + '.npz')
aux = np.load(filename_rough_mask_area)
id_all = aux['id_all']
rough_mask_area_all = aux['rough_mask_area_all']
for id, rough_mask_area in zip(id_all, rough_mask_area_all):
metainfo.loc[metainfo['id'] == id, 'SC_rough_mask_area'] = rough_mask_area
# add rough mask area column to the data
depot = 'gwat'
filename_rough_mask_area = os.path.join(figures_dir, 'klf14_b6ntac_exp_0099_rough_mask_area_' + depot + '.npz')
aux = np.load(filename_rough_mask_area)
id_all = aux['id_all']
rough_mask_area_all = aux['rough_mask_area_all']
for id, rough_mask_area in zip(id_all, rough_mask_area_all):
metainfo.loc[metainfo['id'] == id, 'gWAT_rough_mask_area'] = rough_mask_area
########################################################################################################################
### Plot SC vs. gWAT to look for outliers
########################################################################################################################
idx_not_nan = np.where(~np.isnan(metainfo['SC']) * ~np.isnan(metainfo['gWAT']) * ~np.isnan(metainfo['BW']))[0]
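# note: '*' on boolean arrays acts as an element-wise AND, so idx_not_nan selects the rows
# where SC, gWAT and BW are all present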
if DEBUG:
idx = idx_not_nan
plt.clf()
plt.scatter(metainfo['SC'][idx], metainfo['gWAT'][idx], color='k', label='All mice')
for i in idx:
plt.annotate(metainfo['id'][i], (metainfo['SC'][i], metainfo['gWAT'][i]))
plt.xlabel('SC', fontsize=14)
plt.ylabel('gWAT', fontsize=14)
plt.legend()
if DEBUG:
plt.clf()
plt.subplot(221)
idx = np.where((metainfo['sex'] == 'f') * (metainfo['ko_parent'] == 'PAT'))[0]
plt.scatter(metainfo['SC'][idx], metainfo['gWAT'][idx], color='k', label='f PAT')
for i in idx:
plt.annotate(metainfo['id'][i], (metainfo['SC'][i], metainfo['gWAT'][i]))
plt.xlabel('SC', fontsize=14)
plt.ylabel('gWAT', fontsize=14)
plt.legend()
plt.subplot(222)
idx = np.where((metainfo['sex'] == 'f') * (metainfo['ko_parent'] == 'MAT'))[0]
plt.scatter(metainfo['SC'][idx], metainfo['gWAT'][idx], color='k', label='f MAT')
for i in idx:
plt.annotate(metainfo['id'][i], (metainfo['SC'][i], metainfo['gWAT'][i]))
plt.xlabel('SC', fontsize=14)
plt.ylabel('gWAT', fontsize=14)
plt.legend()
plt.subplot(223)
idx = np.where((metainfo['sex'] == 'm') * (metainfo['ko_parent'] == 'PAT'))[0]
plt.scatter(metainfo['SC'][idx], metainfo['gWAT'][idx], color='k', label='m PAT')
for i in idx:
plt.annotate(metainfo['id'][i], (metainfo['SC'][i], metainfo['gWAT'][i]))
plt.xlabel('SC', fontsize=14)
plt.ylabel('gWAT', fontsize=14)
plt.legend()
plt.subplot(224)
idx = np.where((metainfo['sex'] == 'm') * (metainfo['ko_parent'] == 'MAT'))[0]
plt.scatter(metainfo['SC'][idx], metainfo['gWAT'][idx], color='k', label='m MAT')
for i in idx:
plt.annotate(metainfo['id'][i], (metainfo['SC'][i], metainfo['gWAT'][i]))
plt.xlabel('SC', fontsize=14)
plt.ylabel('gWAT', fontsize=14)
plt.legend()
# 64 and 65 are outliers.
########################################################################################################################
### Plot SC and gWAT vs. corresponding rough mask areas
########################################################################################################################
if DEBUG:
plt.clf()
plt.subplot(121)
plt.scatter(np.power(metainfo['SC_rough_mask_area'] * 1e6, 3/2), metainfo['SC'])
# for i in range(metainfo.shape[0]):
# plt.annotate(metainfo['id'][i], (np.power(metainfo['SC_rough_mask_area'][i] * 1e6, 3/2), metainfo['SC'][i]))
plt.xlabel('Subcutaneous vol from slice area mm$^3$', fontsize=14)
plt.ylabel('m$_{SC}$ (g)', fontsize=14)
plt.tick_params(labelsize=14)
plt.subplot(122)
plt.scatter(np.power(metainfo['gWAT_rough_mask_area'] * 1e6, 3/2), metainfo['gWAT'])
# for i in range(metainfo.shape[0]):
# plt.annotate(metainfo['id'][i], (metainfo['gWAT_rough_mask_area'][i] * 1e6, metainfo['gWAT'][i]))
    plt.xlabel('Gonadal vol from slice area mm$^3$', fontsize=14)
plt.ylabel('m$_{G}$ (g)', fontsize=14)
plt.tick_params(labelsize=14)
########################################################################################################################
# Explore the mouse dataset in terms of body weight when alive vs. after culling
########################################################################################################################
# plot weight when alive vs. weight after death
idx = np.where(~np.isnan(metainfo['BW_alive']) * ~np.isnan(metainfo['BW']))[0]
plt.clf()
plt.scatter(metainfo['BW_alive'], metainfo['BW'])
plt.plot([20, 50], [20, 50])
for i in idx:
plt.annotate(i, (metainfo['BW_alive'][i], metainfo['BW'][i]))
# plot weight evolution over time
plt.clf()
for i in idx:
plt.plot([metainfo['BW_alive_date'][i], metainfo['cull_age'][i]], [metainfo['BW_alive'][i], metainfo['BW'][i]])
# plot SC vs. BW (females)
if DEBUG:
plt.clf()
plt.subplot(121)
for litter in np.unique(metainfo['litter']):
# index of animals from this litter
idx = np.where((metainfo['litter'] == litter) * (metainfo['sex'] == 'f'))[0]
if (len(idx) > 1):
            # sort in order of increasing SC
idx_sort = np.argsort(metainfo['SC'][idx])
if np.array(metainfo['ko_parent'][idx])[0] == 'PAT':
color = 'k'
elif np.array(metainfo['ko_parent'][idx])[0] == 'MAT':
color = 'r'
plt.scatter(np.array(metainfo.loc[idx, 'SC'])[idx_sort], np.array(metainfo.loc[idx, 'BW'])[idx_sort], color=color)
plt.plot(np.array(metainfo.loc[idx, 'SC'])[idx_sort], np.array(metainfo.loc[idx, 'BW'])[idx_sort], color=color)
# if litter in ['19.1', '19.2']:
for i in idx:
plt.annotate(metainfo['id'][i], (metainfo['SC'][i], metainfo['BW'][i]))
plt.title('Females')
plt.xlabel('$m_{SC}$ (g)', fontsize=14)
plt.ylabel('BW (g)', fontsize=14)
plt.tick_params(labelsize=14)
plt.tight_layout()
# plot SC vs. BW (males)
plt.subplot(122)
for litter in np.unique(metainfo['litter']):
# index of animals from this litter
idx = np.where((metainfo['litter'] == litter) * (metainfo['sex'] == 'm'))[0]
if (len(idx) > 1):
            # sort in order of increasing SC
idx_sort = np.argsort(metainfo['SC'][idx])
if np.array(metainfo['ko_parent'][idx])[0] == 'PAT':
color = 'k'
elif np.array(metainfo['ko_parent'][idx])[0] == 'MAT':
color = 'r'
plt.scatter(np.array(metainfo.loc[idx, 'SC'])[idx_sort], np.array(metainfo.loc[idx, 'BW'])[idx_sort], color=color)
plt.plot(np.array(metainfo.loc[idx, 'SC'])[idx_sort], np.array(metainfo.loc[idx, 'BW'])[idx_sort], color=color)
# if litter in ['19.1', '19.2']:
for i in idx:
plt.annotate(metainfo['id'][i], (metainfo['SC'][i], metainfo['BW'][i]))
plt.title('Males')
plt.xlabel('$m_{SC}$ (g)', fontsize=14)
plt.ylabel('BW (g)', fontsize=14)
plt.tick_params(labelsize=14)
plt.tight_layout()
# plot gWAT vs. BW (female)
plt.clf()
for litter in np.unique(metainfo['litter']):
# index of animals from this litter
idx = np.where((metainfo['litter'] == litter) * (metainfo['sex'] == 'f'))[0]
if len(idx) > 1:
        # sort in order of increasing gWAT
idx_sort = np.argsort(metainfo['gWAT'][idx])
if np.array(metainfo['ko_parent'][idx])[0] == 'PAT':
color = 'k'
elif np.array(metainfo['ko_parent'][idx])[0] == 'MAT':
color = 'r'
plt.scatter(np.array(metainfo.loc[idx, 'gWAT'])[idx_sort], np.array(metainfo.loc[idx, 'BW'])[idx_sort], color=color)
plt.plot(np.array(metainfo.loc[idx, 'gWAT'])[idx_sort], np.array(metainfo.loc[idx, 'BW'])[idx_sort], color=color)
# if (litter == '19.2'):
for i in idx:
plt.annotate(i, (metainfo['gWAT'][i], metainfo['BW'][i]))
plt.xlabel('$m_{G}$ (g)', fontsize=14)
plt.ylabel('BW (g)', fontsize=14)
plt.tick_params(labelsize=14)
plt.tight_layout()
########################################################################################################################
### Model BW ~ (C(sex) + C(ko_parent) + C(genotype)) * (SC * gWAT)
### WARNING: This model is an example of having too many variables
########################################################################################################################
idx_not_nan = np.where(~np.isnan(metainfo['SC']) * ~np.isnan(metainfo['gWAT']) * ~np.isnan(metainfo['BW']))[0]
model = sm.formula.ols('BW ~ (C(sex) + C(ko_parent) + C(genotype)) * (SC * gWAT)', data=metainfo, subset=idx_not_nan).fit()
print(model.summary())
# OLS Regression Results
# ==============================================================================
# Dep. Variable: BW R-squared: 0.816
# Model: OLS Adj. R-squared: 0.769
# Method: Least Squares F-statistic: 17.69
# Date: Mon, 24 Feb 2020 Prob (F-statistic): 1.19e-16
# Time: 15:32:07 Log-Likelihood: -197.81
# No. Observations: 76 AIC: 427.6
# Df Residuals: 60 BIC: 464.9
# Df Model: 15
# Covariance Type: nonrobust
# =======================================================================================================
# coef std err t P>|t| [0.025 0.975]
# -------------------------------------------------------------------------------------------------------
# Intercept 20.8129 3.915 5.317 0.000 12.982 28.644
# C(sex)[T.m] 8.2952 5.920 1.401 0.166 -3.547 20.138
# C(ko_parent)[T.MAT] -3.1038 4.417 -0.703 0.485 -11.938 5.731
# C(genotype)[T.KLF14-KO:Het] 0.4508 4.154 0.109 0.914 -7.858 8.759
# SC -1.7885 8.152 -0.219 0.827 -18.096 14.519
# C(sex)[T.m]:SC 14.4534 9.416 1.535 0.130 -4.382 33.289
# C(ko_parent)[T.MAT]:SC 13.9682 12.136 1.151 0.254 -10.307 38.244
# C(genotype)[T.KLF14-KO:Het]:SC -4.4397 8.417 -0.527 0.600 -21.277 12.397
# gWAT 4.6153 4.514 1.022 0.311 -4.414 13.644
# C(sex)[T.m]:gWAT -1.1889 5.786 -0.205 0.838 -12.763 10.385
# C(ko_parent)[T.MAT]:gWAT 11.1354 4.622 2.409 0.019 1.890 20.380
# C(genotype)[T.KLF14-KO:Het]:gWAT -0.6653 4.608 -0.144 0.886 -9.883 8.553
# SC:gWAT 5.7281 7.709 0.743 0.460 -9.692 21.148
# C(sex)[T.m]:SC:gWAT -8.8374 8.256 -1.070 0.289 -25.352 7.678
# C(ko_parent)[T.MAT]:SC:gWAT -20.5613 9.163 -2.244 0.029 -38.889 -2.234
# C(genotype)[T.KLF14-KO:Het]:SC:gWAT 1.2538 7.420 0.169 0.866 -13.589 16.096
# ==============================================================================
# Omnibus: 0.893 Durbin-Watson: 1.584
# Prob(Omnibus): 0.640 Jarque-Bera (JB): 0.419
# Skew: 0.138 Prob(JB): 0.811
# Kurtosis: 3.236 Cond. No. 103.
# ==============================================================================
# Warnings:
# [1] Standard Errors assume that the covariance matrix of the errors is correctly specified.
# partial regression and influence plots
if DEBUG:
sm.graphics.plot_partregress_grid(model)
sm.graphics.influence_plot(model, criterion="cooks")
# list of points with high influence (large residuals and leverage)
idx_influence = [35, 36, 37, 64, 65]
idx_no_influence = list(set(range(metainfo.shape[0])) - set(idx_influence))
print(metainfo.loc[idx_influence, ['id', 'ko_parent', 'sex', 'genotype', 'BW', 'SC', 'gWAT']])
model = sm.formula.ols('BW ~ (C(sex) + C(ko_parent) + C(genotype)) * (SC * gWAT)', data=metainfo, subset=idx_no_influence).fit()
print(model.summary())
# OLS Regression Results
# ==============================================================================
# Dep. Variable: BW R-squared: 0.817
# Model: OLS Adj. R-squared: 0.768
# Method: Least Squares F-statistic: 16.41
# Date: Wed, 26 Feb 2020 Prob (F-statistic): 3.87e-15
# Time: 14:08:33 Log-Likelihood: -183.06
# No. Observations: 71 AIC: 398.1
# Df Residuals: 55 BIC: 434.3
# Df Model: 15
# Covariance Type: nonrobust
# =======================================================================================================
# coef std err t P>|t| [0.025 0.975]
# -------------------------------------------------------------------------------------------------------
# Intercept 22.4929 5.698 3.948 0.000 11.074 33.912
# C(sex)[T.m] 7.2408 6.769 1.070 0.289 -6.324 20.806
# C(ko_parent)[T.MAT] -2.6859 4.448 -0.604 0.548 -11.600 6.229
# C(genotype)[T.KLF14-KO:Het] -0.0971 4.440 -0.022 0.983 -8.996 8.802
# SC -9.2532 23.095 -0.401 0.690 -55.537 37.031
# C(sex)[T.m]:SC 21.7051 21.391 1.015 0.315 -21.164 64.574
# C(ko_parent)[T.MAT]:SC 10.9030 13.041 0.836 0.407 -15.231 37.037
# C(genotype)[T.KLF14-KO:Het]:SC -2.7711 11.164 -0.248 0.805 -25.145 19.603
# gWAT 2.5214 5.410 0.466 0.643 -8.321 13.364
# C(sex)[T.m]:gWAT 0.0320 6.181 0.005 0.996 -12.356 12.420
# C(ko_parent)[T.MAT]:gWAT 11.2072 4.596 2.439 0.018 1.997 20.417
# C(genotype)[T.KLF14-KO:Het]:gWAT -0.3474 4.714 -0.074 0.942 -9.795 9.100
# SC:gWAT 13.9732 19.484 0.717 0.476 -25.074 53.020
# C(sex)[T.m]:SC:gWAT -16.5886 17.869 -0.928 0.357 -52.398 19.221
# C(ko_parent)[T.MAT]:SC:gWAT -18.1201 9.764 -1.856 0.069 -37.687 1.447
# C(genotype)[T.KLF14-KO:Het]:SC:gWAT -0.2622 9.087 -0.029 0.977 -18.472 17.948
# ==============================================================================
# Omnibus: 1.715 Durbin-Watson: 1.455
# Prob(Omnibus): 0.424 Jarque-Bera (JB): 1.060
# Skew: 0.245 Prob(JB): 0.589
# Kurtosis: 3.342 Cond. No. 229.
# ==============================================================================
# Warnings:
# [1] Standard Errors assume that the covariance matrix of the errors is correctly specified.
print(model.pvalues)
########################################################################################################################
### Model BW ~ C(sex) + C(sex):gWAT + C(ko_parent) + gWAT
### Superseded by the next model. This one is too simple: without a ko_parent:gWAT interaction, it forces the PAT and MAT lines to be parallel within each sex
########################################################################################################################
# don't use rows with NaNs (fit() should drop them anyway, but just in case)
idx_not_nan = np.where(~np.isnan(metainfo['SC']) * ~np.isnan(metainfo['gWAT']) * ~np.isnan(metainfo['BW']))[0]
# data that we are going to use
idx_subset = idx_not_nan
# fit linear model to data
model = sm.formula.ols('BW ~ C(sex) + C(sex):gWAT + C(ko_parent) + gWAT', data=metainfo, subset=idx_subset).fit()
print(model.summary())
# OLS Regression Results
# ==============================================================================
# Dep. Variable: BW R-squared: 0.740
# Model: OLS Adj. R-squared: 0.726
# Method: Least Squares F-statistic: 50.60
# Date: Tue, 25 Feb 2020 Prob (F-statistic): 4.45e-20
# Time: 15:24:38 Log-Likelihood: -210.82
# No. Observations: 76 AIC: 431.6
# Df Residuals: 71 BIC: 443.3
# Df Model: 4
# Covariance Type: nonrobust
# =======================================================================================
# coef std err t P>|t| [0.025 0.975]
# ---------------------------------------------------------------------------------------
# Intercept 19.4126 1.598 12.144 0.000 16.225 22.600
# C(sex)[T.m] 17.6887 2.981 5.934 0.000 11.745 23.633
# C(ko_parent)[T.MAT] 2.7743 0.927 2.991 0.004 0.925 4.624
# gWAT 8.0439 1.721 4.674 0.000 4.612 11.476
# C(sex)[T.m]:gWAT -7.2480 2.845 -2.548 0.013 -12.921 -1.575
# ==============================================================================
# Omnibus: 4.048 Durbin-Watson: 1.411
# Prob(Omnibus): 0.132 Jarque-Bera (JB): 3.306
# Skew: 0.378 Prob(JB): 0.191
# Kurtosis: 3.687 Cond. No. 16.2
# ==============================================================================
# Warnings:
# [1] Standard Errors assume that the covariance matrix of the errors is correctly specified.
# helper function to plot the different lines created by the model
def model_line(model, sex, ko_parent, gWAT):
    sex = float(sex == 'm')
    ko_parent = float(ko_parent == 'MAT')
return model.params['Intercept'] +\
model.params['C(sex)[T.m]'] * sex + \
model.params['C(ko_parent)[T.MAT]'] * ko_parent + \
model.params['gWAT'] * gWAT + \
model.params['C(sex)[T.m]:gWAT'] * sex * gWAT
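# usage example (a sketch): predicted BW for a female MAT mouse with 1 g of gonadal fat
#   model_line(model, sex='f', ko_parent='MAT', gWAT=1.0)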
# plot BW as a function of gWAT
if DEBUG:
annotate = False
plt.clf()
# f PAT
idx = (metainfo['sex'] == 'f') * (metainfo['ko_parent'] == 'PAT') * (metainfo['genotype'] == 'KLF14-KO:WT')
gWAT = np.linspace(np.min(metainfo['gWAT'][idx]), np.max(metainfo['gWAT'][idx]))
plt.scatter(metainfo['gWAT'][idx], metainfo['BW'][idx], label='f PAT WT', color='C0', facecolor='none')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['gWAT'][i], metainfo['BW'][i]))
idx = (metainfo['sex'] == 'f') * (metainfo['ko_parent'] == 'PAT') * (metainfo['genotype'] == 'KLF14-KO:Het')
gWAT = np.linspace(np.min(metainfo['gWAT'][idx]), np.max(metainfo['gWAT'][idx]))
plt.scatter(metainfo['gWAT'][idx], metainfo['BW'][idx], label='f PAT Het', color='C0')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['gWAT'][i], metainfo['BW'][i]))
idx = (metainfo['sex'] == 'f') * (metainfo['ko_parent'] == 'PAT')
gWAT = np.linspace(np.min(metainfo['gWAT'][idx]), np.max(metainfo['gWAT'][idx]))
BW = model_line(model, sex='f', ko_parent='PAT', gWAT=gWAT)
plt.plot(gWAT, BW, color='C0', linewidth=3)
# f MAT
idx = (metainfo['sex'] == 'f') * (metainfo['ko_parent'] == 'MAT') * (metainfo['genotype'] == 'KLF14-KO:WT')
gWAT = np.linspace(np.min(metainfo['gWAT'][idx]), np.max(metainfo['gWAT'][idx]))
plt.scatter(metainfo['gWAT'][idx], metainfo['BW'][idx], label='f MAT WT', color='C2', facecolor='none')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['gWAT'][i], metainfo['BW'][i]))
idx = (metainfo['sex'] == 'f') * (metainfo['ko_parent'] == 'MAT') * (metainfo['genotype'] == 'KLF14-KO:Het')
gWAT = np.linspace(np.min(metainfo['gWAT'][idx]), np.max(metainfo['gWAT'][idx]))
plt.scatter(metainfo['gWAT'][idx], metainfo['BW'][idx], label='f MAT Het', color='C2')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['gWAT'][i], metainfo['BW'][i]))
idx = (metainfo['sex'] == 'f') * (metainfo['ko_parent'] == 'MAT')
gWAT = np.linspace(np.min(metainfo['gWAT'][idx]), np.max(metainfo['gWAT'][idx]))
BW = model_line(model, sex='f', ko_parent='MAT', gWAT=gWAT)
plt.plot(gWAT, BW, color='C2', linewidth=3)
# m PAT
idx = (metainfo['sex'] == 'm') * (metainfo['ko_parent'] == 'PAT') * (metainfo['genotype'] == 'KLF14-KO:WT')
gWAT = np.linspace(np.min(metainfo['gWAT'][idx]), np.max(metainfo['gWAT'][idx]))
plt.scatter(metainfo['gWAT'][idx], metainfo['BW'][idx], label='m PAT WT', color='k', facecolor='none')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['gWAT'][i], metainfo['BW'][i]))
idx = (metainfo['sex'] == 'm') * (metainfo['ko_parent'] == 'PAT') * (metainfo['genotype'] == 'KLF14-KO:Het')
gWAT = np.linspace(np.min(metainfo['gWAT'][idx]), np.max(metainfo['gWAT'][idx]))
plt.scatter(metainfo['gWAT'][idx], metainfo['BW'][idx], label='m PAT Het', color='k')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['gWAT'][i], metainfo['BW'][i]))
idx = (metainfo['sex'] == 'm') * (metainfo['ko_parent'] == 'PAT')
gWAT = np.linspace(np.min(metainfo['gWAT'][idx]), np.max(metainfo['gWAT'][idx]))
BW = model_line(model, sex='m', ko_parent='PAT', gWAT=gWAT)
plt.plot(gWAT, BW, color='k', linewidth=3)
# m MAT
idx = (metainfo['sex'] == 'm') * (metainfo['ko_parent'] == 'MAT') * (metainfo['genotype'] == 'KLF14-KO:WT')
gWAT = np.linspace(np.min(metainfo['gWAT'][idx]), np.max(metainfo['gWAT'][idx]))
plt.scatter(metainfo['gWAT'][idx], metainfo['BW'][idx], label='m MAT WT', color='C3', facecolor='none')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['gWAT'][i], metainfo['BW'][i]))
idx = (metainfo['sex'] == 'm') * (metainfo['ko_parent'] == 'MAT') * (metainfo['genotype'] == 'KLF14-KO:Het')
gWAT = np.linspace(np.min(metainfo['gWAT'][idx]), np.max(metainfo['gWAT'][idx]))
plt.scatter(metainfo['gWAT'][idx], metainfo['BW'][idx], label='m MAT Het', color='C3')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['gWAT'][i], metainfo['BW'][i]))
idx = (metainfo['sex'] == 'm') * (metainfo['ko_parent'] == 'MAT')
gWAT = np.linspace(np.min(metainfo['gWAT'][idx]), np.max(metainfo['gWAT'][idx]))
BW = model_line(model, sex='m', ko_parent='MAT', gWAT=gWAT)
plt.plot(gWAT, BW, color='C3', linewidth=3)
plt.legend()
plt.xlabel('m$_G$ (g)', fontsize=14)
plt.ylabel('BW (g)', fontsize=14)
plt.tick_params(labelsize=14)
plt.tight_layout()
plt.savefig(os.path.join(figures_dir, 'klf14_b6ntac_exp_0099_bw_model_mG_simpler.png'), bbox_inches='tight')
plt.savefig(os.path.join(figures_dir, 'klf14_b6ntac_exp_0099_bw_model_mG_simpler.svg'), bbox_inches='tight')
########################################################################################################################
### Model BW ~ C(sex) * C(ko_parent) * gWAT
### VALID model
########################################################################################################################
# don't use rows with NaNs (fit() should drop them anyway, but just in case)
idx_not_nan = np.where(~np.isnan(metainfo['SC']) * ~np.isnan(metainfo['gWAT']) * ~np.isnan(metainfo['BW']))[0]
# data that we are going to use
idx_subset = idx_not_nan
# Mixed-effects linear model (also fitted in the EXPERIMENTAL section below; shown here for comparison with the OLS fit)
aux = metainfo.loc[idx_subset, :]
formula = 'BW ~ C(sex) * C(ko_parent) * gWAT'
vc = {'litter': '0 + C(litter)'}  # litter is a random effect nested inside mother_id
model = sm.formula.mixedlm(formula, vc_formula=vc, re_formula='1', groups='mother_id', data=aux).fit()
print(model.summary())
# fit linear model to data
formula = 'BW ~ C(sex) * C(ko_parent) * gWAT'
model = sm.formula.ols(formula, data=metainfo, subset=idx_subset).fit()
print(model.summary())
# OLS Regression Results
# ==============================================================================
# Dep. Variable: BW R-squared: 0.773
# Model: OLS Adj. R-squared: 0.749
# Method: Least Squares F-statistic: 33.01
# Date: Thu, 27 Feb 2020 Prob (F-statistic): 1.65e-19
# Time: 10:14:01 Log-Likelihood: -205.77
# No. Observations: 76 AIC: 427.5
# Df Residuals: 68 BIC: 446.2
# Df Model: 7
# Covariance Type: nonrobust
# ========================================================================================================
# coef std err t P>|t| [0.025 0.975]
# --------------------------------------------------------------------------------------------------------
# Intercept 19.8526 1.975 10.053 0.000 15.912 23.793
# C(sex)[T.m] 12.2959 3.705 3.318 0.001 4.902 19.690
# C(ko_parent)[T.MAT] 1.8066 2.984 0.605 0.547 -4.148 7.762
# C(sex)[T.m]:C(ko_parent)[T.MAT] 12.8513 5.808 2.213 0.030 1.261 24.441
# gWAT 6.7074 2.246 2.986 0.004 2.225 11.189
# C(sex)[T.m]:gWAT -0.5933 3.647 -0.163 0.871 -7.870 6.684
# C(ko_parent)[T.MAT]:gWAT 2.5961 3.308 0.785 0.435 -4.005 9.197
# C(sex)[T.m]:C(ko_parent)[T.MAT]:gWAT -14.5795 5.526 -2.638 0.010 -25.607 -3.552
# ==============================================================================
# Omnibus: 4.620 Durbin-Watson: 1.636
# Prob(Omnibus): 0.099 Jarque-Bera (JB): 4.493
# Skew: 0.308 Prob(JB): 0.106
# Kurtosis: 4.019 Cond. No. 40.4
# ==============================================================================
# Warnings:
# [1] Standard Errors assume that the covariance matrix of the errors is correctly specified.
print(model.pvalues)
# helper function to plot the different lines created by the model
def model_line(model, sex, ko_parent, gWAT):
    sex = float(sex == 'm')
    ko_parent = float(ko_parent == 'MAT')
return model.params['Intercept'] +\
model.params['C(sex)[T.m]'] * sex + \
model.params['C(ko_parent)[T.MAT]'] * ko_parent + \
model.params['C(sex)[T.m]:C(ko_parent)[T.MAT]'] * sex * ko_parent + \
model.params['gWAT'] * gWAT + \
model.params['C(sex)[T.m]:gWAT'] * sex * gWAT + \
model.params['C(ko_parent)[T.MAT]:gWAT'] * ko_parent * gWAT + \
model.params['C(sex)[T.m]:C(ko_parent)[T.MAT]:gWAT'] * sex * ko_parent * gWAT
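# note on the decomposition above: the slope w.r.t. gWAT for a given group is 'gWAT' plus
# whichever gWAT interaction terms are active, e.g. for sex='m', ko_parent='MAT' all four
# gWAT terms contribute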
# plot BW as a function of gWAT
if DEBUG:
annotate = False
plt.clf()
# f PAT
idx = (metainfo['sex'] == 'f') * (metainfo['ko_parent'] == 'PAT') * (metainfo['genotype'] == 'KLF14-KO:WT')
gWAT = np.linspace(np.min(metainfo['gWAT'][idx]), np.max(metainfo['gWAT'][idx]))
plt.scatter(metainfo['gWAT'][idx], metainfo['BW'][idx], label='f PAT WT', color='C0', facecolor='none')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['gWAT'][i], metainfo['BW'][i]))
idx = (metainfo['sex'] == 'f') * (metainfo['ko_parent'] == 'PAT') * (metainfo['genotype'] == 'KLF14-KO:Het')
gWAT = np.linspace(np.min(metainfo['gWAT'][idx]), np.max(metainfo['gWAT'][idx]))
plt.scatter(metainfo['gWAT'][idx], metainfo['BW'][idx], label='f PAT Het', color='C0')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['gWAT'][i], metainfo['BW'][i]))
idx = (metainfo['sex'] == 'f') * (metainfo['ko_parent'] == 'PAT')
gWAT = np.linspace(np.min(metainfo['gWAT'][idx]), np.max(metainfo['gWAT'][idx]))
BW = model_line(model, sex='f', ko_parent='PAT', gWAT=gWAT)
plt.plot(gWAT, BW, color='C0', linewidth=3)
# f MAT
idx = (metainfo['sex'] == 'f') * (metainfo['ko_parent'] == 'MAT') * (metainfo['genotype'] == 'KLF14-KO:WT')
gWAT = np.linspace(np.min(metainfo['gWAT'][idx]), np.max(metainfo['gWAT'][idx]))
plt.scatter(metainfo['gWAT'][idx], metainfo['BW'][idx], label='f MAT WT', color='C2', facecolor='none')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['gWAT'][i], metainfo['BW'][i]))
idx = (metainfo['sex'] == 'f') * (metainfo['ko_parent'] == 'MAT') * (metainfo['genotype'] == 'KLF14-KO:Het')
gWAT = np.linspace(np.min(metainfo['gWAT'][idx]), np.max(metainfo['gWAT'][idx]))
plt.scatter(metainfo['gWAT'][idx], metainfo['BW'][idx], label='f MAT Het', color='C2')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['gWAT'][i], metainfo['BW'][i]))
idx = (metainfo['sex'] == 'f') * (metainfo['ko_parent'] == 'MAT')
gWAT = np.linspace(np.min(metainfo['gWAT'][idx]), np.max(metainfo['gWAT'][idx]))
BW = model_line(model, sex='f', ko_parent='MAT', gWAT=gWAT)
plt.plot(gWAT, BW, color='C2', linewidth=3)
# m PAT
idx = (metainfo['sex'] == 'm') * (metainfo['ko_parent'] == 'PAT') * (metainfo['genotype'] == 'KLF14-KO:WT')
gWAT = np.linspace(np.min(metainfo['gWAT'][idx]), np.max(metainfo['gWAT'][idx]))
plt.scatter(metainfo['gWAT'][idx], metainfo['BW'][idx], label='m PAT WT', color='k', facecolor='none')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['gWAT'][i], metainfo['BW'][i]))
idx = (metainfo['sex'] == 'm') * (metainfo['ko_parent'] == 'PAT') * (metainfo['genotype'] == 'KLF14-KO:Het')
gWAT = np.linspace(np.min(metainfo['gWAT'][idx]), np.max(metainfo['gWAT'][idx]))
plt.scatter(metainfo['gWAT'][idx], metainfo['BW'][idx], label='m PAT Het', color='k')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['gWAT'][i], metainfo['BW'][i]))
idx = (metainfo['sex'] == 'm') * (metainfo['ko_parent'] == 'PAT')
gWAT = np.linspace(np.min(metainfo['gWAT'][idx]), np.max(metainfo['gWAT'][idx]))
BW = model_line(model, sex='m', ko_parent='PAT', gWAT=gWAT)
plt.plot(gWAT, BW, color='k', linewidth=3)
# m MAT
idx = (metainfo['sex'] == 'm') * (metainfo['ko_parent'] == 'MAT') * (metainfo['genotype'] == 'KLF14-KO:WT')
gWAT = np.linspace(np.min(metainfo['gWAT'][idx]), np.max(metainfo['gWAT'][idx]))
plt.scatter(metainfo['gWAT'][idx], metainfo['BW'][idx], label='m MAT WT', color='C3', facecolor='none')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['gWAT'][i], metainfo['BW'][i]))
idx = (metainfo['sex'] == 'm') * (metainfo['ko_parent'] == 'MAT') * (metainfo['genotype'] == 'KLF14-KO:Het')
gWAT = np.linspace(np.min(metainfo['gWAT'][idx]), np.max(metainfo['gWAT'][idx]))
plt.scatter(metainfo['gWAT'][idx], metainfo['BW'][idx], label='m MAT Het', color='C3')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['gWAT'][i], metainfo['BW'][i]))
idx = (metainfo['sex'] == 'm') * (metainfo['ko_parent'] == 'MAT')
gWAT = np.linspace(np.min(metainfo['gWAT'][idx]), np.max(metainfo['gWAT'][idx]))
BW = model_line(model, sex='m', ko_parent='MAT', gWAT=gWAT)
plt.plot(gWAT, BW, color='C3', linewidth=3)
plt.legend()
plt.xlabel('m$_G$ (g)', fontsize=14)
plt.ylabel('BW (g)', fontsize=14)
plt.tick_params(labelsize=14)
plt.tight_layout()
plt.savefig(os.path.join(figures_dir, 'klf14_b6ntac_exp_0099_bw_model_mG.png'), bbox_inches='tight')
plt.savefig(os.path.join(figures_dir, 'klf14_b6ntac_exp_0099_bw_model_mG.svg'), bbox_inches='tight')
########################################################################################################################
### Model BW ~ C(sex) * C(ko_parent) * gWAT
### Add nested random effects: mother_id -> litter -> mouse
# https://www.statsmodels.org/dev/generated/statsmodels.formula.api.mixedlm.html?highlight=mixedlm#statsmodels.formula.api.mixedlm
### EXPERIMENTAL model
########################################################################################################################
# don't use lines with NaNs (these should be removed by fit(), but just in case)
idx_not_nan = np.where(~np.isnan(metainfo['SC']) * ~np.isnan(metainfo['gWAT']) * ~np.isnan(metainfo['BW']))[0]
# data that we are going to use
idx_subset = idx_not_nan
# Mixed-effects linear model
aux = metainfo.loc[idx_subset, :]
formula = 'BW ~ C(sex) * C(ko_parent) * gWAT'
vc = {'litter': '0 + C(litter)'}  # litter is a random effect nested inside mother_id
model = sm.formula.mixedlm(formula, vc_formula=vc, re_formula='1', groups='mother_id', data=aux).fit()
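# note: groups='mother_id' sets the top level of the random effects, re_formula='1'
# adds a random intercept per mother, and vc_formula adds a variance component for
# litter within each mother, i.e. the mother_id -> litter -> mouse nesting above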
print(model.summary())
# /home/rcasero/.conda/envs/cytometer_tensorflow/lib/python3.6/site-packages/statsmodels/base/model.py:1286: RuntimeWarning: invalid value encountered in sqrt
# bse_ = np.sqrt(np.diag(self.cov_params()))
# Mixed Linear Model Regression Results
# =================================================================================
# Model: MixedLM Dependent Variable: BW
# No. Observations: 76 Method: REML
# No. Groups: 8 Scale: 9.5401
# Min. group size: 1 Likelihood: -189.5050
# Max. group size: 20 Converged: Yes
# Mean group size: 9.5
# ---------------------------------------------------------------------------------
# Coef. Std.Err. z P>|z| [0.025 0.975]
# ---------------------------------------------------------------------------------
# Intercept 22.692 2.227 10.191 0.000 18.328 27.056
# C(sex)[T.m] 9.604 3.253 2.953 0.003 3.229 15.980
# C(ko_parent)[T.MAT] -2.804 3.152 -0.889 0.374 -8.982 3.375
# C(sex)[T.m]:C(ko_parent)[T.MAT] 7.670 6.151 1.247 0.212 -4.386 19.727
# gWAT 4.437 2.195 2.022 0.043 0.136 8.739
# C(sex)[T.m]:gWAT 1.656 3.157 0.524 0.600 -4.533 7.844
# C(ko_parent)[T.MAT]:gWAT 6.823 3.262 2.092 0.036 0.430 13.216
# C(sex)[T.m]:C(ko_parent)[T.MAT]:gWAT -10.814 5.674 -1.906 0.057 -21.935 0.308
# mother_id Var 0.000
# litter Var 7.659
# =================================================================================
########################################################################################################################
### BW ~ C(sex) * C(ko_parent) * SC
### VALID model, but bad fit because of 5 outliers (we remove them in the next model)
########################################################################################################################
# don't use lines with NaNs (these should be removed by fit(), but just in case)
idx_not_nan = np.where(~np.isnan(metainfo['SC']) * ~np.isnan(metainfo['gWAT']) * ~np.isnan(metainfo['BW']))[0]
# SC outliers
idx_outliers = []
# data that we are going to use
idx_subset = list(set(idx_not_nan) - set(idx_outliers))
# fit linear model to data
model = sm.formula.ols('BW ~ C(sex) * C(ko_parent) * SC', data=metainfo, subset=idx_subset).fit()
print(model.summary())
# OLS Regression Results
# ==============================================================================
# Dep. Variable: BW R-squared: 0.706
# Model: OLS Adj. R-squared: 0.676
# Method: Least Squares F-statistic: 23.32
# Date: Thu, 27 Feb 2020 Prob (F-statistic): 8.42e-16
# Time: 14:56:57 Log-Likelihood: -215.55
# No. Observations: 76 AIC: 447.1
# Df Residuals: 68 BIC: 465.7
# Df Model: 7
# Covariance Type: nonrobust
# ======================================================================================================
# coef std err t P>|t| [0.025 0.975]
# ------------------------------------------------------------------------------------------------------
# Intercept 23.4759 1.721 13.637 0.000 20.041 26.911
# C(sex)[T.m] 10.4744 3.158 3.317 0.001 4.173 16.776
# C(ko_parent)[T.MAT] 4.6399 2.384 1.946 0.056 -0.117 9.397
# C(sex)[T.m]:C(ko_parent)[T.MAT] 4.6208 4.142 1.116 0.269 -3.645 12.887
# SC 2.9539 2.518 1.173 0.245 -2.071 7.979
# C(sex)[T.m]:SC 3.0082 4.049 0.743 0.460 -5.072 11.088
# C(ko_parent)[T.MAT]:SC 1.0103 4.400 0.230 0.819 -7.770 9.790
# C(sex)[T.m]:C(ko_parent)[T.MAT]:SC -12.2497 6.355 -1.928 0.058 -24.931 0.432
# ==============================================================================
# Omnibus: 3.016 Durbin-Watson: 1.576
# Prob(Omnibus): 0.221 Jarque-Bera (JB): 2.498
# Skew: 0.440 Prob(JB): 0.287
# Kurtosis: 3.118 Cond. No. 27.7
# ==============================================================================
# Warnings:
# [1] Standard Errors assume that the covariance matrix of the errors is correctly specified.
print(model.pvalues)
def model_line(model, sex, ko_parent, SC):
    sex = float(sex == 'm')
    ko_parent = float(ko_parent == 'MAT')
return model.params['Intercept'] + \
model.params['C(sex)[T.m]'] * sex + \
model.params['C(ko_parent)[T.MAT]'] * ko_parent + \
model.params['C(sex)[T.m]:C(ko_parent)[T.MAT]'] * sex * ko_parent + \
model.params['SC'] * SC + \
model.params['C(sex)[T.m]:SC'] * sex * SC + \
model.params['C(ko_parent)[T.MAT]:SC'] * ko_parent * SC + \
model.params['C(sex)[T.m]:C(ko_parent)[T.MAT]:SC'] * sex * ko_parent * SC
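# quick self-check (a sketch, assuming pandas is in scope as pd, as elsewhere in
# this script): model_line() hand-expands the fitted interaction terms, so it
# should agree with statsmodels' own predict(); SC=0.5 is an arbitrary test value
if DEBUG:
    aux_row = pd.DataFrame({'sex': ['f'], 'ko_parent': ['MAT'], 'SC': [0.5]})
    print(np.asarray(model.predict(aux_row))[0])                # statsmodels prediction
    print(model_line(model, sex='f', ko_parent='MAT', SC=0.5))  # hand-expanded equivalent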
# plot BW as a function of SC
if DEBUG:
annotate = False
plt.clf()
# f PAT
idx = (metainfo['sex'] == 'f') * (metainfo['ko_parent'] == 'PAT') * (metainfo['genotype'] == 'KLF14-KO:WT')
SC = np.linspace(np.min(metainfo['SC'][idx]), np.max(metainfo['SC'][idx]))
plt.scatter(metainfo['SC'][idx], metainfo['BW'][idx], label='f PAT WT', color='C0', facecolor='none')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['SC'][i], metainfo['BW'][i]))
idx = (metainfo['sex'] == 'f') * (metainfo['ko_parent'] == 'PAT') * (metainfo['genotype'] == 'KLF14-KO:Het')
SC = np.linspace(np.min(metainfo['SC'][idx]), np.max(metainfo['SC'][idx]))
plt.scatter(metainfo['SC'][idx], metainfo['BW'][idx], label='f PAT Het', color='C0')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['SC'][i], metainfo['BW'][i]))
idx = (metainfo['sex'] == 'f') * (metainfo['ko_parent'] == 'PAT')
SC = np.linspace(np.min(metainfo['SC'][idx]), np.max(metainfo['SC'][idx]))
BW = model_line(model, sex='f', ko_parent='PAT', SC=SC)
plt.plot(SC, BW, color='C0', linewidth=3)
# f MAT
idx = (metainfo['sex'] == 'f') * (metainfo['ko_parent'] == 'MAT') * (metainfo['genotype'] == 'KLF14-KO:WT')
SC = np.linspace(np.min(metainfo['SC'][idx]), np.max(metainfo['SC'][idx]))
plt.scatter(metainfo['SC'][idx], metainfo['BW'][idx], label='f MAT WT', color='C2', facecolor='none')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['SC'][i], metainfo['BW'][i]))
idx = (metainfo['sex'] == 'f') * (metainfo['ko_parent'] == 'MAT') * (metainfo['genotype'] == 'KLF14-KO:Het')
SC = np.linspace(np.min(metainfo['SC'][idx]), np.max(metainfo['SC'][idx]))
plt.scatter(metainfo['SC'][idx], metainfo['BW'][idx], label='f MAT Het', color='C2')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['SC'][i], metainfo['BW'][i]))
idx = (metainfo['sex'] == 'f') * (metainfo['ko_parent'] == 'MAT')
SC = np.linspace(np.min(metainfo['SC'][idx]), np.max(metainfo['SC'][idx]))
BW = model_line(model, sex='f', ko_parent='MAT', SC=SC)
plt.plot(SC, BW, color='C2', linewidth=3)
# m PAT
idx = (metainfo['sex'] == 'm') * (metainfo['ko_parent'] == 'PAT') * (metainfo['genotype'] == 'KLF14-KO:WT')
SC = np.linspace(np.min(metainfo['SC'][idx]), np.max(metainfo['SC'][idx]))
plt.scatter(metainfo['SC'][idx], metainfo['BW'][idx], label='m PAT WT', color='k', facecolor='none')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['SC'][i], metainfo['BW'][i]))
idx = (metainfo['sex'] == 'm') * (metainfo['ko_parent'] == 'PAT') * (metainfo['genotype'] == 'KLF14-KO:Het')
SC = np.linspace(np.min(metainfo['SC'][idx]), np.max(metainfo['SC'][idx]))
plt.scatter(metainfo['SC'][idx], metainfo['BW'][idx], label='m PAT Het', color='k')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['SC'][i], metainfo['BW'][i]))
idx = (metainfo['sex'] == 'm') * (metainfo['ko_parent'] == 'PAT')
SC = np.linspace(np.min(metainfo['SC'][idx]), np.max(metainfo['SC'][idx]))
BW = model_line(model, sex='m', ko_parent='PAT', SC=SC)
plt.plot(SC, BW, color='k', linewidth=3)
# m MAT
idx = (metainfo['sex'] == 'm') * (metainfo['ko_parent'] == 'MAT') * (metainfo['genotype'] == 'KLF14-KO:WT')
SC = np.linspace(np.min(metainfo['SC'][idx]), np.max(metainfo['SC'][idx]))
plt.scatter(metainfo['SC'][idx], metainfo['BW'][idx], label='m MAT WT', color='C3', facecolor='none')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['SC'][i], metainfo['BW'][i]))
idx = (metainfo['sex'] == 'm') * (metainfo['ko_parent'] == 'MAT') * (metainfo['genotype'] == 'KLF14-KO:Het')
SC = np.linspace(np.min(metainfo['SC'][idx]), np.max(metainfo['SC'][idx]))
plt.scatter(metainfo['SC'][idx], metainfo['BW'][idx], label='m MAT Het', color='C3')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['SC'][i], metainfo['BW'][i]))
idx = (metainfo['sex'] == 'm') * (metainfo['ko_parent'] == 'MAT')
SC = np.linspace(np.min(metainfo['SC'][idx]), np.max(metainfo['SC'][idx]))
BW = model_line(model, sex='m', ko_parent='MAT', SC=SC)
plt.plot(SC, BW, color='C3', linewidth=3)
plt.legend()
plt.xlabel('m$_{SC}$ (g)', fontsize=14)
plt.ylabel('BW (g)', fontsize=14)
plt.tick_params(labelsize=14)
plt.tight_layout()
plt.savefig(os.path.join(figures_dir, 'klf14_b6ntac_exp_0099_bw_model_mSC.png'), bbox_inches='tight')
plt.savefig(os.path.join(figures_dir, 'klf14_b6ntac_exp_0099_bw_model_mSC.svg'), bbox_inches='tight')
########################################################################################################################
### BW ~ C(sex) * C(ko_parent) * SC
### VALID model, 5 outliers removed
########################################################################################################################
# don't use lines with NaNs (these should be removed by fit(), but just in case)
idx_not_nan = np.where(~np.isnan(metainfo['SC']) * ~np.isnan(metainfo['gWAT']) * ~np.isnan(metainfo['BW']))[0]
# SC outliers
idx_outliers = [35, 36, 37, 64, 65]
# data that we are going to use
idx_subset = list(set(idx_not_nan) - set(idx_outliers))
# fit linear model to data
model = sm.formula.ols('BW ~ C(sex) * C(ko_parent) * SC', data=metainfo, subset=idx_subset).fit()
print(model.summary())
# OLS Regression Results
# ==============================================================================
# Dep. Variable: BW R-squared: 0.754
# Model: OLS Adj. R-squared: 0.727
# Method: Least Squares F-statistic: 27.62
# Date: Thu, 27 Feb 2020 Prob (F-statistic): 6.14e-17
# Time: 15:14:50 Log-Likelihood: -193.59
# No. Observations: 71 AIC: 403.2
# Df Residuals: 63 BIC: 421.3
# Df Model: 7
# Covariance Type: nonrobust
# ======================================================================================================
# coef std err t P>|t| [0.025 0.975]
# ------------------------------------------------------------------------------------------------------
# Intercept 21.6675 1.715 12.634 0.000 18.240 25.095
# C(sex)[T.m] 12.2828 2.936 4.184 0.000 6.416 18.150
# C(ko_parent)[T.MAT] 0.2006 3.291 0.061 0.952 -6.375 6.777
# C(sex)[T.m]:C(ko_parent)[T.MAT] 9.0601 4.486 2.020 0.048 0.095 18.025
# SC 8.4782 2.989 2.837 0.006 2.506 14.450
# C(sex)[T.m]:SC -2.5161 4.132 -0.609 0.545 -10.774 5.742
# C(ko_parent)[T.MAT]:SC 20.4707 10.549 1.941 0.057 -0.609 41.550
# C(sex)[T.m]:C(ko_parent)[T.MAT]:SC -31.7102 11.327 -2.799 0.007 -54.346 -9.074
# ==============================================================================
# Omnibus: 5.918 Durbin-Watson: 1.687
# Prob(Omnibus): 0.052 Jarque-Bera (JB): 5.075
# Skew: 0.575 Prob(JB): 0.0790
# Kurtosis: 3.626 Cond. No. 53.2
# ==============================================================================
# Warnings:
# [1] Standard Errors assume that the covariance matrix of the errors is correctly specified.
print(model.pvalues)
# partial regression and influence plots
if DEBUG:
sm.graphics.plot_partregress_grid(model)
sm.graphics.influence_plot(model, criterion="cooks")
def model_line(model, sex, ko_parent, SC):
    sex = float(sex == 'm')
    ko_parent = float(ko_parent == 'MAT')
return model.params['Intercept'] + \
model.params['C(sex)[T.m]'] * sex + \
model.params['C(ko_parent)[T.MAT]'] * ko_parent + \
model.params['C(sex)[T.m]:C(ko_parent)[T.MAT]'] * sex * ko_parent + \
model.params['SC'] * SC + \
model.params['C(sex)[T.m]:SC'] * sex * SC + \
model.params['C(ko_parent)[T.MAT]:SC'] * ko_parent * SC + \
model.params['C(sex)[T.m]:C(ko_parent)[T.MAT]:SC'] * sex * ko_parent * SC
# plot BW as a function of SC
if DEBUG:
annotate = False
plt.clf()
# f PAT
idx = (metainfo['sex'] == 'f') * (metainfo['ko_parent'] == 'PAT') * (metainfo['genotype'] == 'KLF14-KO:WT')
SC = np.linspace(np.min(metainfo['SC'][idx]), np.max(metainfo['SC'][idx]))
plt.scatter(metainfo['SC'][idx], metainfo['BW'][idx], label='f PAT WT', color='C0', facecolor='none')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['SC'][i], metainfo['BW'][i]))
idx = (metainfo['sex'] == 'f') * (metainfo['ko_parent'] == 'PAT') * (metainfo['genotype'] == 'KLF14-KO:Het')
SC = np.linspace(np.min(metainfo['SC'][idx]), np.max(metainfo['SC'][idx]))
plt.scatter(metainfo['SC'][idx], metainfo['BW'][idx], label='f PAT Het', color='C0')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['SC'][i], metainfo['BW'][i]))
idx = (metainfo['sex'] == 'f') * (metainfo['ko_parent'] == 'PAT')
SC = np.linspace(np.min(metainfo['SC'][idx]), np.max(metainfo['SC'][idx]))
BW = model_line(model, sex='f', ko_parent='PAT', SC=SC)
plt.plot(SC, BW, color='C0', linewidth=3)
# f MAT
idx = (metainfo['sex'] == 'f') * (metainfo['ko_parent'] == 'MAT') * (metainfo['genotype'] == 'KLF14-KO:WT')
SC = np.linspace(np.min(metainfo['SC'][idx]), np.max(metainfo['SC'][idx]))
plt.scatter(metainfo['SC'][idx], metainfo['BW'][idx], label='f MAT WT', color='C2', facecolor='none')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['SC'][i], metainfo['BW'][i]))
idx = (metainfo['sex'] == 'f') * (metainfo['ko_parent'] == 'MAT') * (metainfo['genotype'] == 'KLF14-KO:Het')
SC = np.linspace(np.min(metainfo['SC'][idx]), np.max(metainfo['SC'][idx]))
plt.scatter(metainfo['SC'][idx], metainfo['BW'][idx], label='f MAT Het', color='C2')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['SC'][i], metainfo['BW'][i]))
idx = (metainfo['sex'] == 'f') * (metainfo['ko_parent'] == 'MAT')
SC = np.linspace(np.min(metainfo['SC'][idx]), np.max(metainfo['SC'][idx]))
BW = model_line(model, sex='f', ko_parent='MAT', SC=SC)
plt.plot(SC, BW, color='C2', linewidth=3)
# m PAT
idx = (metainfo['sex'] == 'm') * (metainfo['ko_parent'] == 'PAT') * (metainfo['genotype'] == 'KLF14-KO:WT')
SC = np.linspace(np.min(metainfo['SC'][idx]), np.max(metainfo['SC'][idx]))
plt.scatter(metainfo['SC'][idx], metainfo['BW'][idx], label='m PAT WT', color='k', facecolor='none')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['SC'][i], metainfo['BW'][i]))
idx = (metainfo['sex'] == 'm') * (metainfo['ko_parent'] == 'PAT') * (metainfo['genotype'] == 'KLF14-KO:Het')
SC = np.linspace(np.min(metainfo['SC'][idx]), np.max(metainfo['SC'][idx]))
plt.scatter(metainfo['SC'][idx], metainfo['BW'][idx], label='m PAT Het', color='k')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['SC'][i], metainfo['BW'][i]))
idx = (metainfo['sex'] == 'm') * (metainfo['ko_parent'] == 'PAT')
SC = np.linspace(np.min(metainfo['SC'][idx]), np.max(metainfo['SC'][idx]))
BW = model_line(model, sex='m', ko_parent='PAT', SC=SC)
plt.plot(SC, BW, color='k', linewidth=3)
# m MAT
idx = (metainfo['sex'] == 'm') * (metainfo['ko_parent'] == 'MAT') * (metainfo['genotype'] == 'KLF14-KO:WT')
SC = np.linspace(np.min(metainfo['SC'][idx]), np.max(metainfo['SC'][idx]))
plt.scatter(metainfo['SC'][idx], metainfo['BW'][idx], label='m MAT WT', color='C3', facecolor='none')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['SC'][i], metainfo['BW'][i]))
idx = (metainfo['sex'] == 'm') * (metainfo['ko_parent'] == 'MAT') * (metainfo['genotype'] == 'KLF14-KO:Het')
SC = np.linspace(np.min(metainfo['SC'][idx]), np.max(metainfo['SC'][idx]))
plt.scatter(metainfo['SC'][idx], metainfo['BW'][idx], label='m MAT Het', color='C3')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['SC'][i], metainfo['BW'][i]))
idx = (metainfo['sex'] == 'm') * (metainfo['ko_parent'] == 'MAT')
SC = np.linspace(np.min(metainfo['SC'][idx]), np.max(metainfo['SC'][idx]))
BW = model_line(model, sex='m', ko_parent='MAT', SC=SC)
plt.plot(SC, BW, color='C3', linewidth=3)
plt.legend()
plt.xlabel('m$_{SC}$ (g)', fontsize=14)
plt.ylabel('BW (g)', fontsize=14)
plt.tick_params(labelsize=14)
plt.tight_layout()
plt.savefig(os.path.join(figures_dir, 'klf14_b6ntac_exp_0099_bw_model_mSC_no_outliers.png'), bbox_inches='tight')
plt.savefig(os.path.join(figures_dir, 'klf14_b6ntac_exp_0099_bw_model_mSC_no_outliers.svg'), bbox_inches='tight')
########################################################################################################################
### BW ~ C(sex) * C(ko_parent) * SC
### VALID model, 3 outliers removed
########################################################################################################################
# don't use lines with NaNs (these should be removed by fit(), but just in case)
idx_not_nan = np.where(~np.isnan(metainfo['SC']) * ~np.isnan(metainfo['gWAT']) * ~np.isnan(metainfo['BW']))[0]
# SC outliers
idx_outliers = [35, 36, 65]
# data that we are going to use
idx_subset = list(set(idx_not_nan) - set(idx_outliers))
# fit linear model to data
model = sm.formula.ols('BW ~ C(sex) * C(ko_parent) * SC', data=metainfo, subset=idx_subset).fit()
print(model.summary())
# OLS Regression Results
# ==============================================================================
# Dep. Variable: BW R-squared: 0.743
# Model: OLS Adj. R-squared: 0.715
# Method: Least Squares F-statistic: 26.85
# Date: Wed, 04 Mar 2020 Prob (F-statistic): 6.67e-17
# Time: 02:52:03 Log-Likelihood: -201.43
# No. Observations: 73 AIC: 418.9
# Df Residuals: 65 BIC: 437.2
# Df Model: 7
# Covariance Type: nonrobust
# ======================================================================================================
# coef std err t P>|t| [0.025 0.975]
# ------------------------------------------------------------------------------------------------------
# Intercept 21.7064 1.769 12.273 0.000 18.174 25.239
# C(sex)[T.m] 12.2438 3.028 4.044 0.000 6.197 18.291
# C(ko_parent)[T.MAT] 3.6077 2.575 1.401 0.166 -1.535 8.751
# C(sex)[T.m]:C(ko_parent)[T.MAT] 5.6530 4.064 1.391 0.169 -2.464 13.770
# SC 7.4582 3.031 2.460 0.017 1.404 13.512
# C(sex)[T.m]:SC -1.4961 4.225 -0.354 0.724 -9.934 6.942
# C(ko_parent)[T.MAT]:SC 7.5800 6.199 1.223 0.226 -4.801 19.961
# C(sex)[T.m]:C(ko_parent)[T.MAT]:SC -18.8194 7.520 -2.503 0.015 -33.838 -3.801
# ==============================================================================
# Omnibus: 5.698 Durbin-Watson: 1.666
# Prob(Omnibus): 0.058 Jarque-Bera (JB): 4.862
# Skew: 0.571 Prob(JB): 0.0879
# Kurtosis: 3.544 Cond. No. 34.6
# ==============================================================================
# Warnings:
# [1] Standard Errors assume that the covariance matrix of the errors is correctly specified.
print(model.pvalues)
# partial regression and influence plots
if DEBUG:
sm.graphics.plot_partregress_grid(model)
sm.graphics.influence_plot(model, criterion="cooks")
def model_line(model, sex, ko_parent, SC):
    sex = float(sex == 'm')
    ko_parent = float(ko_parent == 'MAT')
return model.params['Intercept'] + \
model.params['C(sex)[T.m]'] * sex + \
model.params['C(ko_parent)[T.MAT]'] * ko_parent + \
model.params['C(sex)[T.m]:C(ko_parent)[T.MAT]'] * sex * ko_parent + \
model.params['SC'] * SC + \
model.params['C(sex)[T.m]:SC'] * sex * SC + \
model.params['C(ko_parent)[T.MAT]:SC'] * ko_parent * SC + \
model.params['C(sex)[T.m]:C(ko_parent)[T.MAT]:SC'] * sex * ko_parent * SC
# plot BW as a function of SC
if DEBUG:
annotate = False
plt.clf()
# f PAT
idx = (metainfo['sex'] == 'f') * (metainfo['ko_parent'] == 'PAT') * (metainfo['genotype'] == 'KLF14-KO:WT')
SC = np.linspace(np.min(metainfo['SC'][idx]), np.max(metainfo['SC'][idx]))
plt.scatter(metainfo['SC'][idx], metainfo['BW'][idx], label='f PAT WT', color='C0', facecolor='none')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['SC'][i], metainfo['BW'][i]))
idx = (metainfo['sex'] == 'f') * (metainfo['ko_parent'] == 'PAT') * (metainfo['genotype'] == 'KLF14-KO:Het')
SC = np.linspace(np.min(metainfo['SC'][idx]), np.max(metainfo['SC'][idx]))
plt.scatter(metainfo['SC'][idx], metainfo['BW'][idx], label='f PAT Het', color='C0')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['SC'][i], metainfo['BW'][i]))
idx = (metainfo['sex'] == 'f') * (metainfo['ko_parent'] == 'PAT')
SC = np.linspace(np.min(metainfo['SC'][idx]), np.max(metainfo['SC'][idx]))
BW = model_line(model, sex='f', ko_parent='PAT', SC=SC)
plt.plot(SC, BW, color='C0', linewidth=3)
# f MAT
idx = (metainfo['sex'] == 'f') * (metainfo['ko_parent'] == 'MAT') * (metainfo['genotype'] == 'KLF14-KO:WT')
SC = np.linspace(np.min(metainfo['SC'][idx]), np.max(metainfo['SC'][idx]))
plt.scatter(metainfo['SC'][idx], metainfo['BW'][idx], label='f MAT WT', color='C2', facecolor='none')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['SC'][i], metainfo['BW'][i]))
idx = (metainfo['sex'] == 'f') * (metainfo['ko_parent'] == 'MAT') * (metainfo['genotype'] == 'KLF14-KO:Het')
SC = np.linspace(np.min(metainfo['SC'][idx]), np.max(metainfo['SC'][idx]))
plt.scatter(metainfo['SC'][idx], metainfo['BW'][idx], label='f MAT Het', color='C2')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['SC'][i], metainfo['BW'][i]))
idx = (metainfo['sex'] == 'f') * (metainfo['ko_parent'] == 'MAT')
SC = np.linspace(np.min(metainfo['SC'][idx]), np.max(metainfo['SC'][idx]))
BW = model_line(model, sex='f', ko_parent='MAT', SC=SC)
plt.plot(SC, BW, color='C2', linewidth=3)
# m PAT
idx = (metainfo['sex'] == 'm') * (metainfo['ko_parent'] == 'PAT') * (metainfo['genotype'] == 'KLF14-KO:WT')
SC = np.linspace(np.min(metainfo['SC'][idx]), np.max(metainfo['SC'][idx]))
plt.scatter(metainfo['SC'][idx], metainfo['BW'][idx], label='m PAT WT', color='k', facecolor='none')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['SC'][i], metainfo['BW'][i]))
idx = (metainfo['sex'] == 'm') * (metainfo['ko_parent'] == 'PAT') * (metainfo['genotype'] == 'KLF14-KO:Het')
SC = np.linspace(np.min(metainfo['SC'][idx]), np.max(metainfo['SC'][idx]))
plt.scatter(metainfo['SC'][idx], metainfo['BW'][idx], label='m PAT Het', color='k')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['SC'][i], metainfo['BW'][i]))
idx = (metainfo['sex'] == 'm') * (metainfo['ko_parent'] == 'PAT')
SC = np.linspace(np.min(metainfo['SC'][idx]), np.max(metainfo['SC'][idx]))
BW = model_line(model, sex='m', ko_parent='PAT', SC=SC)
plt.plot(SC, BW, color='k', linewidth=3)
# m MAT
idx = (metainfo['sex'] == 'm') * (metainfo['ko_parent'] == 'MAT') * (metainfo['genotype'] == 'KLF14-KO:WT')
SC = np.linspace(np.min(metainfo['SC'][idx]), np.max(metainfo['SC'][idx]))
plt.scatter(metainfo['SC'][idx], metainfo['BW'][idx], label='m MAT WT', color='C3', facecolor='none')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['SC'][i], metainfo['BW'][i]))
idx = (metainfo['sex'] == 'm') * (metainfo['ko_parent'] == 'MAT') * (metainfo['genotype'] == 'KLF14-KO:Het')
SC = np.linspace(np.min(metainfo['SC'][idx]), np.max(metainfo['SC'][idx]))
plt.scatter(metainfo['SC'][idx], metainfo['BW'][idx], label='m MAT Het', color='C3')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['SC'][i], metainfo['BW'][i]))
idx = (metainfo['sex'] == 'm') * (metainfo['ko_parent'] == 'MAT')
SC = np.linspace(np.min(metainfo['SC'][idx]), np.max(metainfo['SC'][idx]))
BW = model_line(model, sex='m', ko_parent='MAT', SC=SC)
plt.plot(SC, BW, color='C3', linewidth=3)
plt.legend()
plt.xlabel('m$_{SC}$ (g)', fontsize=14)
plt.ylabel('BW (g)', fontsize=14)
plt.tick_params(labelsize=14)
plt.tight_layout()
plt.savefig(os.path.join(figures_dir, 'klf14_b6ntac_exp_0099_bw_model_mSC_3_outliers.png'), bbox_inches='tight')
plt.savefig(os.path.join(figures_dir, 'klf14_b6ntac_exp_0099_bw_model_mSC_3_outliers.svg'), bbox_inches='tight')
########################################################################################################################
### Model BW ~ C(sex) * C(ko_parent) * SC
### Add nested random effects: mother_id -> litter -> mouse
# https://www.statsmodels.org/dev/generated/statsmodels.formula.api.mixedlm.html?highlight=mixedlm#statsmodels.formula.api.mixedlm
### EXPERIMENTAL model
########################################################################################################################
# don't use lines with NaNs (these should be removed by fit(), but just in case)
idx_not_nan = np.where(~np.isnan(metainfo['SC']) * ~np.isnan(metainfo['gWAT']) * ~np.isnan(metainfo['BW']))[0]
# data that we are going to use
idx_subset = idx_not_nan
# Mixed-effects linear model
aux = metainfo.loc[idx_subset, :]
formula = 'BW ~ C(sex) * C(ko_parent) * SC'
vc = {'litter': '0 + C(litter)'}  # litter is a random effect nested inside mother_id
model = sm.formula.mixedlm(formula, vc_formula=vc, re_formula='1', groups='mother_id', data=aux).fit()
print(model.summary())
# /home/rcasero/.conda/envs/cytometer_tensorflow/lib/python3.6/site-packages/pandas/core/computation/expressions.py:183: UserWarning: evaluating in Python space because the '*' operator is not supported by numexpr for the bool dtype, use '&' instead
# .format(op=op_str, alt_op=unsupported[op_str]))
# Mixed Linear Model Regression Results
# ===============================================================================
# Model: MixedLM Dependent Variable: BW
# No. Observations: 76 Method: REML
# No. Groups: 8 Scale: 13.6930
# Min. group size: 1 Likelihood: -199.2320
# Max. group size: 20 Converged: Yes
# Mean group size: 9.5
# -------------------------------------------------------------------------------
# Coef. Std.Err. z P>|z| [0.025 0.975]
# -------------------------------------------------------------------------------
# Intercept 25.506 2.129 11.981 0.000 21.333 29.678
# C(sex)[T.m] 8.618 2.990 2.882 0.004 2.757 14.479
# C(ko_parent)[T.MAT] 2.191 3.433 0.638 0.523 -4.539 8.920
# C(sex)[T.m]:C(ko_parent)[T.MAT] 4.530 4.099 1.105 0.269 -3.504 12.565
# SC 1.437 2.447 0.587 0.557 -3.360 6.233
# C(sex)[T.m]:SC 4.643 3.661 1.268 0.205 -2.531 11.818
# C(ko_parent)[T.MAT]:SC 4.991 5.019 0.994 0.320 -4.847 14.829
# C(sex)[T.m]:C(ko_parent)[T.MAT]:SC -13.179 6.603 -1.996 0.046 -26.120 -0.238
# mother_id Var 2.805 3.306
# litter Var 4.728 2.638
# ===============================================================================
def model_line(model, sex, ko_parent, SC):
    sex = float(sex == 'm')
    ko_parent = float(ko_parent == 'MAT')
return model.params['Intercept'] + \
model.params['C(sex)[T.m]'] * sex + \
model.params['C(ko_parent)[T.MAT]'] * ko_parent + \
model.params['C(sex)[T.m]:C(ko_parent)[T.MAT]'] * sex * ko_parent + \
model.params['SC'] * SC + \
model.params['C(sex)[T.m]:SC'] * sex * SC + \
model.params['C(ko_parent)[T.MAT]:SC'] * ko_parent * SC + \
model.params['C(sex)[T.m]:C(ko_parent)[T.MAT]:SC'] * sex * ko_parent * SC
# plot BW as a function of SC
if DEBUG:
annotate = False
plt.clf()
# f PAT
idx = (metainfo['sex'] == 'f') * (metainfo['ko_parent'] == 'PAT') * (metainfo['genotype'] == 'KLF14-KO:WT')
SC = np.linspace(np.min(metainfo['SC'][idx]), np.max(metainfo['SC'][idx]))
plt.scatter(metainfo['SC'][idx], metainfo['BW'][idx], label='f PAT WT', color='C0', facecolor='none')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['SC'][i], metainfo['BW'][i]))
idx = (metainfo['sex'] == 'f') * (metainfo['ko_parent'] == 'PAT') * (metainfo['genotype'] == 'KLF14-KO:Het')
SC = np.linspace(np.min(metainfo['SC'][idx]), np.max(metainfo['SC'][idx]))
plt.scatter(metainfo['SC'][idx], metainfo['BW'][idx], label='f PAT Het', color='C0')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['SC'][i], metainfo['BW'][i]))
idx = (metainfo['sex'] == 'f') * (metainfo['ko_parent'] == 'PAT')
SC = np.linspace(np.min(metainfo['SC'][idx]), np.max(metainfo['SC'][idx]))
BW = model_line(model, sex='f', ko_parent='PAT', SC=SC)
plt.plot(SC, BW, color='C0', linewidth=3)
# f MAT
idx = (metainfo['sex'] == 'f') * (metainfo['ko_parent'] == 'MAT') * (metainfo['genotype'] == 'KLF14-KO:WT')
SC = np.linspace(np.min(metainfo['SC'][idx]), np.max(metainfo['SC'][idx]))
plt.scatter(metainfo['SC'][idx], metainfo['BW'][idx], label='f MAT WT', color='C2', facecolor='none')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['SC'][i], metainfo['BW'][i]))
idx = (metainfo['sex'] == 'f') * (metainfo['ko_parent'] == 'MAT') * (metainfo['genotype'] == 'KLF14-KO:Het')
SC = np.linspace(np.min(metainfo['SC'][idx]), np.max(metainfo['SC'][idx]))
plt.scatter(metainfo['SC'][idx], metainfo['BW'][idx], label='f MAT Het', color='C2')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['SC'][i], metainfo['BW'][i]))
idx = (metainfo['sex'] == 'f') * (metainfo['ko_parent'] == 'MAT')
SC = np.linspace(np.min(metainfo['SC'][idx]), np.max(metainfo['SC'][idx]))
BW = model_line(model, sex='f', ko_parent='MAT', SC=SC)
plt.plot(SC, BW, color='C2', linewidth=3)
# m PAT
idx = (metainfo['sex'] == 'm') * (metainfo['ko_parent'] == 'PAT') * (metainfo['genotype'] == 'KLF14-KO:WT')
SC = np.linspace(np.min(metainfo['SC'][idx]), np.max(metainfo['SC'][idx]))
plt.scatter(metainfo['SC'][idx], metainfo['BW'][idx], label='m PAT WT', color='k', facecolor='none')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['SC'][i], metainfo['BW'][i]))
idx = (metainfo['sex'] == 'm') * (metainfo['ko_parent'] == 'PAT') * (metainfo['genotype'] == 'KLF14-KO:Het')
SC = np.linspace(np.min(metainfo['SC'][idx]), np.max(metainfo['SC'][idx]))
plt.scatter(metainfo['SC'][idx], metainfo['BW'][idx], label='m PAT Het', color='k')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['SC'][i], metainfo['BW'][i]))
idx = (metainfo['sex'] == 'm') * (metainfo['ko_parent'] == 'PAT')
SC = np.linspace(np.min(metainfo['SC'][idx]), np.max(metainfo['SC'][idx]))
BW = model_line(model, sex='m', ko_parent='PAT', SC=SC)
plt.plot(SC, BW, color='k', linewidth=3)
# m MAT
idx = (metainfo['sex'] == 'm') * (metainfo['ko_parent'] == 'MAT') * (metainfo['genotype'] == 'KLF14-KO:WT')
SC = np.linspace(np.min(metainfo['SC'][idx]), np.max(metainfo['SC'][idx]))
plt.scatter(metainfo['SC'][idx], metainfo['BW'][idx], label='m MAT WT', color='C3', facecolor='none')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['SC'][i], metainfo['BW'][i]))
idx = (metainfo['sex'] == 'm') * (metainfo['ko_parent'] == 'MAT') * (metainfo['genotype'] == 'KLF14-KO:Het')
SC = np.linspace(np.min(metainfo['SC'][idx]), np.max(metainfo['SC'][idx]))
plt.scatter(metainfo['SC'][idx], metainfo['BW'][idx], label='m MAT Het', color='C3')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['SC'][i], metainfo['BW'][i]))
idx = (metainfo['sex'] == 'm') * (metainfo['ko_parent'] == 'MAT')
SC = np.linspace(np.min(metainfo['SC'][idx]), np.max(metainfo['SC'][idx]))
BW = model_line(model, sex='m', ko_parent='MAT', SC=SC)
plt.plot(SC, BW, color='C3', linewidth=3)
plt.legend()
plt.xlabel('m$_{SC}$ (g)', fontsize=14)
plt.ylabel('BW (g)', fontsize=14)
plt.tick_params(labelsize=14)
plt.tight_layout()
plt.savefig(os.path.join(figures_dir, 'klf14_b6ntac_exp_0099_bw_mixed_model_mSC.png'), bbox_inches='tight')
plt.savefig(os.path.join(figures_dir, 'klf14_b6ntac_exp_0099_bw_mixed_model_mSC.svg'), bbox_inches='tight')
########################################################################################################################
## Load SQWAT and gWAT quantile data computed in a previous section
### USED IN PAPER
########################################################################################################################
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats
import pandas as pd
import statsmodels.api as sm
from statsmodels.stats.multicomp import pairwise_tukeyhsd
from statsmodels.stats.multicomp import MultiComparison
from statsmodels.stats.multitest import multipletests
# directories
klf14_root_data_dir = os.path.join(home, 'Data/cytometer_data/klf14')
annotations_dir = os.path.join(home, 'Data/cytometer_data/aida_data_Klf14_v7/annotations')
ndpi_dir = os.path.join(home, 'scan_srv2_cox/Maz Yon')
figures_dir = os.path.join(home, 'GoogleDrive/Research/20190727_cytometer_paper/figures')
metainfo_dir = os.path.join(home, 'Data/cytometer_data/klf14')
DEBUG = False
quantiles = np.linspace(0, 1, 11)
quantiles = quantiles[1:-1]
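# i.e. the nine deciles 0.1, 0.2, ..., 0.9 (the 0 and 1 endpoints are dropped)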
# load metainfo file
metainfo_csv_file = os.path.join(metainfo_dir, 'klf14_b6ntac_meta_info.csv')
metainfo = pd.read_csv(metainfo_csv_file)
# make sure that in the boxplots PAT comes before MAT
metainfo['sex'] = metainfo['sex'].astype(pd.api.types.CategoricalDtype(categories=['f', 'm'], ordered=True))
metainfo['ko_parent'] = metainfo['ko_parent'].astype(pd.api.types.CategoricalDtype(categories=['PAT', 'MAT'], ordered=True))
metainfo['genotype'] = metainfo['genotype'].astype(pd.api.types.CategoricalDtype(categories=['KLF14-KO:WT', 'KLF14-KO:Het'], ordered=True))
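# note: fixing the category order also pins the reference levels that C() uses in
# the formulas below, so coefficients are relative to f, PAT and KLF14-KO:WT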
# load SQWAT data
depot = 'sqwat'
filename_quantiles = os.path.join(figures_dir, 'klf14_b6ntac_exp_0099_area_quantiles_' + depot + '.npz')
aux = np.load(filename_quantiles)
area_mean_sqwat = aux['area_mean_all']
area_q_sqwat = aux['area_q_all']
id_sqwat = aux['id_all']
ko_sqwat = aux['ko_all']
genotype_sqwat = aux['genotype_all']
sex_sqwat = aux['sex_all']
# load gWAT data
depot = 'gwat'
filename_quantiles = os.path.join(figures_dir, 'klf14_b6ntac_exp_0099_area_quantiles_' + depot + '.npz')
aux = np.load(filename_quantiles)
area_mean_gwat = aux['area_mean_all']
area_q_gwat = aux['area_q_all']
id_gwat = aux['id_all']
ko_gwat = aux['ko_all']
genotype_gwat = aux['genotype_all']
sex_gwat = aux['sex_all']
# add a new column to the metainfo frame with the mean cell area for SQWAT
metainfo_idx = [np.where(metainfo['id'] == x)[0][0] for x in id_sqwat]
metainfo.loc[metainfo_idx, 'SC_area_mean'] = area_mean_sqwat # m^2
metainfo['SC_area_mean_1e12'] = metainfo['SC_area_mean'] * 1e12 # um^2
# add a new column to the metainfo frame with the mean cell area for gWAT
metainfo_idx = [np.where(metainfo['id'] == x)[0][0] for x in id_gwat]
metainfo.loc[metainfo_idx, 'gWAT_area_mean'] = area_mean_gwat # m^2
metainfo['gWAT_area_mean_1e12'] = metainfo['gWAT_area_mean'] * 1e12 # um^2
# volume of a sphere with the same radius as a circle of the given area
def vol_sphere(area_circle):
return (4 / 3 / np.sqrt(np.pi)) * np.power(area_circle, 3/2)
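# derivation: a circle of area A has radius r = sqrt(A / pi), and a sphere of that
# radius has volume (4/3) * pi * r^3 = 4 / (3 * sqrt(pi)) * A^(3/2), as coded above;
# e.g. A = 1e-9 m^2 (1000 um^2) gives vol_sphere(1e-9) ~ 2.38e-14 m^3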
# add a new column to the metainfo frame with the mean cell volume for SQWAT
metainfo_idx = [np.where(metainfo['id'] == x)[0][0] for x in id_sqwat]
metainfo.loc[metainfo_idx, 'SC_vol_mean'] = vol_sphere(metainfo['SC_area_mean'])  # m^3
metainfo['SC_vol_mean_1e12'] = metainfo['SC_vol_mean'] * 1e12  # mean(SC_vol_mean_1e12) = 0.2013305044030984
metainfo['SC_vol_mean_1e18'] = metainfo['SC_vol_mean'] * 1e18  # mean(SC_vol_mean_1e18) = 201331 um^3
# add a new column to the metainfo frame with the mean cell volume for gWAT
metainfo_idx = [np.where(metainfo['id'] == x)[0][0] for x in id_gwat]
metainfo.loc[metainfo_idx, 'gWAT_vol_mean'] = vol_sphere(metainfo['gWAT_area_mean'])  # m^3
metainfo['gWAT_vol_mean_1e12'] = metainfo['gWAT_vol_mean'] * 1e12 # mean(gWAT_vol_mean_1e12) = 0.3281878940926992
metainfo['gWAT_vol_mean_1e18'] = metainfo['gWAT_vol_mean'] * 1e18 # mean(gWAT_vol_mean_1e18) = 328188 um^3
# add a new column to the metainfo frame with the median cell volume for SQWAT
metainfo_idx = [np.where(metainfo['id'] == x)[0][0] for x in id_sqwat]
metainfo['SC_vol_median'] = np.nan
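# column 4 of the 9-decile quantile array is the 0.5 quantile, i.e. the median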
metainfo.loc[metainfo_idx, 'SC_vol_median'] = vol_sphere(area_q_sqwat[:, 4]) # m^3
metainfo['SC_vol_median_1e12'] = metainfo['SC_vol_median'] * 1e12
# add a new column to the metainfo frame with the median cell volume for gWAT
metainfo_idx = [np.where(metainfo['id'] == x)[0][0] for x in id_gwat]
metainfo['gWAT_vol_median'] = np.nan
metainfo.loc[metainfo_idx, 'gWAT_vol_median'] = vol_sphere(area_q_gwat[:, 4]) # m^3
metainfo['gWAT_vol_median_1e12'] = metainfo['gWAT_vol_median'] * 1e12
# add new column with fat depot weights normalised by body weight
metainfo['SC_BW'] = metainfo['SC'] / metainfo['BW']
metainfo['gWAT_BW'] = metainfo['gWAT'] / metainfo['BW']
# add new column with estimates number of cells
fat_density_SC = 0.9038 # g / cm^3
fat_density_gWAT = 0.9029 # g / cm^3
metainfo['SC_kN'] = metainfo['SC'] / (fat_density_SC * 1e6 * metainfo['SC_vol_mean'])
metainfo['gWAT_kN'] = metainfo['gWAT'] / (fat_density_gWAT * 1e6 * metainfo['gWAT_vol_mean'])
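# units check: depot weight (g) / [density (g/cm^3) * 1e6 (cm^3/m^3) * mean cell
# volume (m^3)] is dimensionless, so SC_kN and gWAT_kN are estimated cell counts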
# shorthand for subgroups
idx_f = metainfo['sex'] == 'f'
idx_m = metainfo['sex'] == 'm'
idx_pat = metainfo['ko_parent'] == 'PAT'
idx_mat = metainfo['ko_parent'] == 'MAT'
idx_wt = metainfo['genotype'] == 'KLF14-KO:WT'
idx_het = metainfo['genotype'] == 'KLF14-KO:Het'
print('Females N = ' + str(np.count_nonzero(idx_f)))
print('Males N = ' + str(np.count_nonzero(idx_m)))
print('PAT N = ' + str(np.count_nonzero(idx_pat)))
print('MAT N = ' + str(np.count_nonzero(idx_mat)))
print('WT N = ' + str(np.count_nonzero(idx_wt)))
print('Het N = ' + str(np.count_nonzero(idx_het)))
# number of animals by group
# SC_area_mean female, SC_area_mean male
print(
str(np.count_nonzero(~np.isnan(metainfo['SC_area_mean']) & idx_f & idx_pat & idx_wt)) + ', ' +
str(np.count_nonzero(~np.isnan(metainfo['SC_area_mean']) & idx_f & idx_pat & idx_het)) + ', ' +
str(np.count_nonzero(~np.isnan(metainfo['SC_area_mean']) & idx_f & idx_mat & idx_wt)) + ', ' +
str(np.count_nonzero(~np.isnan(metainfo['SC_area_mean']) & idx_f & idx_mat & idx_het)) + ', ' +
str(np.count_nonzero(~np.isnan(metainfo['SC_area_mean']) & idx_m & idx_pat & idx_wt)) + ', ' +
str(np.count_nonzero(~np.isnan(metainfo['SC_area_mean']) & idx_m & idx_pat & idx_het)) + ', ' +
str(np.count_nonzero(~np.isnan(metainfo['SC_area_mean']) & idx_m & idx_mat & idx_wt)) + ', ' +
str(np.count_nonzero(~np.isnan(metainfo['SC_area_mean']) & idx_m & idx_mat & idx_het))
)
# gWAT_area_mean female, gWAT_area_mean male
print(
str(np.count_nonzero(~np.isnan(metainfo['gWAT_area_mean']) & idx_f & idx_pat & idx_wt)) + ', ' +
str(np.count_nonzero(~np.isnan(metainfo['gWAT_area_mean']) & idx_f & idx_pat & idx_het)) + ', ' +
str(np.count_nonzero(~np.isnan(metainfo['gWAT_area_mean']) & idx_f & idx_mat & idx_wt)) + ', ' +
str(np.count_nonzero(~np.isnan(metainfo['gWAT_area_mean']) & idx_f & idx_mat & idx_het)) + ', ' +
str(np.count_nonzero(~np.isnan(metainfo['gWAT_area_mean']) & idx_m & idx_pat & idx_wt)) + ', ' +
str(np.count_nonzero(~np.isnan(metainfo['gWAT_area_mean']) & idx_m & idx_pat & idx_het)) + ', ' +
str(np.count_nonzero(~np.isnan(metainfo['gWAT_area_mean']) & idx_m & idx_mat & idx_wt)) + ', ' +
str(np.count_nonzero(~np.isnan(metainfo['gWAT_area_mean']) & idx_m & idx_mat & idx_het))
)
# SC_kN female, SC_kN male
print(
str(np.count_nonzero(~np.isnan(metainfo['SC_kN']) & idx_f & idx_pat & idx_wt)) + ', ' +
str(np.count_nonzero(~np.isnan(metainfo['SC_kN']) & idx_f & idx_pat & idx_het)) + ', ' +
str(np.count_nonzero(~np.isnan(metainfo['SC_kN']) & idx_f & idx_mat & idx_wt)) + ', ' +
str(np.count_nonzero(~np.isnan(metainfo['SC_kN']) & idx_f & idx_mat & idx_het)) + ', ' +
str(np.count_nonzero(~np.isnan(metainfo['SC_kN']) & idx_m & idx_pat & idx_wt)) + ', ' +
str(np.count_nonzero(~np.isnan(metainfo['SC_kN']) & idx_m & idx_pat & idx_het)) + ', ' +
str(np.count_nonzero(~np.isnan(metainfo['SC_kN']) & idx_m & idx_mat & idx_wt)) + ', ' +
str(np.count_nonzero(~np.isnan(metainfo['SC_kN']) & idx_m & idx_mat & idx_het))
)
# gWAT_kN female, gWAT_kN male
print(
str(np.count_nonzero(~np.isnan(metainfo['gWAT_kN']) & idx_f & idx_pat & idx_wt)) + ', ' +
str(np.count_nonzero(~np.isnan(metainfo['gWAT_kN']) & idx_f & idx_pat & idx_het)) + ', ' +
str(np.count_nonzero(~np.isnan(metainfo['gWAT_kN']) & idx_f & idx_mat & idx_wt)) + ', ' +
str(np.count_nonzero(~np.isnan(metainfo['gWAT_kN']) & idx_f & idx_mat & idx_het)) + ', ' +
str(np.count_nonzero(~np.isnan(metainfo['gWAT_kN']) & idx_m & idx_pat & idx_wt)) + ', ' +
str(np.count_nonzero(~np.isnan(metainfo['gWAT_kN']) & idx_m & idx_pat & idx_het)) + ', ' +
str(np.count_nonzero(~np.isnan(metainfo['gWAT_kN']) & idx_m & idx_mat & idx_wt)) + ', ' +
str(np.count_nonzero(~np.isnan(metainfo['gWAT_kN']) & idx_m & idx_mat & idx_het))
)
if DEBUG:
plt.clf()
plt.scatter(metainfo['gWAT_vol_mean'], metainfo['gWAT_vol_median'])
plt.clf()
plt.scatter(metainfo['SC_vol_mean'], metainfo['SC_vol_median'])
if DEBUG:
# compare SQWAT to gWAT
plt.clf()
plt.scatter(metainfo['SC_vol_median'], metainfo['gWAT_vol_median'])
plt.scatter(metainfo['SC_vol_mean'], metainfo['gWAT_vol_median'])
# BW vs. SQWAT
plt.clf()
plt.scatter(metainfo['SC_vol_median'], metainfo['BW'])
plt.scatter(metainfo['SC_vol_mean'], metainfo['BW'])
# BW vs. gWAT
plt.clf()
plt.scatter(metainfo['gWAT_vol_median'], metainfo['BW'])
plt.scatter(metainfo['gWAT_vol_mean'], metainfo['BW'])
if DEBUG:
    print(np.min(metainfo['SC_vol_mean']) * 1e12)  # 1e-12 m^3 (= 1e6 um^3)
    print(np.max(metainfo['SC_vol_mean']) * 1e12)  # 1e-12 m^3 (= 1e6 um^3)
    print(np.min(metainfo['gWAT_vol_mean']) * 1e12)  # 1e-12 m^3 (= 1e6 um^3)
    print(np.max(metainfo['gWAT_vol_mean']) * 1e12)  # 1e-12 m^3 (= 1e6 um^3)
print(np.min(metainfo['SC_kN']))
print(np.max(metainfo['SC_kN']))
print(np.min(metainfo['gWAT_kN']))
print(np.max(metainfo['gWAT_kN']))
########################################################################################################################
### Statistical tests comparing sex, ko_parent and genotype for SC and gWAT mean areas
### All possible combinations between: PAT WT, PAT Het, MAT WT, MAT Het
### NOT USED IN PAPER
########################################################################################################################
def make_groups(metainfo, indices, groups):
    metainfo['groups'] = np.nan
    indices_acc = set({})
    for index, group in zip(indices, groups):
        # convert the boolean mask to positional indices before checking overlap
        index_pos = set(np.where(index)[0])
        if len(indices_acc.intersection(index_pos)) > 0:
            raise ValueError('Index in more than one group')
        indices_acc |= index_pos
        metainfo.loc[index, 'groups'] = group
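# make_groups() labels each animal with a group name so that MultiComparison can
# compare all group pairs; the masks passed in must be disjoint, otherwise it
# raises a ValueError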
# ANOVA: sex groups
model = sm.formula.ols('SC_area_mean_1e12 ~ C(sex)', data=metainfo).fit()
print(model.summary())
# Tukey's HSD post-hoc comparison: SC: sex+genotype groups
idx_not_nan = ~np.isnan(metainfo['SC_area_mean'])
make_groups(metainfo, (idx_f & idx_wt, idx_f & idx_het, idx_m & idx_wt, idx_m & idx_het), ('f_WT', 'f_Het', 'm_WT', 'm_Het'))
idx = idx_not_nan
mc = MultiComparison(metainfo.loc[idx, 'SC_area_mean'] * 1e12, metainfo.loc[idx, 'groups'])
mc_results = mc.tukeyhsd()
print(mc_results)
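# note: MultiComparison + tukeyhsd() runs every pairwise comparison between the
# groups defined by make_groups(), with Tukey's HSD family-wise error control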
if DEBUG:
plt.clf()
mc_results.plot_simultaneous()
plt.clf()
plt.boxplot(
(metainfo.loc[idx_f & idx_wt & idx_not_nan, 'SC_area_mean'] * 1e12,
metainfo.loc[idx_f & idx_het & idx_not_nan, 'SC_area_mean'] * 1e12,
metainfo.loc[idx_m & idx_wt & idx_not_nan, 'SC_area_mean'] * 1e12,
metainfo.loc[idx_m & idx_het & idx_not_nan, 'SC_area_mean'] * 1e12),
notch=True,
labels=('f WT', 'f Het', 'm WT', 'm Het')
)
    plt.ylabel(r'Area ($\mu m^2$)', fontsize=14)
plt.tick_params(labelsize=14)
plt.tight_layout()
# Tukey's HSD post-hoc comparison: Female SC: ko_parent + genotype groups
idx_not_nan = ~np.isnan(metainfo['SC_area_mean'])
make_groups(metainfo,
(idx_f & idx_pat & idx_wt, idx_f & idx_pat & idx_het, idx_f & idx_mat & idx_wt, idx_f & idx_mat & idx_het),
('PAT_WT', 'PAT_Het', 'MAT_WT', 'MAT_Het'))
idx = idx_not_nan & idx_f
mc = MultiComparison(metainfo.loc[idx, 'SC_area_mean'] * 1e12, metainfo.loc[idx, 'groups'])
mc_results = mc.tukeyhsd()
print(mc_results)
if DEBUG:
mc_results.plot_simultaneous()
plt.clf()
plt.boxplot(
(metainfo['SC_area_mean'][idx_f & idx_pat & idx_wt & idx_not_nan] * 1e12,
metainfo['SC_area_mean'][idx_f & idx_pat & idx_het & idx_not_nan] * 1e12,
metainfo['SC_area_mean'][idx_f & idx_mat & idx_wt & idx_not_nan] * 1e12,
metainfo['SC_area_mean'][idx_f & idx_mat & idx_het & idx_not_nan] * 1e12),
notch=False,
labels=('PAT WT', 'PAT Het', 'MAT WT', 'MAT Het')
)
    plt.ylabel(r'Area ($\mu m^2$)', fontsize=14)
plt.tick_params(labelsize=14)
plt.tight_layout()
# Tukey's HSD post-hoc comparison: Female gWAT: ko_parent + genotype groups
idx_not_nan = ~np.isnan(metainfo['gWAT_area_mean'])
make_groups(metainfo,
(idx_f & idx_pat & idx_wt, idx_f & idx_pat & idx_het, idx_f & idx_mat & idx_wt, idx_f & idx_mat & idx_het),
('PAT_WT', 'PAT_Het', 'MAT_WT', 'MAT_Het'))
idx = idx_not_nan & idx_f
mc = MultiComparison(metainfo.loc[idx, 'gWAT_area_mean'] * 1e12, metainfo.loc[idx, 'groups'])
mc_results = mc.tukeyhsd()
print(mc_results)
if DEBUG:
mc_results.plot_simultaneous()
plt.clf()
plt.boxplot(
(metainfo['gWAT_area_mean'][idx_f & idx_pat & idx_wt & idx_not_nan] * 1e12,
metainfo['gWAT_area_mean'][idx_f & idx_pat & idx_het & idx_not_nan] * 1e12,
metainfo['gWAT_area_mean'][idx_f & idx_mat & idx_wt & idx_not_nan] * 1e12,
metainfo['gWAT_area_mean'][idx_f & idx_mat & idx_het & idx_not_nan] * 1e12),
notch=False,
labels=('PAT WT', 'PAT Het', 'MAT WT', 'MAT Het')
)
    plt.ylabel(r'Area ($\mu m^2$)', fontsize=14)
plt.tick_params(labelsize=14)
plt.tight_layout()
# Tukey's HSD post-hoc comparison: Male SC: ko_parent + genotype groups
idx_not_nan = ~np.isnan(metainfo['SC_area_mean'])
make_groups(metainfo,
(idx_m & idx_pat & idx_wt, idx_m & idx_pat & idx_het, idx_m & idx_mat & idx_wt, idx_m & idx_mat & idx_het),
('PAT_WT', 'PAT_Het', 'MAT_WT', 'MAT_Het'))
idx = idx_not_nan & idx_m
mc = MultiComparison(metainfo.loc[idx, 'SC_area_mean'] * 1e12, metainfo.loc[idx, 'groups'])
mc_results = mc.tukeyhsd()
print(mc_results)
idx_not_nan = ~np.isnan(metainfo['SC_kN'])
make_groups(metainfo,
(idx_m & idx_pat & idx_wt, idx_m & idx_pat & idx_het, idx_m & idx_mat & idx_wt, idx_m & idx_mat & idx_het),
('PAT_WT', 'PAT_Het', 'MAT_WT', 'MAT_Het'))
idx = idx_not_nan & idx_m
mc = MultiComparison(metainfo.loc[idx, 'SC_kN'], metainfo.loc[idx, 'groups'])
mc_results = mc.tukeyhsd()
print(mc_results)
if DEBUG:
mc_results.plot_simultaneous()
plt.clf()
plt.boxplot(
(metainfo['SC_kN'][idx_m & idx_pat & idx_wt & idx_not_nan],
metainfo['SC_kN'][idx_m & idx_pat & idx_het & idx_not_nan],
metainfo['SC_kN'][idx_m & idx_mat & idx_wt & idx_not_nan],
metainfo['SC_kN'][idx_m & idx_mat & idx_het & idx_not_nan]),
notch=False,
labels=('PAT WT', 'PAT Het', 'MAT WT', 'MAT Het')
)
plt.ylabel('Weighted cell count', fontsize=14)
plt.tick_params(labelsize=14)
plt.tight_layout()
# Tukey's HSD post-hoc comparison: Male gWAT: ko_parent + genotype groups
idx_not_nan = ~np.isnan(metainfo['gWAT_area_mean'])
make_groups(metainfo,
(idx_m & idx_pat & idx_wt, idx_m & idx_pat & idx_het, idx_m & idx_mat & idx_wt, idx_m & idx_mat & idx_het),
('PAT_WT', 'PAT_Het', 'MAT_WT', 'MAT_Het'))
idx = idx_not_nan & idx_m
mc = MultiComparison(metainfo.loc[idx, 'gWAT_area_mean'] * 1e12, metainfo.loc[idx, 'groups'])
mc_results = mc.tukeyhsd()
print(mc_results)
idx_not_nan = ~np.isnan(metainfo['gWAT_kN'])
make_groups(metainfo,
(idx_m & idx_pat & idx_wt, idx_m & idx_pat & idx_het, idx_m & idx_mat & idx_wt, idx_m & idx_mat & idx_het),
('PAT_WT', 'PAT_Het', 'MAT_WT', 'MAT_Het'))
idx = idx_not_nan & idx_m
mc = MultiComparison(metainfo.loc[idx, 'gWAT_kN'], metainfo.loc[idx, 'groups'])
mc_results = mc.tukeyhsd()
print(mc_results)
if DEBUG:
mc_results.plot_simultaneous()
plt.clf()
plt.boxplot(
        (metainfo['gWAT_kN'][idx_m & idx_pat & idx_wt & idx_not_nan],
         metainfo['gWAT_kN'][idx_m & idx_pat & idx_het & idx_not_nan],
         metainfo['gWAT_kN'][idx_m & idx_mat & idx_wt & idx_not_nan],
         metainfo['gWAT_kN'][idx_m & idx_mat & idx_het & idx_not_nan]),
notch=False,
labels=('PAT WT', 'PAT Het', 'MAT WT', 'MAT Het')
)
plt.ylabel('Weighted cell count', fontsize=14)
plt.tick_params(labelsize=14)
plt.tight_layout()
########################################################################################################################
### Female and male SC and gWAT mean cell area stratified in MAT and PAT
### Only compare: PAT WT vs. PAT Het and MAT WT vs. MAT Het
### USED IN PAPER
########################################################################################################################
pval = []
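# p-values from each stratified test are accumulated in pval, presumably so they
# can be corrected for multiple testing later (multipletests is imported above)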
idx_not_nan = ~np.isnan(metainfo['SC_area_mean'])
# Female PAT, WT vs Het
idx_subset = idx_not_nan & idx_f & idx_pat
model = sm.formula.ols('SC_area_mean ~ C(genotype)', data=metainfo, subset=idx_subset).fit()
# print(model.summary())
pval.append(model.pvalues['C(genotype)[T.KLF14-KO:Het]'])
print('SC Female PAT, WT vs Het: p = ' + str(model.pvalues['C(genotype)[T.KLF14-KO:Het]'])
+ ', meandiff = ' + str(1e12 * model.params['C(genotype)[T.KLF14-KO:Het]']) + ',\n'
+ 'F = ' + str(model.fvalue) + '(' + str(int(model.df_model)) + ', ' + str(int(model.df_resid)) + ')'
)
# Female MAT, WT vs Het
idx_subset = idx_not_nan & idx_f & idx_mat
model = sm.formula.ols('SC_area_mean ~ C(genotype)', data=metainfo, subset=idx_subset).fit()
# print(model.summary())
pval.append(model.pvalues['C(genotype)[T.KLF14-KO:Het]'])
print('SC Female MAT, WT vs Het: p = ' + str(model.pvalues['C(genotype)[T.KLF14-KO:Het]'])
+ ', meandiff = ' + str(1e12 * model.params['C(genotype)[T.KLF14-KO:Het]']) + ',\n'
+ 'F = ' + str(model.fvalue) + '(' + str(int(model.df_model)) + ', ' + str(int(model.df_resid)) + ')'
)
# Male PAT, WT vs Het
idx_subset = idx_not_nan & idx_m & idx_pat
model = sm.formula.ols('SC_area_mean ~ C(genotype)', data=metainfo, subset=idx_subset).fit()
# print(model.summary())
pval.append(model.pvalues['C(genotype)[T.KLF14-KO:Het]'])
print('SC male PAT, WT vs Het: p = ' + str(model.pvalues['C(genotype)[T.KLF14-KO:Het]'])
+ ', meandiff = ' + str(1e12 * model.params['C(genotype)[T.KLF14-KO:Het]']) + ',\n'
+ 'F = ' + str(model.fvalue) + '(' + str(int(model.df_model)) + ', ' + str(int(model.df_resid)) + ')'
)
# Male MAT, WT vs Het
idx_subset = idx_not_nan & idx_m & idx_mat
model = sm.formula.ols('SC_area_mean ~ C(genotype)', data=metainfo, subset=idx_subset).fit()
# print(model.summary())
pval.append(model.pvalues['C(genotype)[T.KLF14-KO:Het]'])
print('SC male MAT, WT vs Het: p = ' + str(model.pvalues['C(genotype)[T.KLF14-KO:Het]'])
+ ', meandiff = ' + str(1e12 * model.params['C(genotype)[T.KLF14-KO:Het]']) + ',\n'
+ 'F = ' + str(model.fvalue) + '(' + str(int(model.df_model)) + ', ' + str(int(model.df_resid)) + ')'
)
idx_not_nan = ~np.isnan(metainfo['gWAT_area_mean'])
# Female PAT, WT vs Het
idx_subset = idx_not_nan & idx_f & idx_pat
model = sm.formula.ols('gWAT_area_mean ~ C(genotype)', data=metainfo, subset=idx_subset).fit()
# print(model.summary())
pval.append(model.pvalues['C(genotype)[T.KLF14-KO:Het]'])
print('gWAT Female PAT, WT vs Het: p = ' + str(model.pvalues['C(genotype)[T.KLF14-KO:Het]'])
+ ', meandiff = ' + str(1e12 * model.params['C(genotype)[T.KLF14-KO:Het]']) + ',\n'
+ 'F = ' + str(model.fvalue) + '(' + str(int(model.df_model)) + ', ' + str(int(model.df_resid)) + ')'
)
# Female MAT, WT vs Het
idx_subset = idx_not_nan & idx_f & idx_mat
model = sm.formula.ols('gWAT_area_mean ~ C(genotype)', data=metainfo, subset=idx_subset).fit()
# print(model.summary())
pval.append(model.pvalues['C(genotype)[T.KLF14-KO:Het]'])
print('gWAT Female MAT, WT vs Het: p = ' + str(model.pvalues['C(genotype)[T.KLF14-KO:Het]'])
+ ', meandiff = ' + str(1e12 * model.params['C(genotype)[T.KLF14-KO:Het]']) + ',\n'
+ 'F = ' + str(model.fvalue) + '(' + str(int(model.df_model)) + ', ' + str(int(model.df_resid)) + ')'
)
# Male PAT, WT vs Het
idx_subset = idx_not_nan & idx_m & idx_pat
model = sm.formula.ols('gWAT_area_mean ~ C(genotype)', data=metainfo, subset=idx_subset).fit()
# print(model.summary())
pval.append(model.pvalues['C(genotype)[T.KLF14-KO:Het]'])
print('gWAT male PAT, WT vs Het: p = ' + str(model.pvalues['C(genotype)[T.KLF14-KO:Het]'])
+ ', meandiff = ' + str(1e12 * model.params['C(genotype)[T.KLF14-KO:Het]']) + ',\n'
+ 'F = ' + str(model.fvalue) + '(' + str(int(model.df_model)) + ', ' + str(int(model.df_resid)) + ')'
)
# Male MAT, WT vs Het
idx_subset = idx_not_nan & idx_m & idx_mat
model = sm.formula.ols('gWAT_area_mean ~ C(genotype)', data=metainfo, subset=idx_subset).fit()
# print(model.summary())
pval.append(model.pvalues['C(genotype)[T.KLF14-KO:Het]'])
print('gWAT male MAT, WT vs Het: p = ' + str(model.pvalues['C(genotype)[T.KLF14-KO:Het]'])
+ ', meandiff = ' + str(1e12 * model.params['C(genotype)[T.KLF14-KO:Het]']) + ',\n'
+ 'F = ' + str(model.fvalue) + '(' + str(int(model.df_model)) + ', ' + str(int(model.df_resid)) + ')'
)
########################################################################################################################
### Female and male SC and gWAT cell count (k N) stratified in MAT and PAT
### Only compare: PAT WT vs. PAT Het and MAT WT vs. MAT Het
### USED IN PAPER
########################################################################################################################
idx_not_nan = ~np.isnan(metainfo['SC_kN'])
# Female PAT, WT vs Het
idx_subset = idx_not_nan & idx_f & idx_pat
model = sm.formula.ols('SC_kN ~ C(genotype)', data=metainfo, subset=idx_subset).fit()
# print(model.summary())
pval.append(model.pvalues['C(genotype)[T.KLF14-KO:Het]'])
print('SC Female PAT, WT vs Het: p = ' + str(model.pvalues['C(genotype)[T.KLF14-KO:Het]'])
+ ', meandiff = ' + str(model.params['C(genotype)[T.KLF14-KO:Het]']) + ',\n'
+ 'F = ' + str(model.fvalue) + '(' + str(int(model.df_model)) + ', ' + str(int(model.df_resid)) + ')'
)
# Female MAT, WT vs Het
idx_subset = idx_not_nan & idx_f & idx_mat
model = sm.formula.ols('SC_kN ~ C(genotype)', data=metainfo, subset=idx_subset).fit()
# print(model.summary())
pval.append(model.pvalues['C(genotype)[T.KLF14-KO:Het]'])
print('SC Female MAT, WT vs Het: p = ' + str(model.pvalues['C(genotype)[T.KLF14-KO:Het]'])
+ ', meandiff = ' + str(model.params['C(genotype)[T.KLF14-KO:Het]']) + ',\n'
+ 'F = ' + str(model.fvalue) + '(' + str(int(model.df_model)) + ', ' + str(int(model.df_resid)) + ')'
)
# Male PAT, WT vs Het
idx_subset = idx_not_nan & idx_m & idx_pat
model = sm.formula.ols('SC_kN ~ C(genotype)', data=metainfo, subset=idx_subset).fit()
# print(model.summary())
pval.append(model.pvalues['C(genotype)[T.KLF14-KO:Het]'])
print('SC male PAT, WT vs Het: p = ' + str(model.pvalues['C(genotype)[T.KLF14-KO:Het]'])
+ ', meandiff = ' + str(model.params['C(genotype)[T.KLF14-KO:Het]']) + ',\n'
+ 'F = ' + str(model.fvalue) + '(' + str(int(model.df_model)) + ', ' + str(int(model.df_resid)) + ')'
)
# Male MAT, WT vs Het
idx_subset = idx_not_nan & idx_m & idx_mat
model = sm.formula.ols('SC_kN ~ C(genotype)', data=metainfo, subset=idx_subset).fit()
# print(model.summary())
pval.append(model.pvalues['C(genotype)[T.KLF14-KO:Het]'])
print('SC male MAT, WT vs Het: p = ' + str(model.pvalues['C(genotype)[T.KLF14-KO:Het]'])
+ ', meandiff = ' + str(model.params['C(genotype)[T.KLF14-KO:Het]']) + ',\n'
+ 'F = ' + str(model.fvalue) + '(' + str(int(model.df_model)) + ', ' + str(int(model.df_resid)) + ')'
)
idx_not_nan = ~np.isnan(metainfo['gWAT_kN'])
# Female PAT, WT vs Het
idx_subset = idx_not_nan & idx_f & idx_pat
model = sm.formula.ols('gWAT_kN ~ C(genotype)', data=metainfo, subset=idx_subset).fit()
# print(model.summary())
pval.append(model.pvalues['C(genotype)[T.KLF14-KO:Het]'])
print('gWAT Female PAT, WT vs Het: p = ' + str(model.pvalues['C(genotype)[T.KLF14-KO:Het]'])
+ ', meandiff = ' + str(model.params['C(genotype)[T.KLF14-KO:Het]']) + ',\n'
+ 'F = ' + str(model.fvalue) + '(' + str(int(model.df_model)) + ', ' + str(int(model.df_resid)) + ')'
)
# Female MAT, WT vs Het
idx_subset = idx_not_nan & idx_f & idx_mat
model = sm.formula.ols('gWAT_kN ~ C(genotype)', data=metainfo, subset=idx_subset).fit()
# print(model.summary())
pval.append(model.pvalues['C(genotype)[T.KLF14-KO:Het]'])
print('gWAT Female MAT, WT vs Het: p = ' + str(model.pvalues['C(genotype)[T.KLF14-KO:Het]'])
+ ', meandiff = ' + str(model.params['C(genotype)[T.KLF14-KO:Het]']) + ',\n'
+ 'F = ' + str(model.fvalue) + '(' + str(int(model.df_model)) + ', ' + str(int(model.df_resid)) + ')'
)
# Male PAT, WT vs Het
idx_subset = idx_not_nan & idx_m & idx_pat
model = sm.formula.ols('gWAT_kN ~ C(genotype)', data=metainfo, subset=idx_subset).fit()
# print(model.summary())
pval.append(model.pvalues['C(genotype)[T.KLF14-KO:Het]'])
print('gWAT male PAT, WT vs Het: p = ' + str(model.pvalues['C(genotype)[T.KLF14-KO:Het]'])
+ ', meandiff = ' + str(model.params['C(genotype)[T.KLF14-KO:Het]']) + ',\n'
+ 'F = ' + str(model.fvalue) + '(' + str(int(model.df_model)) + ', ' + str(int(model.df_resid)) + ')'
)
# Male MAT, WT vs Het
idx_subset = idx_not_nan & idx_m & idx_mat
model = sm.formula.ols('gWAT_kN ~ C(genotype)', data=metainfo, subset=idx_subset).fit()
# print(model.summary())
pval.append(model.pvalues['C(genotype)[T.KLF14-KO:Het]'])
print('gWAT male MAT, WT vs Het: p = ' + str(model.pvalues['C(genotype)[T.KLF14-KO:Het]'])
+ ', meandiff = ' + str(model.params['C(genotype)[T.KLF14-KO:Het]']) + ',\n'
+ 'F = ' + str(model.fvalue) + '(' + str(int(model.df_model)) + ', ' + str(int(model.df_resid)) + ')'
)
########################################################################################################################
### Control for BW
### Female and male SC and gWAT mean cell volume stratified in MAT and PAT
### Only compare: PAT WT vs. PAT Het and MAT WT vs. MAT Het
### USED IN PAPER
########################################################################################################################
idx_not_nan = ~np.isnan(metainfo['SC_vol_mean'])
# Female PAT, WT vs Het
idx_subset = idx_not_nan & idx_f & idx_pat
model = sm.formula.ols('SC_vol_mean ~ C(genotype) + BW', data=metainfo, subset=idx_subset).fit()
# print(model.summary())
pval.append(model.pvalues['C(genotype)[T.KLF14-KO:Het]'])
print('SC Female PAT, WT vs Het: p = ' + str(model.pvalues['C(genotype)[T.KLF14-KO:Het]'])
+ ', meandiff = ' + str(1e18 * model.params['C(genotype)[T.KLF14-KO:Het]']) + ',\n'
+ 'F = ' + str(model.fvalue) + '(' + str(int(model.df_model)) + ', ' + str(int(model.df_resid)) + ')'
)
# Female MAT, WT vs Het
idx_subset = idx_not_nan & idx_f & idx_mat
model = sm.formula.ols('SC_vol_mean ~ C(genotype) + BW', data=metainfo, subset=idx_subset).fit()
# print(model.summary())
pval.append(model.pvalues['C(genotype)[T.KLF14-KO:Het]'])
print('SC Female MAT, WT vs Het: p = ' + str(model.pvalues['C(genotype)[T.KLF14-KO:Het]'])
+ ', meandiff = ' + str(1e18 * model.params['C(genotype)[T.KLF14-KO:Het]']) + ',\n'
+ 'F = ' + str(model.fvalue) + '(' + str(int(model.df_model)) + ', ' + str(int(model.df_resid)) + ')'
)
# Male PAT, WT vs Het
idx_subset = idx_not_nan & idx_m & idx_pat
model = sm.formula.ols('SC_vol_mean ~ C(genotype) + BW', data=metainfo, subset=idx_subset).fit()
# print(model.summary())
pval.append(model.pvalues['C(genotype)[T.KLF14-KO:Het]'])
print('SC male PAT, WT vs Het: p = ' + str(model.pvalues['C(genotype)[T.KLF14-KO:Het]'])
+ ', meandiff = ' + str(1e18 * model.params['C(genotype)[T.KLF14-KO:Het]']) + ',\n'
+ 'F = ' + str(model.fvalue) + '(' + str(int(model.df_model)) + ', ' + str(int(model.df_resid)) + ')'
)
# Male MAT, WT vs Het
idx_subset = idx_not_nan & idx_m & idx_mat
model = sm.formula.ols('SC_vol_mean ~ C(genotype) + BW', data=metainfo, subset=idx_subset).fit()
# print(model.summary())
pval.append(model.pvalues['C(genotype)[T.KLF14-KO:Het]'])
print('SC male MAT, WT vs Het: p = ' + str(model.pvalues['C(genotype)[T.KLF14-KO:Het]'])
+ ', meandiff = ' + str(1e18 * model.params['C(genotype)[T.KLF14-KO:Het]']) + ',\n'
+ 'F = ' + str(model.fvalue) + '(' + str(int(model.df_model)) + ', ' + str(int(model.df_resid)) + ')'
)
idx_not_nan = ~np.isnan(metainfo['gWAT_vol_mean'])
# Female PAT, WT vs Het
idx_subset = idx_not_nan & idx_f & idx_pat
model = sm.formula.ols('gWAT_vol_mean ~ C(genotype) + BW', data=metainfo, subset=idx_subset).fit()
# print(model.summary())
pval.append(model.pvalues['C(genotype)[T.KLF14-KO:Het]'])
print('gWAT Female PAT, WT vs Het: p = ' + str(model.pvalues['C(genotype)[T.KLF14-KO:Het]'])
+ ', meandiff = ' + str(1e18 * model.params['C(genotype)[T.KLF14-KO:Het]']) + ',\n'
+ 'F = ' + str(model.fvalue) + '(' + str(int(model.df_model)) + ', ' + str(int(model.df_resid)) + ')'
)
# Female MAT, WT vs Het
idx_subset = idx_not_nan & idx_f & idx_mat
model = sm.formula.ols('gWAT_vol_mean ~ C(genotype) + BW', data=metainfo, subset=idx_subset).fit()
# print(model.summary())
pval.append(model.pvalues['C(genotype)[T.KLF14-KO:Het]'])
print('gWAT Female MAT, WT vs Het: p = ' + str(model.pvalues['C(genotype)[T.KLF14-KO:Het]'])
+ ', meandiff = ' + str(1e18 * model.params['C(genotype)[T.KLF14-KO:Het]']) + ',\n'
+ 'F = ' + str(model.fvalue) + '(' + str(int(model.df_model)) + ', ' + str(int(model.df_resid)) + ')'
)
# Male PAT, WT vs Het
idx_subset = idx_not_nan & idx_m & idx_pat
model = sm.formula.ols('gWAT_vol_mean ~ C(genotype) + BW', data=metainfo, subset=idx_subset).fit()
# print(model.summary())
pval.append(model.pvalues['C(genotype)[T.KLF14-KO:Het]'])
print('gWAT male PAT, WT vs Het: p = ' + str(model.pvalues['C(genotype)[T.KLF14-KO:Het]'])
+ ', meandiff = ' + str(1e18 * model.params['C(genotype)[T.KLF14-KO:Het]']) + ',\n'
+ 'F = ' + str(model.fvalue) + '(' + str(int(model.df_model)) + ', ' + str(int(model.df_resid)) + ')'
)
# Male MAT, WT vs Het
idx_subset = idx_not_nan & idx_m & idx_mat
model = sm.formula.ols('gWAT_vol_mean ~ C(genotype) + BW', data=metainfo, subset=idx_subset).fit()
# print(model.summary())
pval.append(model.pvalues['C(genotype)[T.KLF14-KO:Het]'])
print('gWAT male MAT, WT vs Het: p = ' + str(model.pvalues['C(genotype)[T.KLF14-KO:Het]'])
+ ', meandiff = ' + str(1e18 * model.params['C(genotype)[T.KLF14-KO:Het]']) + ',\n'
+ 'F = ' + str(model.fvalue) + '(' + str(int(model.df_model)) + ', ' + str(int(model.df_resid)) + ')'
)
########################################################################################################################
### Adjust for BW
### Female and male SC and gWAT cell count (kN) stratified in MAT and PAT
### Only compare: PAT WT vs. PAT Het and MAT WT vs. MAT Het
### USED IN PAPER
########################################################################################################################
idx_not_nan = ~np.isnan(metainfo['SC_kN'])
# Female PAT, WT vs Het
idx_subset = idx_not_nan & idx_f & idx_pat
model = sm.formula.ols('SC_kN ~ C(genotype) + BW', data=metainfo, subset=idx_subset).fit()
# print(model.summary())
pval.append(model.pvalues['C(genotype)[T.KLF14-KO:Het]'])
print('SC Female PAT, WT vs Het: p = ' + str(model.pvalues['C(genotype)[T.KLF14-KO:Het]'])
+ ', meandiff = ' + str(model.params['C(genotype)[T.KLF14-KO:Het]']) + ',\n'
+ 'F = ' + str(model.fvalue) + '(' + str(int(model.df_model)) + ', ' + str(int(model.df_resid)) + ')'
)
# Female MAT, WT vs Het
idx_subset = idx_not_nan & idx_f & idx_mat
model = sm.formula.ols('SC_kN ~ C(genotype) + BW', data=metainfo, subset=idx_subset).fit()
# print(model.summary())
pval.append(model.pvalues['C(genotype)[T.KLF14-KO:Het]'])
print('SC Female MAT, WT vs Het: p = ' + str(model.pvalues['C(genotype)[T.KLF14-KO:Het]'])
+ ', meandiff = ' + str(model.params['C(genotype)[T.KLF14-KO:Het]']) + ',\n'
+ 'F = ' + str(model.fvalue) + '(' + str(int(model.df_model)) + ', ' + str(int(model.df_resid)) + ')'
)
# Male PAT, WT vs Het
idx_subset = idx_not_nan & idx_m & idx_pat
model = sm.formula.ols('SC_kN ~ C(genotype) + BW', data=metainfo, subset=idx_subset).fit()
# print(model.summary())
pval.append(model.pvalues['C(genotype)[T.KLF14-KO:Het]'])
print('SC male PAT, WT vs Het: p = ' + str(model.pvalues['C(genotype)[T.KLF14-KO:Het]'])
+ ', meandiff = ' + str(model.params['C(genotype)[T.KLF14-KO:Het]']) + ',\n'
+ 'F = ' + str(model.fvalue) + '(' + str(int(model.df_model)) + ', ' + str(int(model.df_resid)) + ')'
)
# Male MAT, WT vs Het
idx_subset = idx_not_nan & idx_m & idx_mat
model = sm.formula.ols('SC_kN ~ C(genotype) + BW', data=metainfo, subset=idx_subset).fit()
# print(model.summary())
pval.append(model.pvalues['C(genotype)[T.KLF14-KO:Het]'])
print('SC male MAT, WT vs Het: p = ' + str(model.pvalues['C(genotype)[T.KLF14-KO:Het]'])
+ ', meandiff = ' + str(model.params['C(genotype)[T.KLF14-KO:Het]']) + ',\n'
+ 'F = ' + str(model.fvalue) + '(' + str(int(model.df_model)) + ', ' + str(int(model.df_resid)) + ')'
)
idx_not_nan = ~np.isnan(metainfo['gWAT_kN'])
# Female PAT, WT vs Het
idx_subset = idx_not_nan & idx_f & idx_pat
model = sm.formula.ols('gWAT_kN ~ C(genotype) + BW', data=metainfo, subset=idx_subset).fit()
# print(model.summary())
pval.append(model.pvalues['C(genotype)[T.KLF14-KO:Het]'])
print('gWAT Female PAT, WT vs Het: p = ' + str(model.pvalues['C(genotype)[T.KLF14-KO:Het]'])
+ ', meandiff = ' + str(model.params['C(genotype)[T.KLF14-KO:Het]']) + ',\n'
+ 'F = ' + str(model.fvalue) + '(' + str(int(model.df_model)) + ', ' + str(int(model.df_resid)) + ')'
)
# Female MAT, WT vs Het
idx_subset = idx_not_nan & idx_f & idx_mat
model = sm.formula.ols('gWAT_kN ~ C(genotype) + BW', data=metainfo, subset=idx_subset).fit()
# print(model.summary())
pval.append(model.pvalues['C(genotype)[T.KLF14-KO:Het]'])
print('gWAT Female MAT, WT vs Het: p = ' + str(model.pvalues['C(genotype)[T.KLF14-KO:Het]'])
+ ', meandiff = ' + str(model.params['C(genotype)[T.KLF14-KO:Het]']) + ',\n'
+ 'F = ' + str(model.fvalue) + '(' + str(int(model.df_model)) + ', ' + str(int(model.df_resid)) + ')'
)
# Male PAT, WT vs Het
idx_subset = idx_not_nan & idx_m & idx_pat
model = sm.formula.ols('gWAT_kN ~ C(genotype) + BW', data=metainfo, subset=idx_subset).fit()
# print(model.summary())
pval.append(model.pvalues['C(genotype)[T.KLF14-KO:Het]'])
print('gWAT male PAT, WT vs Het: p = ' + str(model.pvalues['C(genotype)[T.KLF14-KO:Het]'])
+ ', meandiff = ' + str(model.params['C(genotype)[T.KLF14-KO:Het]']) + ',\n'
+ 'F = ' + str(model.fvalue) + '(' + str(int(model.df_model)) + ', ' + str(int(model.df_resid)) + ')'
)
# Male MAT, WT vs Het
idx_subset = idx_not_nan & idx_m & idx_mat
model = sm.formula.ols('gWAT_kN ~ C(genotype) + BW', data=metainfo, subset=idx_subset).fit()
# print(model.summary())
pval.append(model.pvalues['C(genotype)[T.KLF14-KO:Het]'])
print('gWAT male MAT, WT vs Het: p = ' + str(model.pvalues['C(genotype)[T.KLF14-KO:Het]'])
+ ', meandiff = ' + str(model.params['C(genotype)[T.KLF14-KO:Het]']) + ',\n'
+ 'F = ' + str(model.fvalue) + '(' + str(int(model.df_model)) + ', ' + str(int(model.df_resid)) + ')'
)
# Benjamini/Hochberg tests
reject_h0, pval_adj, _, _ = multipletests(pval, alpha=0.05, method='fdr_bh', is_sorted=False, returnsorted=False)
print('Original pvalues:\n' + str(np.reshape(pval, (8, 4))))
print('Adjusted pvalues:\n' + str(np.reshape(pval_adj, (8, 4))))
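# Hedged sketch (toy p-values, not study data) of what multipletests() returns:
# a boolean rejection mask plus the Benjamini-Hochberg adjusted p-values, both
# in the original input order.
if DEBUG:
    toy_pval = [0.001, 0.20, 0.03, 0.80]
    toy_reject, toy_padj, _, _ = multipletests(toy_pval, alpha=0.05, method='fdr_bh')
    print('toy reject: ' + str(toy_reject))
    print('toy adjusted p: ' + str(toy_padj))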
########################################################################################################################
### Model gWAT ~ gWAT_vol_mean_1e12 * C(ko_parent) * C(sex)
### INVALID model: No significance for ko_parent
### NOT USED IN PAPER
########################################################################################################################
idx_not_nan = np.where(~np.isnan(metainfo['SC']) * ~np.isnan(metainfo['gWAT']) * ~np.isnan(metainfo['BW']))[0]
# data that we are going to use
idx_subset = idx_not_nan
model = sm.formula.ols('gWAT ~ gWAT_vol_mean_1e12 * C(ko_parent) * C(sex)', data=metainfo, subset=idx_subset).fit()
print(model.summary())
# OLS Regression Results
# ==============================================================================
# Dep. Variable: gWAT R-squared: 0.386
# Model: OLS Adj. R-squared: 0.316
# Method: Least Squares F-statistic: 5.557
# Date: Fri, 28 Feb 2020 Prob (F-statistic): 5.36e-05
# Time: 19:36:00 Log-Likelihood: -13.303
# No. Observations: 70 AIC: 42.61
# Df Residuals: 62 BIC: 60.59
# Df Model: 7
# Covariance Type: nonrobust
# ======================================================================================================================
# coef std err t P>|t| [0.025 0.975]
# ----------------------------------------------------------------------------------------------------------------------
# Intercept 0.2711 0.179 1.513 0.135 -0.087 0.629
# C(ko_parent)[T.MAT] 0.0546 0.255 0.214 0.831 -0.455 0.564
# C(sex)[T.m] 0.1560 0.606 0.258 0.798 -1.054 1.366
# C(ko_parent)[T.MAT]:C(sex)[T.m] 1.6161 0.858 1.883 0.064 -0.099 3.331
# gWAT_vol_mean_1e12 2.5752 0.839 3.068 0.003 0.897 4.253
# gWAT_vol_mean_1e12:C(ko_parent)[T.MAT] -0.7311 1.031 -0.709 0.481 -2.792 1.329
# gWAT_vol_mean_1e12:C(sex)[T.m] -1.0670 1.574 -0.678 0.500 -4.214 2.080
# gWAT_vol_mean_1e12:C(ko_parent)[T.MAT]:C(sex)[T.m] -3.1432 2.181 -1.441 0.154 -7.502 1.216
# ==============================================================================
# Omnibus: 5.047 Durbin-Watson: 1.692
# Prob(Omnibus): 0.080 Jarque-Bera (JB): 4.181
# Skew: 0.550 Prob(JB): 0.124
# Kurtosis: 3.472 Cond. No. 105.
# ==============================================================================
# Warnings:
# [1] Standard Errors assume that the covariance matrix of the errors is correctly specified.
# partial regression and influence plots
if DEBUG:
sm.graphics.plot_partregress_grid(model)
sm.graphics.influence_plot(model, criterion="cooks")
def model_line(model, sex, ko_parent, gWAT_vol_mean_1e12):
    sex = float(sex == 'm')
    ko_parent = float(ko_parent == 'MAT')
return model.params['Intercept'] + \
model.params['C(ko_parent)[T.MAT]'] * ko_parent + \
model.params['C(sex)[T.m]'] * sex + \
model.params['C(ko_parent)[T.MAT]:C(sex)[T.m]'] * ko_parent * sex + \
model.params['gWAT_vol_mean_1e12'] * gWAT_vol_mean_1e12 + \
model.params['gWAT_vol_mean_1e12:C(ko_parent)[T.MAT]'] * gWAT_vol_mean_1e12 * ko_parent + \
model.params['gWAT_vol_mean_1e12:C(sex)[T.m]'] * gWAT_vol_mean_1e12 * sex + \
model.params['gWAT_vol_mean_1e12:C(ko_parent)[T.MAT]:C(sex)[T.m]'] * gWAT_vol_mean_1e12 * ko_parent * sex
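# Hedged alternative to model_line(): a fitted formula model can be evaluated
# directly with model.predict() on a DataFrame holding the predictor columns,
# letting the stored design info rebuild the dummy/interaction terms. The
# volume grid below is a hypothetical placeholder.
if DEBUG:
    import pandas as pd
    pred_df = pd.DataFrame({'gWAT_vol_mean_1e12': np.linspace(0.1, 0.6, 5),
                            'ko_parent': 'MAT',
                            'sex': 'f'})
    print(model.predict(pred_df))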
# plot BW as a function of gWAT
if DEBUG:
annotate = False
plt.clf()
# f PAT
idx = (metainfo['sex'] == 'f') * (metainfo['ko_parent'] == 'PAT') * (metainfo['genotype'] == 'KLF14-KO:WT')
gWAT_vol_mean_1e12 = np.linspace(np.min(metainfo['gWAT_vol_mean_1e12'][idx]), np.max(metainfo['gWAT_vol_mean_1e12'][idx]))
plt.scatter(metainfo['gWAT_vol_mean_1e12'][idx], metainfo['gWAT'][idx], label='f PAT WT', color='C0', facecolor='none')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['gWAT_vol_mean_1e12'][i], metainfo['gWAT'][i]))
idx = (metainfo['sex'] == 'f') * (metainfo['ko_parent'] == 'PAT') * (metainfo['genotype'] == 'KLF14-KO:Het')
gWAT_vol_mean_1e12 = np.linspace(np.min(metainfo['gWAT_vol_mean_1e12'][idx]), np.max(metainfo['gWAT_vol_mean_1e12'][idx]))
plt.scatter(metainfo['gWAT_vol_mean_1e12'][idx], metainfo['gWAT'][idx], label='f PAT Het', color='C0')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['gWAT_vol_mean_1e12'][i], metainfo['gWAT'][i]))
idx = (metainfo['sex'] == 'f') * (metainfo['ko_parent'] == 'PAT')
gWAT_vol_mean_1e12 = np.linspace(np.min(metainfo['gWAT_vol_mean_1e12'][idx]), np.max(metainfo['gWAT_vol_mean_1e12'][idx]))
gWAT = model_line(model, sex='f', ko_parent='PAT', gWAT_vol_mean_1e12=gWAT_vol_mean_1e12)
plt.plot(gWAT_vol_mean_1e12, gWAT, color='C0', linewidth=3)
# f MAT
idx = (metainfo['sex'] == 'f') * (metainfo['ko_parent'] == 'MAT') * (metainfo['genotype'] == 'KLF14-KO:WT')
gWAT_vol_mean_1e12 = np.linspace(np.min(metainfo['gWAT_vol_mean_1e12'][idx]), np.max(metainfo['gWAT_vol_mean_1e12'][idx]))
plt.scatter(metainfo['gWAT_vol_mean_1e12'][idx], metainfo['gWAT'][idx], label='f MAT WT', color='C2', facecolor='none')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['gWAT_vol_mean_1e12'][i], metainfo['gWAT'][i]))
idx = (metainfo['sex'] == 'f') * (metainfo['ko_parent'] == 'MAT') * (metainfo['genotype'] == 'KLF14-KO:Het')
gWAT_vol_mean_1e12 = np.linspace(np.min(metainfo['gWAT_vol_mean_1e12'][idx]), np.max(metainfo['gWAT_vol_mean_1e12'][idx]))
plt.scatter(metainfo['gWAT_vol_mean_1e12'][idx], metainfo['gWAT'][idx], label='f MAT Het', color='C2')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['gWAT_vol_mean_1e12'][i], metainfo['gWAT'][i]))
idx = (metainfo['sex'] == 'f') * (metainfo['ko_parent'] == 'MAT')
gWAT_vol_mean_1e12 = np.linspace(np.min(metainfo['gWAT_vol_mean_1e12'][idx]), np.max(metainfo['gWAT_vol_mean_1e12'][idx]))
gWAT = model_line(model, sex='f', ko_parent='MAT', gWAT_vol_mean_1e12=gWAT_vol_mean_1e12)
plt.plot(gWAT_vol_mean_1e12, gWAT, color='C2', linewidth=3)
# m PAT
idx = (metainfo['sex'] == 'm') * (metainfo['ko_parent'] == 'PAT') * (metainfo['genotype'] == 'KLF14-KO:WT')
gWAT_vol_mean_1e12 = np.linspace(np.min(metainfo['gWAT_vol_mean_1e12'][idx]), np.max(metainfo['gWAT_vol_mean_1e12'][idx]))
plt.scatter(metainfo['gWAT_vol_mean_1e12'][idx], metainfo['gWAT'][idx], label='m PAT WT', color='k', facecolor='none')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['gWAT_vol_mean_1e12'][i], metainfo['gWAT'][i]))
idx = (metainfo['sex'] == 'm') * (metainfo['ko_parent'] == 'PAT') * (metainfo['genotype'] == 'KLF14-KO:Het')
gWAT_vol_mean_1e12 = np.linspace(np.min(metainfo['gWAT_vol_mean_1e12'][idx]), np.max(metainfo['gWAT_vol_mean_1e12'][idx]))
plt.scatter(metainfo['gWAT_vol_mean_1e12'][idx], metainfo['gWAT'][idx], label='m PAT Het', color='k')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['gWAT_vol_mean_1e12'][i], metainfo['gWAT'][i]))
idx = (metainfo['sex'] == 'm') * (metainfo['ko_parent'] == 'PAT')
gWAT_vol_mean_1e12 = np.linspace(np.min(metainfo['gWAT_vol_mean_1e12'][idx]), np.max(metainfo['gWAT_vol_mean_1e12'][idx]))
gWAT = model_line(model, sex='m', ko_parent='PAT', gWAT_vol_mean_1e12=gWAT_vol_mean_1e12)
plt.plot(gWAT_vol_mean_1e12, gWAT, color='k', linewidth=3)
# m MAT
idx = (metainfo['sex'] == 'm') * (metainfo['ko_parent'] == 'MAT') * (metainfo['genotype'] == 'KLF14-KO:WT')
gWAT_vol_mean_1e12 = np.linspace(np.min(metainfo['gWAT_vol_mean_1e12'][idx]), np.max(metainfo['gWAT_vol_mean_1e12'][idx]))
plt.scatter(metainfo['gWAT_vol_mean_1e12'][idx], metainfo['gWAT'][idx], label='m MAT WT', color='C3', facecolor='none')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['gWAT_vol_mean_1e12'][i], metainfo['gWAT'][i]))
idx = (metainfo['sex'] == 'm') * (metainfo['ko_parent'] == 'MAT') * (metainfo['genotype'] == 'KLF14-KO:Het')
gWAT_vol_mean_1e12 = np.linspace(np.min(metainfo['gWAT_vol_mean_1e12'][idx]), np.max(metainfo['gWAT_vol_mean_1e12'][idx]))
plt.scatter(metainfo['gWAT_vol_mean_1e12'][idx], metainfo['gWAT'][idx], label='m MAT Het', color='C3')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['gWAT_vol_mean_1e12'][i], metainfo['gWAT'][i]))
idx = (metainfo['sex'] == 'm') * (metainfo['ko_parent'] == 'MAT')
gWAT_vol_mean_1e12 = np.linspace(np.min(metainfo['gWAT_vol_mean_1e12'][idx]), np.max(metainfo['gWAT_vol_mean_1e12'][idx]))
gWAT = model_line(model, sex='m', ko_parent='MAT', gWAT_vol_mean_1e12=gWAT_vol_mean_1e12)
plt.plot(gWAT_vol_mean_1e12, gWAT, color='C3', linewidth=3)
plt.legend()
plt.xlabel(r'$\overline{V}_{gWAT}$ (g)', fontsize=14)
plt.ylabel('m$_{gWAT}$ (g)', fontsize=14)
plt.tick_params(labelsize=14)
plt.tight_layout()
plt.savefig(os.path.join(figures_dir, 'klf14_b6ntac_exp_0099_gWAT_model_Vm.png'), bbox_inches='tight')
plt.savefig(os.path.join(figures_dir, 'klf14_b6ntac_exp_0099_gWAT_model_Vm.svg'), bbox_inches='tight')
########################################################################################################################
### Model SC ~ SC_vol_mean_1e12 * C(ko_parent) * C(sex)
### INVALID model, like previous
### NOT USED IN PAPER
########################################################################################################################
idx_not_nan = np.where(~np.isnan(metainfo['SC']) * ~np.isnan(metainfo['gWAT']) * ~np.isnan(metainfo['BW']))[0]
# data that we are going to use
idx_subset = idx_not_nan
model = sm.formula.ols('SC ~ SC_vol_mean_1e12 * C(ko_parent) * C(sex)', data=metainfo, subset=idx_subset).fit()
print(model.summary())
# OLS Regression Results
# ==============================================================================
# Dep. Variable: SC R-squared: 0.362
# Model: OLS Adj. R-squared: 0.294
# Method: Least Squares F-statistic: 5.347
# Date: Thu, 27 Feb 2020 Prob (F-statistic): 7.01e-05
# Time: 16:08:51 Log-Likelihood: -12.523
# No. Observations: 74 AIC: 41.05
# Df Residuals: 66 BIC: 59.48
# Df Model: 7
# Covariance Type: nonrobust
# ====================================================================================================================
# coef std err t P>|t| [0.025 0.975]
# --------------------------------------------------------------------------------------------------------------------
# Intercept 0.0901 0.165 0.546 0.587 -0.239 0.419
# C(ko_parent)[T.MAT] 0.1587 0.218 0.729 0.468 -0.276 0.593
# C(sex)[T.m] -0.1723 0.324 -0.532 0.597 -0.819 0.475
# C(ko_parent)[T.MAT]:C(sex)[T.m] 0.5270 0.455 1.159 0.251 -0.381 1.435
# SC_vol_mean_1e12 4.1312 1.309 3.156 0.002 1.518 6.744
# SC_vol_mean_1e12:C(ko_parent)[T.MAT] -3.4677 1.480 -2.342 0.022 -6.424 -0.512
# SC_vol_mean_1e12:C(sex)[T.m] -0.8534 1.670 -0.511 0.611 -4.189 2.482
# SC_vol_mean_1e12:C(ko_parent)[T.MAT]:C(sex)[T.m] 0.0342 2.143 0.016 0.987 -4.245 4.314
# ==============================================================================
# Omnibus: 22.663 Durbin-Watson: 1.433
# Prob(Omnibus): 0.000 Jarque-Bera (JB): 30.557
# Skew: 1.358 Prob(JB): 2.31e-07
# Kurtosis: 4.593 Cond. No. 119.
# ==============================================================================
# Warnings:
# [1] Standard Errors assume that the covariance matrix of the errors is correctly specified.
# partial regression and influence plots
if DEBUG:
sm.graphics.plot_partregress_grid(model)
sm.graphics.influence_plot(model, criterion="cooks")
def model_line(model, sex, ko_parent, SC_vol_mean_1e12):
    sex = float(sex == 'm')
    ko_parent = float(ko_parent == 'MAT')
return model.params['Intercept'] + \
model.params['C(ko_parent)[T.MAT]'] * ko_parent + \
model.params['C(sex)[T.m]'] * sex + \
model.params['C(ko_parent)[T.MAT]:C(sex)[T.m]'] * ko_parent * sex + \
model.params['SC_vol_mean_1e12'] * SC_vol_mean_1e12 + \
model.params['SC_vol_mean_1e12:C(ko_parent)[T.MAT]'] * SC_vol_mean_1e12 * ko_parent + \
model.params['SC_vol_mean_1e12:C(sex)[T.m]'] * SC_vol_mean_1e12 * sex + \
model.params['SC_vol_mean_1e12:C(ko_parent)[T.MAT]:C(sex)[T.m]'] * SC_vol_mean_1e12 * ko_parent * sex
# plot BW as a function of SC
if DEBUG:
annotate = False
plt.clf()
# f PAT
idx = (metainfo['sex'] == 'f') * (metainfo['ko_parent'] == 'PAT') * (metainfo['genotype'] == 'KLF14-KO:WT')
SC_vol_mean_1e12 = np.linspace(np.min(metainfo['SC_vol_mean_1e12'][idx]), np.max(metainfo['SC_vol_mean_1e12'][idx]))
plt.scatter(metainfo['SC_vol_mean_1e12'][idx], metainfo['SC'][idx], label='f PAT WT', color='C0', facecolor='none')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['SC_vol_mean_1e12'][i], metainfo['SC'][i]))
idx = (metainfo['sex'] == 'f') * (metainfo['ko_parent'] == 'PAT') * (metainfo['genotype'] == 'KLF14-KO:Het')
SC_vol_mean_1e12 = np.linspace(np.min(metainfo['SC_vol_mean_1e12'][idx]), np.max(metainfo['SC_vol_mean_1e12'][idx]))
plt.scatter(metainfo['SC_vol_mean_1e12'][idx], metainfo['SC'][idx], label='f PAT Het', color='C0')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['SC_vol_mean_1e12'][i], metainfo['SC'][i]))
idx = (metainfo['sex'] == 'f') * (metainfo['ko_parent'] == 'PAT')
SC_vol_mean_1e12 = np.linspace(np.min(metainfo['SC_vol_mean_1e12'][idx]), np.max(metainfo['SC_vol_mean_1e12'][idx]))
SC = model_line(model, sex='f', ko_parent='PAT', SC_vol_mean_1e12=SC_vol_mean_1e12)
plt.plot(SC_vol_mean_1e12, SC, color='C0', linewidth=3)
# f MAT
idx = (metainfo['sex'] == 'f') * (metainfo['ko_parent'] == 'MAT') * (metainfo['genotype'] == 'KLF14-KO:WT')
SC_vol_mean_1e12 = np.linspace(np.min(metainfo['SC_vol_mean_1e12'][idx]), np.max(metainfo['SC_vol_mean_1e12'][idx]))
plt.scatter(metainfo['SC_vol_mean_1e12'][idx], metainfo['SC'][idx], label='f MAT WT', color='C2', facecolor='none')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['SC_vol_mean_1e12'][i], metainfo['SC'][i]))
idx = (metainfo['sex'] == 'f') * (metainfo['ko_parent'] == 'MAT') * (metainfo['genotype'] == 'KLF14-KO:Het')
SC_vol_mean_1e12 = np.linspace(np.min(metainfo['SC_vol_mean_1e12'][idx]), np.max(metainfo['SC_vol_mean_1e12'][idx]))
plt.scatter(metainfo['SC_vol_mean_1e12'][idx], metainfo['SC'][idx], label='f MAT Het', color='C2')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['SC_vol_mean_1e12'][i], metainfo['SC'][i]))
idx = (metainfo['sex'] == 'f') * (metainfo['ko_parent'] == 'MAT')
SC_vol_mean_1e12 = np.linspace(np.min(metainfo['SC_vol_mean_1e12'][idx]), np.max(metainfo['SC_vol_mean_1e12'][idx]))
SC = model_line(model, sex='f', ko_parent='MAT', SC_vol_mean_1e12=SC_vol_mean_1e12)
plt.plot(SC_vol_mean_1e12, SC, color='C2', linewidth=3)
# m PAT
idx = (metainfo['sex'] == 'm') * (metainfo['ko_parent'] == 'PAT') * (metainfo['genotype'] == 'KLF14-KO:WT')
SC_vol_mean_1e12 = np.linspace(np.min(metainfo['SC_vol_mean_1e12'][idx]), np.max(metainfo['SC_vol_mean_1e12'][idx]))
plt.scatter(metainfo['SC_vol_mean_1e12'][idx], metainfo['SC'][idx], label='m PAT WT', color='k', facecolor='none')
if annotate:
for i in np.where(idx)[0]:
plt.annotate(i, (metainfo['SC_vol_mean_1e12'][i], metainfo['SC'][i]))
idx = (metainfo['sex'] == 'm') * (metainfo['ko_parent'] == 'PAT') * (metainfo['genotype'] == 'KLF14-KO:Het')
SC_vol_mean_1e12 = np.linspace(np.min(metainfo['SC_vol_mean_1e12'][idx]), np.max(metainfo['SC_vol_mean_1e12'][idx]))
plt.scatter(metainfo['SC_vol_mean_1e12'][idx], metainfo['SC'][idx], label='m PAT Het', color='k')
if annotate:
for i in np.where(idx)[0]:
    plt.annotate(i, (metainfo['SC_vol_mean_1e12'][i], metainfo['SC'][i]))
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 29 17:23:20 2019
Functions for smoothing and generating the PDF and CDF.
@author: Yanlong
"""
from __future__ import unicode_literals
import numpy as np
#import matplotlib as mpl
#mpl.use('Agg')
import matplotlib.pyplot as plt
import matplotlib
matplotlib.rcParams['text.usetex'] = True
matplotlib.rcParams['xtick.direction'] = 'in'
matplotlib.rcParams['ytick.direction'] = 'in'
matplotlib.rcParams['xtick.top'] = True
matplotlib.rcParams['ytick.right'] = True
from scipy.optimize import curve_fit
import sys
import glob
import lmfit
from scipy import signal
from scipy import interpolate
from scipy import special
from scipy import optimize
from scipy import stats
def release_list(a):
del a[:]
del a
def func(x, rho, a, b, rc):
return np.log(np.exp(rho)/(np.exp(x)/np.exp(rc))**a / (1.+np.exp(x)/np.exp(rc))**(b-a))
def func_pw(x, rho, a, b, rc):
return np.log(np.exp(rho)/((np.exp(x)/np.exp(rc))**a + (np.exp(x)/np.exp(rc))**b ) )
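# Hedged reading of the two profiles above: both are double power laws in r/rc
# evaluated in log space; func is the (a, b) form
# rho / ((r/rc)**a * (1 + r/rc)**(b - a)), and func_pw is the piecewise-smooth
# variant rho / ((r/rc)**a + (r/rc)**b).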
def func_cdf(x, rho, a, b, rc):
rc = np.exp(rc)
rho = np.exp(rho)
x = np.exp(x)
x = x/rc
aa = 3-a
bb = 1+a-b
beta = x**aa / aa * special.hyp2f1(aa, 1-bb, aa+1, -x)
    try:
        res = np.log(4*np.pi*rho*rc**3 * beta)
    except Exception:
        res = np.nan  # previously 'res' could be unbound if the log raised
    return res
def func_cdf_inv(frac, rho, a, b, c):
m_h = func_cdf(np.log(1e8), rho, a, b, c) + np.log(frac)
rmin = np.log(1e-10)
rmax = np.log(1e10)
alpha = 0.5
while rmax - rmin > 1e-6:
rmid = alpha*rmax + (1.-alpha)*rmin
if func_cdf(rmid, rho, a, b, c)>m_h:
rmax = rmid
else:
rmin = rmid
rmid = alpha*rmax + (1.-alpha)*rmin
return np.exp(rmid)
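# Hedged alternative to the hand-rolled bisection above: the same radius can be
# obtained as a root with scipy's brentq (via the scipy.optimize import),
# assuming the CDF is monotone so the bracket contains a sign change.
def func_cdf_inv_brentq(frac, rho, a, b, c):
    m_h = func_cdf(np.log(1e8), rho, a, b, c) + np.log(frac)
    return np.exp(optimize.brentq(lambda r: func_cdf(r, rho, a, b, c) - m_h,
                                  np.log(1e-10), np.log(1e10)))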
def func_cdf_pw(x, rho, a, b, rc):
rc = np.exp(rc)
rho = np.exp(rho)
x = np.exp(x)
x = x/rc
#print(rho, x, rc)
ab = (a-3.) /(a-b)
#print(aa, bb)
hgf = x**(3.-a) / (3.-a) * special.hyp2f1(1, ab, ab+1, -x**(b-a))
return np.log(4*np.pi*rho*rc**3 * hgf)
def func_cdf_pw_inv(frac, rho, a, b, c):
m_h = func_cdf_pw(np.log(1e8), rho, a, b, c) + np.log(frac)
rmin = np.log(1e-10)
rmax = np.log(1e10)
alpha = 0.5
while rmax - rmin > 1e-6:
rmid = alpha*rmax + (1.-alpha)*rmin
if func_cdf_pw(rmid, rho, a, b, c)>m_h:
rmax = rmid
else:
rmin = rmid
rmid = alpha*rmax + (1.-alpha)*rmin
return np.exp(rmid)
def cdf_sample(r_m):
rmin = r_m[3,0] *1.01
rmax = r_m[-1,0] *1.01
n_points = 500
r = np.logspace(np.log10(rmin), np.log10(rmax), num=n_points)
mcum = np.zeros(n_points)
mt = 0.
i = j = 0
for i in range(n_points):
if j>=len(r_m):
mcum[i] = mt
continue
while r_m[j, 0]<r[i]:
mt += r_m[j, 1]
j += 1
if j >=len(r_m):
break
mcum[i] = mt
#print(r[i], r_m[j-1,0], mcum[i])
return np.array(list(zip(r, mcum)))
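# Hedged vectorized sketch of cdf_sample(): assuming r_m[:, 0] is sorted
# ascending (which the loop above also relies on), a cumulative sum plus
# searchsorted yields the same enclosed-mass curve without the inner while.
def cdf_sample_vec(r_m, n_points=500):
    r = np.logspace(np.log10(r_m[3, 0] * 1.01), np.log10(r_m[-1, 0] * 1.01),
                    num=n_points)
    mcum_all = np.cumsum(r_m[:, 1])
    idx = np.searchsorted(r_m[:, 0], r, side='left')  # counts radii < r[i]
    mcum = np.where(idx > 0, mcum_all[np.maximum(idx - 1, 0)], 0.)
    return np.array(list(zip(r, mcum)))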
def pdf_sample(r_m):
rmin = r_m[3,0] *1.01
rmax = r_m[-1,0] *1.01
n_points = 20
r = np.logspace(np.log10(rmin), np.log10(rmax), num=n_points-1)
eta = r[2]/r[1]
dmcum = np.zeros(n_points-1)
i = j = 0
for i in range(n_points-1):
while j < len(r_m):
if r_m[j, 0]<r[i+1]:
dmcum[i]+=r_m[j, 1]
j+=1
continue
else:
break
dmcum[i] /= ((r[i]*eta)**3 - r[i]**3)*4.*np.pi/3.
#print(r[i], r_m[j-1,0], mcum[i])
result = np.array(list(zip(r*np.sqrt(eta), dmcum)))
return result[np.all(result > 1e-3, axis=1)]
def cdf_smooth(raw_cdf):
n_points = 500
x = np.log(raw_cdf[:,0])
y = np.log(raw_cdf[:,1])
#tck = interpolate.splrep(x, y, s=0)
#tck, u = interpolate.splprep([x, y], s=0)
f = interpolate.interp1d(x, y, kind='linear')
xnew = np.linspace(x[0], x[-1], n_points)
#ynew = interpolate.splev(xnew, tck, der=0)
ynew = f(xnew)
#spl = interpolate.UnivariateSpline(xnew, ynew)
#spl.set_smoothing_factor(0.9)
#ynew = spl(xnew)
ynew = signal.savgol_filter(ynew, 349, 2)
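    # Hedged note: savgol_filter needs an odd window_length no longer than the
    # series; the fixed 349 fits n_points = 500 here, but shorter inputs would
    # need the window reduced accordingly.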
return np.array(list(zip(np.exp(xnew), np.exp(ynew))))
def cdf_smooth_cheby(raw_cdf):
n_points = 500
x = np.log(raw_cdf[:,0])
y = np.log(raw_cdf[:,1])
#tck = interpolate.splrep(x, y, s=0)
#tck, u = interpolate.splprep([x, y], s=0)
f = interpolate.interp1d(x, y, kind='linear')
xnew = np.linspace(x[0], x[-1], n_points)
#ynew = interpolate.splev(xnew, tck, der=0)
ynew = f(xnew)
#spl = interpolate.UnivariateSpline(xnew, ynew)
#spl.set_smoothing_factor(0.9)
#ynew = spl(xnew)
#ynew = signal.savgol_filter(ynew, 349, 2)
cheby = np.polynomial.Chebyshev.fit(xnew, ynew, 4)
#y = signal.savgol_filter(y, len(x)//8*2+1, 3)
ynew = cheby(xnew)
return np.array(list(zip(np.exp(xnew), np.exp(ynew))))
def cdf_smooth_mono(raw_cdf):
return
def pdf_cal(cdf):
x = np.log(cdf[:,0])
y = np.log(cdf[:,1])
dydx_log = np.diff(y)/np.diff(x)
z = dydx_log * np.exp(y[:-1])/4./np.pi/(np.exp(x[:-1]))**3
    return np.array(list(zip(np.exp(x[:-1]), z)))
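# Hedged numeric check (synthetic input, not catalogue data): for a uniform
# sphere, M(r) = (4/3) pi rho0 r**3, so pdf_cal() should return a flat density
# of rho0 at every radius.
if __name__ == '__main__':
    r_test = np.logspace(-2, 1, 200)
    cdf_test = np.column_stack([r_test, 4. / 3. * np.pi * r_test ** 3])  # rho0 = 1
    print(pdf_cal(cdf_test)[:3])  # each density value should be close to 1.0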
from unittest import TestCase
from tempfile import TemporaryDirectory
from pathlib import Path
from giant.camera_models import PinholeModel, OwenModel, BrownModel, OpenCVModel, save, load
import numpy as np
import giant.rotations as at
import lxml.etree as etree
class TestPinholeModel(TestCase):
def setUp(self):
self.Class = PinholeModel
def test___init__(self):
model = self.Class(intrinsic_matrix=np.array([[1, 2, 3], [4, 5, 6]]), focal_length=10.5, field_of_view=20.5,
use_a_priori=True, misalignment=[np.zeros(3), np.ones(3)],
estimation_parameters='basic intrinsic',
a1=1, a2=2, a3=3)
np.testing.assert_array_equal(model.intrinsic_matrix, [[1, 2, 3], [4, 5, 6]])
self.assertEqual(model.focal_length, 10.5)
self.assertEqual(model.field_of_view, 20.5)
self.assertTrue(model.use_a_priori)
self.assertEqual(model.estimation_parameters, ['basic intrinsic'])
self.assertEqual(model.a1, 1)
self.assertEqual(model.a2, 2)
self.assertEqual(model.a3, 3)
model = self.Class(kx=1, ky=2, px=4, py=5, focal_length=10.5, field_of_view=20.5,
use_a_priori=True, misalignment=[np.zeros(3), np.ones(3)],
estimation_parameters=['focal_length', 'px'])
np.testing.assert_array_equal(model.intrinsic_matrix, [[1, 0, 4], [0, 2, 5]])
self.assertEqual(model.focal_length, 10.5)
self.assertEqual(model.field_of_view, 20.5)
self.assertTrue(model.use_a_priori)
self.assertEqual(model.estimation_parameters, ['focal_length', 'px'])
def test_estimation_parameters(self):
model = self.Class()
model.estimation_parameters = 'kx'
self.assertEqual(model.estimation_parameters, ['kx'])
model.estimate_multiple_misalignments = False
model.estimation_parameters = ['px', 'py', 'Multiple misalignments']
self.assertEqual(model.estimation_parameters, ['px', 'py', 'multiple misalignments'])
self.assertTrue(model.estimate_multiple_misalignments)
def test_kx(self):
model = self.Class(intrinsic_matrix=np.array([[1, 0, 0], [0, 0, 0]]))
self.assertEqual(model.kx, 1)
model.kx = 100
self.assertEqual(model.kx, 100)
self.assertEqual(model.intrinsic_matrix[0, 0], 100)
def test_ky(self):
model = self.Class(intrinsic_matrix=np.array([[0, 0, 0], [0, 3, 0]]))
self.assertEqual(model.ky, 3)
model.ky = 100
self.assertEqual(model.ky, 100)
self.assertEqual(model.intrinsic_matrix[1, 1], 100)
def test_px(self):
model = self.Class(intrinsic_matrix=np.array([[0, 0, 20], [0, 3, 0]]))
self.assertEqual(model.px, 20)
model.px = 100
self.assertEqual(model.px, 100)
self.assertEqual(model.intrinsic_matrix[0, 2], 100)
def test_py(self):
model = self.Class(intrinsic_matrix=np.array([[0, 0, 0], [0, 0, 10]]))
self.assertEqual(model.py, 10)
model.py = 100
self.assertEqual(model.py, 100)
self.assertEqual(model.intrinsic_matrix[1, 2], 100)
def test_a1(self):
model = self.Class(temperature_coefficients=np.array([10, 0, 0]))
self.assertEqual(model.a1, 10)
model.a1 = 100
self.assertEqual(model.a1, 100)
self.assertEqual(model.temperature_coefficients[0], 100)
def test_a2(self):
model = self.Class(temperature_coefficients=np.array([0, 10, 0]))
self.assertEqual(model.a2, 10)
model.a2 = 100
self.assertEqual(model.a2, 100)
self.assertEqual(model.temperature_coefficients[1], 100)
def test_a3(self):
model = self.Class(temperature_coefficients=np.array([0, 0, 10]))
self.assertEqual(model.a3, 10)
model.a3 = 100
self.assertEqual(model.a3, 100)
self.assertEqual(model.temperature_coefficients[2], 100)
def test_intrinsic_matrix_inv(self):
model = self.Class(kx=5, ky=10, px=100, py=-5)
np.testing.assert_array_almost_equal(model.intrinsic_matrix @ np.vstack([model.intrinsic_matrix_inv,
[0, 0, 1]]),
[[1, 0, 0], [0, 1, 0]])
np.testing.assert_array_almost_equal(model.intrinsic_matrix_inv @ np.vstack([model.intrinsic_matrix,
[0, 0, 1]]),
[[1, 0, 0], [0, 1, 0]])
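        # Hedged note: the 2x3 intrinsic matrix is an affine map, so its
        # "inverse" is the 2x3 matrix that undoes it once each is augmented
        # with a homogeneous [0, 0, 1] row, which is what the two assertions
        # above verify in both composition orders.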
def test_get_temperature_scale(self):
model = self.Class(temperature_coefficients=[1, 2, 3.])
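        # Hedged note: the expectations below are consistent with a cubic
        # temperature model, scale(T) = 1 + a1*T + a2*T**2 + a3*T**3, so with
        # (a1, a2, a3) = (1, 2, 3): T=1 -> 7, T=2 -> 35, T=-1 -> -1.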
self.assertEqual(model.get_temperature_scale(1), 7)
np.testing.assert_array_equal(model.get_temperature_scale([1, 2]), [7, 35])
np.testing.assert_array_equal(model.get_temperature_scale([-1, 2.]), [-1, 35])
def test_apply_distortion(self):
inputs = [[0, 0], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [[1.5], [0]], [[1.5, -1], [0, 0]],
[0, 1], [0, -1], [0, 1.5], [0, -1.5], [[0], [1.5]], [[0, 0], [1.5, -1]], [1, 1]]
model = self.Class()
for inp in inputs:
gnom_dist = model.apply_distortion(np.array(inp))
np.testing.assert_array_almost_equal(gnom_dist, inp)
def test_get_projections(self):
points = [[0, 0, 1], [-0.1, 0.2, 2.2], [[-0.1], [0.2], [2.2]], [[-0.1, 0], [0.2, 0], [2.2, 1]]]
model = self.Class(focal_length=8.7, kx=500, ky=500.5, px=1500, py=1500.5, a1=1, a2=2, a3=3)
with self.subTest(misalignment=None):
for point in points:
gnom, _, pix = model.get_projections(point)
gnom_true = model.focal_length * np.array(point[:2]) / point[2]
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_equal(gnom, gnom_true)
np.testing.assert_array_equal(pix, pix_true)
with self.subTest(temperature=1):
for point in points:
gnom, _, pix = model.get_projections(point, temperature=1)
gnom_true = model.focal_length * np.array(point[:2]) / point[2]
gnom_true *= model.get_temperature_scale(1)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_equal(gnom, gnom_true)
np.testing.assert_array_equal(pix, pix_true)
with self.subTest(temperature=-10.5):
for point in points:
gnom, _, pix = model.get_projections(point, temperature=-10.5)
gnom_true = model.focal_length * np.array(point[:2]) / point[2]
gnom_true *= model.get_temperature_scale(-10.5)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_equal(gnom, gnom_true)
np.testing.assert_array_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, px=1500, py=1500.5,
misalignment=[0, 0, np.pi])
with self.subTest(misalignment=[0, 0, np.pi]):
for point in points:
gnom, _, pix = model.get_projections(point)
gnom_true = -model.focal_length * np.array(point[:2]) / point[2]
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(gnom, gnom_true)
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, px=1500, py=1500.5,
misalignment=[np.pi, 0, 0])
with self.subTest(misalignment=[np.pi, 0, 0]):
for point in points:
gnom, _, pix = model.get_projections(point)
gnom_true = model.focal_length * np.array(point[:2]) / point[2]
gnom_true[0] *= -1
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(gnom, gnom_true)
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, px=1500, py=1500.5,
misalignment=[0, np.pi, 0])
with self.subTest(misalignment=[0, np.pi, 0]):
for point in points:
gnom, _, pix = model.get_projections(point)
gnom_true = model.focal_length * np.array(point[:2]) / point[2]
gnom_true[1] *= -1
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(gnom, gnom_true)
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, px=1500, py=1500.5,
misalignment=[1, 0.2, 0.3])
with self.subTest(misalignment=[1, 0.2, 0.3]):
rot_mat = at.rotvec_to_rotmat([1, 0.2, 0.3]).squeeze()
for point in points:
point_new = rot_mat @ point
gnom, _, pix = model.get_projections(point)
gnom_true = model.focal_length * np.array(point_new[:2]) / np.array(point_new[2])
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(gnom, gnom_true)
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, px=1500, py=1500.5,
misalignment=[[1, 0.2, 0.3], [0, 0, np.pi]])
model.estimate_multiple_misalignments = True
with self.subTest(misalignment=[[1, 0.2, 0.3], [0, 0, np.pi]]):
for point in points:
rot_mat = at.rotvec_to_rotmat([1, 0.2, 0.3]).squeeze()
point_new = rot_mat @ point
gnom, _, pix = model.get_projections(point, image=0)
gnom_true = model.focal_length * np.array(point_new[:2]) / np.array(point_new[2])
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(gnom, gnom_true)
np.testing.assert_array_almost_equal(pix, pix_true)
gnom, _, pix = model.get_projections(point, image=1)
gnom_true = -model.focal_length * np.array(point[:2]) / point[2]
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(gnom, gnom_true)
np.testing.assert_array_almost_equal(pix, pix_true)
def test_project_onto_image(self):
points = [[0, 0, 1], [-0.1, 0.2, 2.2], [[-0.1], [0.2], [2.2]], [[-0.1, 0], [0.2, 0], [2.2, 1]]]
model = self.Class(focal_length=8.7, kx=500, ky=500.5, px=1500, py=1500.5, a1=-1e-3, a2=1e-6, a3=-7e-8)
with self.subTest(misalignment=None):
for point in points:
pix = model.project_onto_image(point)
gnom_true = model.focal_length * np.array(point[:2]) / point[2]
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_equal(pix, pix_true)
with self.subTest(temperature=1):
for point in points:
pix = model.project_onto_image(point, temperature=1)
gnom_true = model.focal_length * np.array(point[:2]) / point[2]
gnom_true *= model.get_temperature_scale(1)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_equal(pix, pix_true)
with self.subTest(temperature=-10.5):
for point in points:
pix = model.project_onto_image(point, temperature=-10.5)
gnom_true = model.focal_length * np.array(point[:2]) / point[2]
gnom_true *= model.get_temperature_scale(-10.5)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, px=1500, py=1500.5,
misalignment=[0, 0, np.pi])
with self.subTest(misalignment=[0, 0, np.pi]):
for point in points:
pix = model.project_onto_image(point)
gnom_true = -model.focal_length * np.array(point[:2]) / point[2]
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, px=1500, py=1500.5,
misalignment=[np.pi, 0, 0])
with self.subTest(misalignment=[np.pi, 0, 0]):
for point in points:
pix = model.project_onto_image(point)
gnom_true = model.focal_length * np.array(point[:2]) / point[2]
gnom_true[0] *= -1
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, px=1500, py=1500.5,
misalignment=[0, np.pi, 0])
with self.subTest(misalignment=[0, np.pi, 0]):
for point in points:
pix = model.project_onto_image(point)
gnom_true = model.focal_length * np.array(point[:2]) / point[2]
gnom_true[1] *= -1
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, px=1500, py=1500.5,
misalignment=[1, 0.2, 0.3])
with self.subTest(misalignment=[1, 0.2, 0.3]):
rot_mat = at.rotvec_to_rotmat([1, 0.2, 0.3]).squeeze()
for point in points:
point_new = rot_mat @ point
pix = model.project_onto_image(point)
gnom_true = model.focal_length * np.array(point_new[:2]) / np.array(point_new[2])
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, px=1500, py=1500.5,
misalignment=[[1, 0.2, 0.3], [0, 0, np.pi]])
model.estimate_multiple_misalignments = True
with self.subTest(misalignment=[[1, 0.2, 0.3], [0, 0, np.pi]]):
for point in points:
rot_mat = at.rotvec_to_rotmat([1, 0.2, 0.3]).squeeze()
point_new = rot_mat @ point
pix = model.project_onto_image(point, image=0)
gnom_true = model.focal_length * np.array(point_new[:2]) / np.array(point_new[2])
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pix, pix_true)
pix = model.project_onto_image(point, image=1)
gnom_true = -model.focal_length * np.array(point[:2]) / point[2]
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pix, pix_true)
def test_compute_pixel_jacobian(self):
def num_deriv(uvec, cmodel, delta=1e-8, image=0, temperature=0) -> np.ndarray:
uvec = np.array(uvec).reshape(3, -1)
pix_true = cmodel.project_onto_image(uvec, image=image, temperature=temperature)
uvec_pert = uvec + [[delta], [0], [0]]
pix_pert_x_f = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
uvec_pert = uvec + [[0], [delta], [0]]
pix_pert_y_f = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
uvec_pert = uvec + [[0], [0], [delta]]
pix_pert_z_f = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
uvec_pert = uvec - [[delta], [0], [0]]
pix_pert_x_b = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
uvec_pert = uvec - [[0], [delta], [0]]
pix_pert_y_b = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
uvec_pert = uvec - [[0], [0], [delta]]
pix_pert_z_b = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
return np.array([(pix_pert_x_f-pix_pert_x_b)/(2*delta),
(pix_pert_y_f-pix_pert_y_b)/(2*delta),
(pix_pert_z_f-pix_pert_z_b)/(2*delta)]).swapaxes(0, -1)
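        # Hedged note: num_deriv above is a second-order central difference,
        # (f(x + d) - f(x - d)) / (2 d), with O(d**2) truncation error; the
        # delta=1e-2 used below trades floating-point round-off against that
        # truncation error for these pixel-scale outputs.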
model_param = {"focal_length": 100.75, "kx": 30, "ky": 40,
"px": 4005.23, "py": 4005.23,
"misalignment": [[1e-8, 1e-9, 1e-10], [-1e-8, 2e-9, -1e-11], [2e-10, -5e-12, 1e-9]],
"a1": 1e-6, "a2": 2e-7, "a3": 3e-8}
inputs = [np.array([[0.5, 0, 1]]).T,
np.array([[0, 0.5, 1], [0.5, 0.5, 1], [-0.5, 0, 1]]).T,
np.array([[0.1, -0.5, 1], [-0.5, -0.5, 1], [5, 10, 1000.23], [1, 2, 1200.23]]).T]
temperatures = [0, 1, -1, 10.5, -10.5]
model = self.Class(**model_param)
model.estimate_multiple_misalignments = True
for temp in temperatures:
for input in inputs:
for image in range(3):
with self.subTest(image=image, temp=temp, input=input):
jac_ana = model.compute_pixel_jacobian(input, image=image, temperature=temp)
jac_num = num_deriv(input, model, image=image, temperature=temp, delta=1e-2)
np.testing.assert_allclose(jac_ana, jac_num, rtol=1e-3, atol=1e-10)
def test__compute_dcamera_point_dgnomic(self):
def num_deriv(gnomic_locations, cmodel, delta=1e-6) -> np.ndarray:
def g2u(g):
v = np.vstack([g, cmodel.focal_length*np.ones(g.shape[-1])])
return v/np.linalg.norm(v, axis=0, keepdims=True)
gnomic_locations = np.asarray(gnomic_locations).reshape(2, -1)
gnom_pert = gnomic_locations + [[delta], [0]]
cam_loc_pert_x_f = g2u(gnom_pert)
gnom_pert = gnomic_locations + [[0], [delta]]
cam_loc_pert_y_f = g2u(gnom_pert)
gnom_pert = gnomic_locations - [[delta], [0]]
cam_loc_pert_x_b = g2u(gnom_pert)
gnom_pert = gnomic_locations - [[0], [delta]]
cam_loc_pert_y_b = g2u(gnom_pert)
return np.array([(cam_loc_pert_x_f -cam_loc_pert_x_b)/(2*delta),
(cam_loc_pert_y_f -cam_loc_pert_y_b)/(2*delta)]).swapaxes(0, -1)
model_param = {"focal_length": 100.75, "kx": 300, "ky": 400,
"px": 1005.23, "py": 1005.23,
"misalignment": [[1e-8, 1e-9, 1e-10], [-1e-8, 2e-9, -1e-11], [2e-10, -5e-12, 1e-9]],
"a1": 1e-6, "a2": 2e-7, "a3": 3e-8}
inputs = [np.array([[0, 0]]).T,
np.array([[0, 2000], [2000, 0], [2000, 2000]]).T,
np.array([[1000, 1000], [1000, 2000], [2000, 1000], [0, 1000], [1000, 0]]).T]
model = self.Class(**model_param)
model.estimate_multiple_misalignments = True
for input in inputs:
with self.subTest(input=input):
jac_ana = []
for gnom in input.T:
jac_ana.append(
model._compute_dcamera_point_dgnomic(gnom, np.sqrt(np.sum(gnom*gnom) + model.focal_length**2)))
jac_ana = np.array(jac_ana)
jac_num = num_deriv(input, model)
np.testing.assert_almost_equal(jac_ana, jac_num)
def test__compute_dgnomic_ddist_gnomic(self):
def num_deriv(dist_gnomic_locations, cmodel, delta=1e-6) -> np.ndarray:
def dg2g(dg):
gnomic_guess = dg.copy()
                # perform the fixed-point iteration that inverts the distortion
for _ in np.arange(20):
# get the distorted location assuming the current guess is correct
gnomic_guess_distorted = cmodel.apply_distortion(gnomic_guess)
# subtract off the residual distortion from the gnomic guess
gnomic_guess += dg - gnomic_guess_distorted
# check for convergence
if np.all(np.linalg.norm(gnomic_guess_distorted - dg, axis=0) <= 1e-15):
break
return gnomic_guess
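            # Hedged note: dg2g above inverts the distortion by fixed-point
            # iteration, g <- g + (dg - D(g)), which converges when D is a
            # small perturbation of the identity; 20 iterations with a 1e-15
            # tolerance is ample in that regime.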
dist_gnomic_locations = np.asarray(dist_gnomic_locations).reshape(2, -1)
dist_gnom_pert = dist_gnomic_locations + [[delta], [0]]
gnom_loc_pert_x_f = dg2g(dist_gnom_pert)
dist_gnom_pert = dist_gnomic_locations + [[0], [delta]]
gnom_loc_pert_y_f = dg2g(dist_gnom_pert)
dist_gnom_pert = dist_gnomic_locations - [[delta], [0]]
gnom_loc_pert_x_b = dg2g(dist_gnom_pert)
dist_gnom_pert = dist_gnomic_locations - [[0], [delta]]
gnom_loc_pert_y_b = dg2g(dist_gnom_pert)
return np.array([(gnom_loc_pert_x_f - gnom_loc_pert_x_b)/(2*delta),
(gnom_loc_pert_y_f - gnom_loc_pert_y_b)/(2*delta)]).swapaxes(0, -1)
model_param = {"focal_length": 100.75, "kx": 300, "ky": 400,
"px": 1005.23, "py": 1005.23,
"misalignment": [[1e-8, 1e-9, 1e-10], [-1e-8, 2e-9, -1e-11], [2e-10, -5e-12, 1e-9]],
"a1": 1e-6, "a2": 2e-7, "a3": 3e-8}
inputs = [np.array([[0, 0]]).T,
np.array([[0, 0.1], [0.1, 0], [0.1, 0.1]]).T,
np.array([[-0.1, 0], [0, -0.1], [-0.1, -0.1], [0.1, -0.1], [-0.1, 0.1]]).T]
model = self.Class(**model_param)
model.estimate_multiple_misalignments = True
for input in inputs:
with self.subTest(input=input):
jac_ana = []
for dist_gnom in input.T:
jac_ana.append(model._compute_dgnomic_ddist_gnomic(dist_gnom))
jac_ana = np.array(jac_ana)
jac_num = num_deriv(input, model)
np.testing.assert_almost_equal(jac_ana, jac_num)
def test_compute_unit_vector_jacobian(self):
def num_deriv(pixels, cmodel, delta=1e-6, image=0, temperature=0) -> np.ndarray:
pixels = np.array(pixels).reshape(2, -1)
pix_pert = pixels + [[delta], [0]]
uvec_pert_x_f = cmodel.pixels_to_unit(pix_pert, image=image, temperature=temperature)
pix_pert = pixels + [[0], [delta]]
uvec_pert_y_f = cmodel.pixels_to_unit(pix_pert, image=image, temperature=temperature)
pix_pert = pixels - [[delta], [0]]
uvec_pert_x_b = cmodel.pixels_to_unit(pix_pert, image=image, temperature=temperature)
pix_pert = pixels - [[0], [delta]]
uvec_pert_y_b = cmodel.pixels_to_unit(pix_pert, image=image, temperature=temperature)
return np.array([(uvec_pert_x_f-uvec_pert_x_b)/(2*delta),
(uvec_pert_y_f-uvec_pert_y_b)/(2*delta)]).swapaxes(0, -1)
model_param = {"focal_length": 100.75, "kx": 300, "ky": 400,
"px": 1005.23, "py": 1005.23,
"misalignment": [[1e-8, 1e-9, 1e-10], [-1e-8, 2e-9, -1e-11], [2e-10, -5e-12, 1e-9]],
"a1": 1e-6, "a2": 2e-7, "a3": 3e-8}
inputs = [np.array([[0, 0]]).T,
np.array([[0, 2000], [2000, 0], [2000, 2000]]).T,
np.array([[1000, 1000], [1000, 2000], [2000, 1000], [0, 1000], [1000, 0]]).T]
temperatures = [0, 1, -1, 10.5, -10.5]
model = self.Class(**model_param)
model.estimate_multiple_misalignments = True
for temp in temperatures:
for input in inputs:
for image in range(3):
with self.subTest(image=image, temp=temp, input=input):
jac_ana = model.compute_unit_vector_jacobian(input, image=image, temperature=temp)
jac_num = num_deriv(input, model, image=image, temperature=temp, delta=1e-2)
np.testing.assert_allclose(jac_ana, jac_num, rtol=1e-3, atol=1e-10)
def test__compute_dcamera_point_dmisalignment(self):
def num_deriv(loc, dtheta, delta=1e-10) -> np.ndarray:
mis_pert = at.rotvec_to_rotmat(np.array(dtheta) + [delta, 0, 0]).squeeze()
point_pert_x_f = mis_pert @ loc
mis_pert = at.rotvec_to_rotmat(np.array(dtheta) + [0, delta, 0]).squeeze()
point_pert_y_f = mis_pert @ loc
mis_pert = at.rotvec_to_rotmat(np.array(dtheta) + [0, 0, delta]).squeeze()
point_pert_z_f = mis_pert @ loc
mis_pert = at.rotvec_to_rotmat(np.array(dtheta) - [delta, 0, 0]).squeeze()
point_pert_x_b = mis_pert @ loc
mis_pert = at.rotvec_to_rotmat(np.array(dtheta) - [0, delta, 0]).squeeze()
point_pert_y_b = mis_pert @ loc
mis_pert = at.rotvec_to_rotmat(np.array(dtheta) - [0, 0, delta]).squeeze()
point_pert_z_b = mis_pert @ loc
return np.array([(point_pert_x_f - point_pert_x_b) / (2 * delta),
(point_pert_y_f - point_pert_y_b) / (2 * delta),
(point_pert_z_f - point_pert_z_b) / (2 * delta)]).T
inputs = [[1, 0, 0], [0, 1, 0], [0, 0, 1], [np.sqrt(3), np.sqrt(3), np.sqrt(3)],
[-1, 0, 0], [0, -1, 0], [0, 0, -1], [-np.sqrt(3), -np.sqrt(3), -np.sqrt(3)],
[1, 0, 100], [0, 0.5, 1]]
misalignment = [[1e-8, 0, 0], [0, 1e-8, 0], [0, 0, 1e-8], [1e-9, 1e-9, 1e-9],
[-1e-8, 0, 0], [0, -1e-8, 0], [0, 0, -1e-8], [-1e-9, -1e-9, -1e-9],
[1e-9, 2.3e-9, -0.5e-9]]
for mis in misalignment:
with self.subTest(misalignment=mis):
for inp in inputs:
num = num_deriv(inp, mis)
# noinspection PyTypeChecker
ana = self.Class._compute_dcamera_point_dmisalignment(inp)
np.testing.assert_allclose(num, ana, atol=1e-10, rtol=1e-4)
def test__compute_dpixel_ddistorted_gnomic(self):
def num_deriv(loc, cmodel, delta=1e-8, temperature=0) -> np.ndarray:
loc_pert = np.array(loc) + [delta, 0]
loc_pert *= cmodel.get_temperature_scale(temperature)
pix_pert_x_f = cmodel.intrinsic_matrix[:, :2] @ loc_pert + cmodel.intrinsic_matrix[:, 2]
loc_pert = np.array(loc) + [0, delta]
loc_pert *= cmodel.get_temperature_scale(temperature)
pix_pert_y_f = cmodel.intrinsic_matrix[:, :2] @ loc_pert + cmodel.intrinsic_matrix[:, 2]
loc_pert = np.array(loc) - [delta, 0]
loc_pert *= cmodel.get_temperature_scale(temperature)
pix_pert_x_b = cmodel.intrinsic_matrix[:, :2] @ loc_pert + cmodel.intrinsic_matrix[:, 2]
loc_pert = np.array(loc) - [0, delta]
loc_pert *= cmodel.get_temperature_scale(temperature)
pix_pert_y_b = cmodel.intrinsic_matrix[:, :2] @ loc_pert + cmodel.intrinsic_matrix[:, 2]
return np.array(
[(pix_pert_x_f - pix_pert_x_b) / (2 * delta), (pix_pert_y_f - pix_pert_y_b) / (2 * delta)]).T
intrins_coefs = [{"kx": 1.5, "ky": 0, "px": 0, "py": 0, 'a1': 0, 'a2': 0, 'a3': 0},
{"kx": 0, "ky": 1.5, "px": 0, "py": 0, 'a1': 0, 'a2': 0, 'a3': 0},
{"kx": 0, "ky": 0, "px": 1.5, "py": 0, 'a1': 0, 'a2': 0, 'a3': 0},
{"kx": 0, "ky": 0, "px": 0, "py": 1.5, 'a1': 0, 'a2': 0, 'a3': 0},
{"kx": 0, "ky": 0, "px": 0, "py": 0, 'a1': 1.5, 'a2': 0, 'a3': 0},
{"kx": 0, "ky": 0, "px": 0, "py": 0, 'a1': 0, 'a2': 1.5, 'a3': 0},
{"kx": 0, "ky": 0, "px": 0, "py": 0, 'a1': 0, 'a2': 0, 'a3': 1.5},
{"kx": 1.5, "ky": 1.5, "px": 1.5, "py": 1.5, 'a1': 1.5, 'a2': 1.5, 'a3': 1.5}]
inputs = [[1e-6, 1e-6], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [0, 1], [0, -1], [0, 1.5], [0, -1.5], [1, 1]]
temperatures = [0, 1, -1, 10.5, -10.5]
for intrins_coef in intrins_coefs:
model = self.Class(**intrins_coef)
for temp in temperatures:
with self.subTest(temp=temp, **intrins_coef):
for inp in inputs:
num = num_deriv(inp, model, temperature=temp)
ana = model._compute_dpixel_ddistorted_gnomic(temperature=temp)
np.testing.assert_allclose(num, ana, atol=1e-10)
def test__compute_dgnomic_dcamera_point(self):
def num_deriv(loc, cmodel, delta=1e-8) -> np.ndarray:
loc_pert = np.array(loc) + [delta, 0, 0]
gnom_pert_x_f = cmodel.get_projections(loc_pert)[0]
loc_pert = np.array(loc) + [0, delta, 0]
gnom_pert_y_f = cmodel.get_projections(loc_pert)[0]
loc_pert = np.array(loc) + [0, 0, delta]
gnom_pert_z_f = cmodel.get_projections(loc_pert)[0]
loc_pert = np.array(loc) - [delta, 0, 0]
gnom_pert_x_b = cmodel.get_projections(loc_pert)[0]
loc_pert = np.array(loc) - [0, delta, 0]
gnom_pert_y_b = cmodel.get_projections(loc_pert)[0]
loc_pert = np.array(loc) - [0, 0, delta]
gnom_pert_z_b = cmodel.get_projections(loc_pert)[0]
return np.array([(gnom_pert_x_f - gnom_pert_x_b) / (2 * delta),
(gnom_pert_y_f - gnom_pert_y_b) / (2 * delta),
(gnom_pert_z_f - gnom_pert_z_b) / (2 * delta)]).T
intrins_coefs = [{"focal_length": 1},
{"focal_length": 2.5},
{"focal_length": 1000},
{"focal_length": 0.1}]
inputs = [[0, 0, 1], [0.5, 0, 1], [0, 0.5, 1], [0.5, 0.5, 1], [-0.5, 0, 1], [0, -0.5, 1], [-0.5, -0.5, 1],
[5, 10, 1000.23], [0.5, 1e-14, 1]]
for intrins_coef in intrins_coefs:
model = self.Class(**intrins_coef)
with self.subTest(**intrins_coef):
for inp in inputs:
num = num_deriv(inp, model)
ana = model._compute_dgnomic_dcamera_point(np.array(inp))
np.testing.assert_allclose(num, ana, atol=1e-9, rtol=1e-5)
def test__compute_dgnomic_dfocal_length(self):
def num_deriv(loc, cmodel, delta=1e-8) -> np.ndarray:
model_pert = cmodel.copy()
model_pert.focal_length += delta
gnom_pert_f = model_pert.get_projections(loc)[0]
model_pert = cmodel.copy()
model_pert.focal_length -= delta
gnom_pert_b = model_pert.get_projections(loc)[0]
# noinspection PyTypeChecker
return np.asarray((gnom_pert_f - gnom_pert_b) / (2 * delta))
intrins_coefs = [{"focal_length": 1},
{"focal_length": 2.5},
{"focal_length": 1000},
{"focal_length": 0.1}]
inputs = [[0, 0, 1], [0.5, 0, 1], [0, 0.5, 1], [0.5, 0.5, 1], [-0.5, 0, 1], [0, -0.5, 1], [-0.5, -0.5, 1]]
for intrins_coef in intrins_coefs:
model = self.Class(**intrins_coef)
with self.subTest(**intrins_coef):
for inp in inputs:
num = num_deriv(inp, model)
ana = model._compute_dgnomic_dfocal_length(np.array(inp))
np.testing.assert_allclose(num, ana, atol=1e-10, rtol=1e-5)
def test__compute_dpixel_dintrinsic(self):
def num_deriv(loc, cmodel, delta=1e-6) -> np.ndarray:
model_pert = cmodel.copy()
model_pert.kx += delta
pix_pert_kx_f = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.ky += delta
pix_pert_ky_f = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.px += delta
pix_pert_px_f = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.py += delta
pix_pert_py_f = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.kx -= delta
pix_pert_kx_b = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.ky -= delta
pix_pert_ky_b = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.px -= delta
pix_pert_px_b = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.py -= delta
pix_pert_py_b = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
return np.array([(pix_pert_kx_f - pix_pert_kx_b) / (2 * delta),
(pix_pert_ky_f - pix_pert_ky_b) / (2 * delta),
(pix_pert_px_f - pix_pert_px_b) / (2 * delta),
(pix_pert_py_f - pix_pert_py_b) / (2 * delta)]).T
intrins_coefs = [{"kx": 1.5, "ky": 0, "px": 0, "py": 0},
{"kx": 0, "ky": 1.5, "px": 0, "py": 0},
{"kx": 0, "ky": 0, "px": 1.5, "py": 0},
{"kx": 0, "ky": 0, "px": 0, "py": 1.5},
{"kx": 1.5, "ky": 1.5, "px": 1.5, "py": 1.5}]
inputs = [[1e-6, 1e-6], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [0, 1], [0, -1], [0, 1.5], [0, -1.5], [1, 1]]
for intrins_coef in intrins_coefs:
model = self.Class(**intrins_coef)
with self.subTest(**intrins_coef):
for inp in inputs:
num = num_deriv(inp, model)
ana = model._compute_dpixel_dintrinsic(np.array(inp))
np.testing.assert_allclose(num, ana, atol=1e-10)
def test__compute_dpixel_dtemperature_coeffs(self):
def num_deriv(loc, cmodel, delta=1e-6, temperature=0) -> np.ndarray:
loc = np.array(loc)
model_pert = cmodel.copy()
model_pert.a1 += delta
loc_copy = loc * model_pert.get_temperature_scale(temperature)
pix_pert_a1_f = model_pert.intrinsic_matrix[:, :2] @ loc_copy + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.a2 += delta
loc_copy = loc * model_pert.get_temperature_scale(temperature)
pix_pert_a2_f = model_pert.intrinsic_matrix[:, :2] @ loc_copy + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.a3 += delta
loc_copy = loc * model_pert.get_temperature_scale(temperature)
pix_pert_a3_f = model_pert.intrinsic_matrix[:, :2] @ loc_copy + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.a1 -= delta
loc_copy = loc * model_pert.get_temperature_scale(temperature)
pix_pert_a1_b = model_pert.intrinsic_matrix[:, :2] @ loc_copy + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.a2 -= delta
loc_copy = loc * model_pert.get_temperature_scale(temperature)
pix_pert_a2_b = model_pert.intrinsic_matrix[:, :2] @ loc_copy + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.a3 -= delta
loc_copy = loc * model_pert.get_temperature_scale(temperature)
pix_pert_a3_b = model_pert.intrinsic_matrix[:, :2] @ loc_copy + model_pert.intrinsic_matrix[:, 2]
return np.array([(pix_pert_a1_f - pix_pert_a1_b) / (2 * delta),
(pix_pert_a2_f - pix_pert_a2_b) / (2 * delta),
(pix_pert_a3_f - pix_pert_a3_b) / (2 * delta)]).T
intrins_coefs = [{"kx": 1.5, "ky": 0, "px": 0, "py": 0, 'a1': 0, 'a2': 0, 'a3': 0},
{"kx": 0, "ky": 1.5, "px": 0, "py": 0, 'a1': 0, 'a2': 0, 'a3': 0},
{"kx": 0, "ky": 0, "px": 1.5, "py": 0, 'a1': 0, 'a2': 0, 'a3': 0},
{"kx": 0, "ky": 0, "px": 0, "py": 1.5, 'a1': 0, 'a2': 0, 'a3': 0},
{"kx": 0, "ky": 0, "px": 0, "py": 0, 'a1': 1.5, 'a2': 0, 'a3': 0},
{"kx": 0, "ky": 0, "px": 0, "py": 0, 'a1': 0, 'a2': 1.5, 'a3': 0},
{"kx": 0, "ky": 0, "px": 0, "py": 0, 'a1': 0, 'a2': 0, 'a3': 1.5},
{"kx": 1.5, "ky": 1.5, "px": 1.5, "py": 1.5, 'a1': 1.5, 'a2': 1.5, 'a3': 1.5}]
inputs = [[1e-6, 1e-6], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [0, 1], [0, -1], [0, 1.5], [0, -1.5], [1, 1]]
temperatures = [0, 1, -1, -10.5, 10.5]
for intrins_coef in intrins_coefs:
model = self.Class(**intrins_coef)
for temp in temperatures:
with self.subTest(temp=temp, **intrins_coef):
for inp in inputs:
num = num_deriv(inp, model, temperature=temp)
ana = model._compute_dpixel_dtemperature_coeffs(inp, temperature=temp)
np.testing.assert_allclose(num, ana, atol=1e-10)
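# A Jacobian "row" here is the 2 x n partial of one pixel location with respect to
# the full parameter vector: focal length, intrinsic terms, temperature
# coefficients, then one rotation vector per image. A separate, smaller step
# (delta_misalignment) is used for the rotation-vector perturbations, and the zero
# block pads the columns belonging to earlier images' misalignments.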
def test_get_jacobian_row(self):
def num_deriv(loc, cmodel, delta=1e-8, image=0, temperature=0) -> np.ndarray:
model_pert = cmodel.copy()
model_pert.focal_length += delta
pix_pert_f_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.focal_length -= delta
pix_pert_f_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.kx += delta
pix_pert_kx_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.ky += delta
pix_pert_ky_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.px += delta
pix_pert_px_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.py += delta
pix_pert_py_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.kx -= delta
pix_pert_kx_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.ky -= delta
pix_pert_ky_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.px -= delta
pix_pert_px_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.py -= delta
pix_pert_py_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a1 += delta
pix_pert_a1_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a2 += delta
pix_pert_a2_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a3 += delta
pix_pert_a3_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a1 -= delta
pix_pert_a1_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a2 -= delta
pix_pert_a2_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a3 -= delta
pix_pert_a3_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
delta_misalignment = 1e-6
model_pert = cmodel.copy()
model_pert.misalignment[image][0] += delta_misalignment
pix_pert_mx_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][1] += delta_misalignment
pix_pert_my_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][2] += delta_misalignment
pix_pert_mz_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][0] -= delta_misalignment
pix_pert_mx_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][1] -= delta_misalignment
pix_pert_my_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][2] -= delta_misalignment
pix_pert_mz_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
return np.vstack([(pix_pert_f_f - pix_pert_f_b) / (delta * 2),
(pix_pert_kx_f - pix_pert_kx_b) / (delta * 2),
(pix_pert_ky_f - pix_pert_ky_b) / (delta * 2),
(pix_pert_px_f - pix_pert_px_b) / (delta * 2),
(pix_pert_py_f - pix_pert_py_b) / (delta * 2),
(pix_pert_a1_f - pix_pert_a1_b) / (delta * 2),
(pix_pert_a2_f - pix_pert_a2_b) / (delta * 2),
(pix_pert_a3_f - pix_pert_a3_b) / (delta * 2),
np.zeros((image * 3, 2)),
(pix_pert_mx_f - pix_pert_mx_b) / (delta_misalignment * 2),
(pix_pert_my_f - pix_pert_my_b) / (delta_misalignment * 2),
(pix_pert_mz_f - pix_pert_mz_b) / (delta_misalignment * 2)]).T
# TODO: investigate why this fails with slightly larger misalignments and temperature coefficients
model_param = {"focal_length": 100.75, "kx": 30, "ky": 40, "px": 4005.23, "py": 4005.23,
"a1": 1e-4, "a2": 2e-7, "a3": 3e-8,
"misalignment": [[2e-15, -1.2e-14, 5e-16], [-1e-14, 2e-14, -1e-15]]}
inputs = [[0.5, 0, 1], [0, 0.5, 1], [0.5, 0.5, 1], [-0.5, 0, 1], [0, -0.5, 1], [-0.5, -0.5, 1],
[5, 10, 1000.23], [[10], [-22], [1200.23]]]
temperatures = [0, -1, 1, -10.5, 10.5]
model = self.Class(**model_param)
model.estimate_multiple_misalignments = True
for inp in inputs:
for temp in temperatures:
with self.subTest(temp=temp, inp=inp):
num = num_deriv(inp, model, delta=1, temperature=temp)
ana = model._get_jacobian_row(np.array(inp), 0, 1, temperature=temp)
np.testing.assert_allclose(ana, num, rtol=1e-2, atol=1e-10)
num = num_deriv(inp, model, delta=1, image=1, temperature=temp)
ana = model._get_jacobian_row(np.array(inp), 1, 2, temperature=temp)
np.testing.assert_allclose(ana, num, atol=1e-10, rtol=1e-2)
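# compute_jacobian stacks the per-point rows across all images, so num_deriv
# zero-pads the misalignment columns of both earlier and later images. With
# use_a_priori set, the model appends an identity block (one row per estimated
# parameter), reproduced below by padding jac_num with np.eye.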
def test_compute_jacobian(self):
def num_deriv(loc, cmodel, delta=1e-8, image=0, nimages=1, temperature=0) -> np.ndarray:
model_pert = cmodel.copy()
model_pert.focal_length += delta
pix_pert_f_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.focal_length -= delta
pix_pert_f_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.kx += delta
pix_pert_kx_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.ky += delta
pix_pert_ky_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.px += delta
pix_pert_px_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.py += delta
pix_pert_py_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.kx -= delta
pix_pert_kx_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.ky -= delta
pix_pert_ky_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.px -= delta
pix_pert_px_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.py -= delta
pix_pert_py_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a1 += delta
pix_pert_a1_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a2 += delta
pix_pert_a2_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a3 += delta
pix_pert_a3_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a1 -= delta
pix_pert_a1_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a2 -= delta
pix_pert_a2_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a3 -= delta
pix_pert_a3_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
delta_misalignment = 1e-6
model_pert = cmodel.copy()
model_pert.misalignment[image][0] += delta_misalignment
pix_pert_mx_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][1] += delta_misalignment
pix_pert_my_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][2] += delta_misalignment
pix_pert_mz_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][0] -= delta_misalignment
pix_pert_mx_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][1] -= delta_misalignment
pix_pert_my_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][2] -= delta_misalignment
pix_pert_mz_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
return np.vstack([(pix_pert_f_f - pix_pert_f_b) / (delta * 2),
(pix_pert_kx_f - pix_pert_kx_b) / (delta * 2),
(pix_pert_ky_f - pix_pert_ky_b) / (delta * 2),
(pix_pert_px_f - pix_pert_px_b) / (delta * 2),
(pix_pert_py_f - pix_pert_py_b) / (delta * 2),
(pix_pert_a1_f - pix_pert_a1_b) / (delta * 2),
(pix_pert_a2_f - pix_pert_a2_b) / (delta * 2),
(pix_pert_a3_f - pix_pert_a3_b) / (delta * 2),
np.zeros((image * 3, 2)),
(pix_pert_mx_f - pix_pert_mx_b) / (delta_misalignment * 2),
(pix_pert_my_f - pix_pert_my_b) / (delta_misalignment * 2),
(pix_pert_mz_f - pix_pert_mz_b) / (delta_misalignment * 2),
np.zeros(((nimages - image - 1) * 3, 2))]).T
model_param = {"focal_length": 100.75, "kx": 30, "ky": 40, "px": 4005.23, "py": 4005.23,
"a1": 1e-5, "a2": 1e-6, "a3": 1e-7,
"misalignment": [[0, 0, 1e-15], [0, 2e-15, 0], [3e-15, 0, 0]]}
inputs = [np.array([[0.5, 0, 1]]).T,
np.array([[0, 0.5, 1], [0.5, 0.5, 1], [-0.5, 0, 1000]]).T,
np.array([[0.1, -0.5, 1], [-0.5, -0.5, 1], [5, 10, 1000.23], [1, 2, 1200.23]]).T]
temperatures = [0, 1, -1, 10.5, -10.5]
model = self.Class(**model_param, estimation_parameters=['intrinsic',
'temperature dependence',
'multiple misalignments'])
for temp in temperatures:
with self.subTest(temp=temp):
model.use_a_priori = False
jac_ana = model.compute_jacobian(inputs, temperature=temp)
jac_num = []
numim = len(inputs)
for ind, inp in enumerate(inputs):
for vec in inp.T:
jac_num.append(num_deriv(vec.T, model, delta=1, image=ind, nimages=numim, temperature=temp))
np.testing.assert_allclose(jac_ana, np.vstack(jac_num), rtol=1e-2, atol=1e-10)
model.use_a_priori = True
jac_ana = model.compute_jacobian(inputs, temperature=temp)
jac_num = []
numim = len(inputs)
for ind, inp in enumerate(inputs):
for vec in inp.T:
jac_num.append(num_deriv(vec.T, model, delta=1, image=ind, nimages=numim, temperature=temp))
jac_num = np.vstack(jac_num)
jac_num = np.pad(jac_num, [(0, jac_num.shape[1]), (0, 0)], 'constant', constant_values=0)
jac_num[-jac_num.shape[1]:] = np.eye(jac_num.shape[1])
np.testing.assert_allclose(jac_ana, jac_num, rtol=1e-2, atol=1e-10)
def test_remove_jacobian_columns(self):
jac = np.arange(30).reshape(1, -1)
model = self.Class()
for est_param, vals in model.element_dict.items():
model.estimation_parameters = [est_param]
expected = jac[0, vals]
np.testing.assert_array_equal(model._remove_jacobian_columns(jac), [expected])
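# apply_update consumes the update vector in element_dict order: scalar parameters
# first, then the remaining entries taken three at a time as rotation vectors
# applied to the per-image misalignments (starting from zero here, so the
# resulting rotations should equal the update values).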
def test_apply_update(self):
model_param = {"focal_length": 0, "kx": 0, "ky": 0,
"px": 0, "py": 0, "a1": 0, "a2": 0, "a3": 0,
"misalignment": [[0, 0, 0], [0, 0, 0]]}
model = self.Class(**model_param, estimation_parameters=['intrinsic',
'temperature dependence',
'multiple misalignments'])
update_vec = np.arange(14)
model.apply_update(update_vec)
keys = list(model_param.keys())
keys.remove('misalignment')
for key in keys:
self.assertEqual(getattr(model, key), update_vec[model.element_dict[key][0]])
for ind, vec in enumerate(update_vec[8:].reshape(-1, 3)):
np.testing.assert_array_almost_equal(at.Rotation(vec).q, at.Rotation(model.misalignment[ind]).q)
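# Round-trip check: manually distort each gnomic point, apply the temperature
# scale, and push the result through the intrinsic matrix to get pixels, then
# verify that pixels_to_gnomic recovers the original undistorted gnomic location.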
def test_pixels_to_gnomic(self):
gnomic = [[1, 0], [0, 1], [-1, 0], [0, -1],
[0.5, 0], [0, 0.5], [-0.5, 0], [0, -0.5],
[0.5, 0.5], [-0.5, -0.5], [0.5, -0.5], [-0.5, 0.5],
[[1, 0, 0.5], [0, 1.5, -0.5]]]
model = self.Class(kx=2000, ky=-3000.2, px=1025, py=937.567,
a1=1e-3, a2=2e-6, a3=-5.5e-8)
temperatures = [0, 1, -1, 10.5, -10.5]
for gnoms in gnomic:
for temp in temperatures:
with self.subTest(gnoms=gnoms, temp=temp):
dis_gnoms = np.asarray(model.apply_distortion(gnoms)).astype(float)
dis_gnoms *= model.get_temperature_scale(temp)
pixels = ((model.intrinsic_matrix[:, :2] @ dis_gnoms).T + model.intrinsic_matrix[:, 2]).T
gnoms_solved = model.pixels_to_gnomic(pixels, temperature=temp)
np.testing.assert_allclose(gnoms_solved, gnoms)
def test_undistort_pixels(self):
intrins_param = {"kx": 3000, "ky": 4000, "px": 4005.23, 'py': 2000.33, 'a1': 1e-6, 'a2': 1e-5, 'a3': 2e-5}
pinhole = [[0, 0], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [[1.5], [0]], [[1.5, -1], [0, 0]],
[0, 1], [0, -1], [0, 1.5], [0, -1.5], [[0], [1.5]], [[0, 0], [1.5, -1]], [1, 1]]
model = self.Class(**intrins_param)
temperatures = [0, 1, -1, 10.5, -10.5]
for gnom in pinhole:
gnom = np.asarray(gnom).astype(float)
for temp in temperatures:
with self.subTest(gnom=gnom, temp=temp):
mm_dist = model.apply_distortion(np.array(gnom))
temp_scale = model.get_temperature_scale(temp)
mm_dist *= temp_scale
pix_dist = ((model.intrinsic_matrix[:, :2] @ mm_dist).T + model.intrinsic_matrix[:, 2]).T
pix_undist = model.undistort_pixels(pix_dist, temperature=temp)
gnom *= temp_scale
pix_pinhole = ((model.intrinsic_matrix[:, :2] @ gnom).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_allclose(pix_undist, pix_pinhole, atol=1e-13)
def test_pixels_to_unit(self):
intrins_param = {"focal_length": 32.7, "kx": 3000, "ky": 4000,
"px": 4005.23, 'py': 2000.33, 'a1': 1e-5, 'a2': -1e-10, 'a3': 2e-4,
'misalignment': [[1e-10, 2e-13, -3e-12], [4e-8, -5.3e-9, 9e-15]]}
camera_vecs = [[0, 0, 1], [0.01, 0, 1], [-0.01, 0, 1], [0, 0.01, 1], [0, -0.01, 1], [0.01, 0.01, 1],
[-0.01, -0.01, 1], [[0.01, -0.01], [-0.01, 0.01], [1, 1]]]
temperatures = [0, 1, -1, 10.5, -10.5]
model = self.Class(**intrins_param)
# TODO: consider adjusting so this isn't needed
model.estimate_multiple_misalignments = True
for vec in camera_vecs:
for image in [0, 1]:
for temp in temperatures:
with self.subTest(vec=vec, image=image, temp=temp):
pixel_loc = model.project_onto_image(vec, image=image, temperature=temp)
unit_vec = model.pixels_to_unit(pixel_loc, image=image, temperature=temp)
unit_true = np.array(vec).astype(np.float64)
unit_true /= np.linalg.norm(unit_true, axis=0, keepdims=True)
np.testing.assert_allclose(unit_vec, unit_true, atol=1e-13)
def test_overwrite(self):
model1 = self.Class(field_of_view=10, intrinsic_matrix=np.array([[1, 0, 3], [0, 5, 6]]), focal_length=60,
misalignment=[[1, 2, 3], [4, 5, 6]], use_a_priori=False,
estimation_parameters=['multiple misalignments'])
model2 = self.Class(field_of_view=20, intrinsic_matrix=np.array([[11, 0, 13], [0, 15, 16]]), focal_length=160,
misalignment=[[11, 12, 13], [14, 15, 16]], use_a_priori=True,
estimation_parameters=['single misalignment'])
modeltest = model1.copy()
modeltest.overwrite(model2)
self.assertEqual(model2.field_of_view, modeltest.field_of_view)
self.assertEqual(model2.use_a_priori, modeltest.use_a_priori)
self.assertEqual(model2.estimate_multiple_misalignments, modeltest.estimate_multiple_misalignments)
np.testing.assert_array_equal(model2.intrinsic_matrix, modeltest.intrinsic_matrix)
np.testing.assert_array_equal(model2.misalignment, modeltest.misalignment)
np.testing.assert_array_equal(model2.estimation_parameters, modeltest.estimation_parameters)
modeltest = model2.copy()
modeltest.overwrite(model1)
self.assertEqual(model1.field_of_view, modeltest.field_of_view)
self.assertEqual(model1.use_a_priori, modeltest.use_a_priori)
self.assertEqual(model1.estimate_multiple_misalignments, modeltest.estimate_multiple_misalignments)
np.testing.assert_array_equal(model1.intrinsic_matrix, modeltest.intrinsic_matrix)
np.testing.assert_array_equal(model1.misalignment, modeltest.misalignment)
np.testing.assert_array_equal(model1.estimation_parameters, modeltest.estimation_parameters)
def test_distort_pixels(self):
model = self.Class(kx=1000, ky=-950.5, px=4500, py=139.32, a1=1e-3, a2=1e-4, a3=1e-5)
pixels = [[0, 1], [1, 0], [-1, 0], [0, -1], [9000., 200.2],
[[4500, 100, 10.98], [0, 139.23, 200.3]]]
temperatures = [0, 1, -1, 10.5, -10.5]
for pix in pixels:
for temp in temperatures:
with self.subTest(pix=pix, temp=temp):
undist_pix = model.undistort_pixels(pix, temperature=temp)
dist_pix = model.distort_pixels(undist_pix, temperature=temp)
np.testing.assert_allclose(dist_pix, pix)
def test_distortion_map(self):
model = self.Class(kx=100, ky=-985.234, px=1000, py=1095)
rows, cols, dist = model.distortion_map((2000, 250), step=10)
# noinspection PyTypeChecker
np.testing.assert_allclose(dist, 0, atol=1e-10)
rl, cl = np.arange(0, 2000, 10), np.arange(0, 250, 10)
rs, cs = np.meshgrid(rl, cl, indexing='ij')
np.testing.assert_array_equal(rows, rs)
np.testing.assert_array_equal(cols, cs)
def test_undistort_image(self):
# not sure how best to do this test...
pass
def test_copy(self):
model = self.Class()
model_copy = model.copy()
model.kx = 1000
model.ky = 999
model.px = 100
model.py = -20
model.a1 = 5
model.a2 = 6
model.a3 = 7
model._focal_length = 11231
model.field_of_view = 1231231
model.use_a_priori = True
model.estimation_parameters = ['a1', 'kx', 'ky']
model.estimate_multiple_misalignments = True
model.misalignment = [1231241, 123124, .12]
self.assertNotEqual(model.kx, model_copy.kx)
self.assertNotEqual(model.ky, model_copy.ky)
self.assertNotEqual(model.px, model_copy.px)
self.assertNotEqual(model.py, model_copy.py)
self.assertNotEqual(model.a1, model_copy.a1)
self.assertNotEqual(model.a2, model_copy.a2)
self.assertNotEqual(model.a3, model_copy.a3)
self.assertNotEqual(model.focal_length, model_copy.focal_length)
self.assertNotEqual(model.field_of_view, model_copy.field_of_view)
self.assertNotEqual(model.use_a_priori, model_copy.use_a_priori)
self.assertNotEqual(model.estimate_multiple_misalignments, model_copy.estimate_multiple_misalignments)
self.assertNotEqual(model.estimation_parameters, model_copy.estimation_parameters)
self.assertTrue((model.misalignment != model_copy.misalignment).all())
def test_to_from_elem(self):
element = etree.Element(self.Class.__name__)
model = self.Class(focal_length=20, field_of_view=5, use_a_priori=True,
misalignment=[1, 2, 3], kx=2, ky=200, px=50, py=300,
a1=37, a2=1, a3=-1230,
estimation_parameters=['a1', 'multiple misalignments'], n_rows=20, n_cols=30)
model_copy = model.copy()
with self.subTest(misalignment=True):
element = model.to_elem(element, misalignment=True)
self.assertEqual(model, model_copy)
model_new = self.Class.from_elem(element)
self.assertEqual(model, model_new)
with self.subTest(misalignment=False):
element = model.to_elem(element, misalignment=False)
self.assertEqual(model, model_copy)
model_new = self.Class.from_elem(element)
model.estimation_parameters[-1] = 'single misalignment'
model.estimate_multiple_misalignments = False
model.misalignment = np.zeros(3)
self.assertEqual(model, model_new)
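# TestOwenModel inherits every case from TestPinholeModel and re-runs it with
# self.Class = OwenModel, overriding only the tests where the Owen model differs:
# the kxy/kyx skew terms and the six-element distortion (radial, tangential, and
# pinwheel) coefficients.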
class TestOwenModel(TestPinholeModel):
def setUp(self):
self.Class = OwenModel
def test___init__(self):
model = self.Class(intrinsic_matrix=np.array([[1, 2, 3], [4, 5, 6]]), focal_length=10.5, field_of_view=20.5,
use_a_priori=True, misalignment=[np.zeros(3), np.ones(3)],
distortion_coefficients=np.array([1, 2, 3, 4, 5, 6]),
estimation_parameters='basic intrinsic',
a1=1, a2=2, a3=3)
np.testing.assert_array_equal(model.intrinsic_matrix, [[1, 2, 3], [4, 5, 6]])
self.assertEqual(model.focal_length, 10.5)
self.assertEqual(model.field_of_view, 20.5)
self.assertTrue(model.use_a_priori)
self.assertEqual(model.estimation_parameters, ['basic intrinsic'])
self.assertEqual(model.a1, 1)
self.assertEqual(model.a2, 2)
self.assertEqual(model.a3, 3)
np.testing.assert_array_equal(model.distortion_coefficients, np.arange(1, 7))
model = self.Class(kx=1, ky=2, px=4, py=5, focal_length=10.5, field_of_view=20.5,
use_a_priori=True, misalignment=[np.zeros(3), np.ones(3)], kxy=80, kyx=90,
estimation_parameters=['focal_length', 'px'], n_rows=500, n_cols=600,
e1=1, radial2=2, pinwheel2=3, e4=4, tangential_x=6, e5=5)
np.testing.assert_array_equal(model.intrinsic_matrix, [[1, 80, 4], [90, 2, 5]])
self.assertEqual(model.focal_length, 10.5)
self.assertEqual(model.field_of_view, 20.5)
self.assertTrue(model.use_a_priori)
self.assertEqual(model.estimation_parameters, ['focal_length', 'px'])
self.assertEqual(model.n_rows, 500)
self.assertEqual(model.n_cols, 600)
np.testing.assert_array_equal(model.distortion_coefficients, [2, 4, 5, 6, 1, 3])
def test_kxy(self):
model = self.Class(intrinsic_matrix=np.array([[0, 1, 0], [0, 0, 0]]))
self.assertEqual(model.kxy, 1)
model.kxy = 100
self.assertEqual(model.kxy, 100)
self.assertEqual(model.intrinsic_matrix[0, 1], 100)
def test_kyx(self):
model = self.Class(intrinsic_matrix=np.array([[0, 0, 0], [3, 0, 0]]))
self.assertEqual(model.kyx, 3)
model.kyx = 100
self.assertEqual(model.kyx, 100)
self.assertEqual(model.intrinsic_matrix[1, 0], 100)
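# The e1-e6 properties alias into distortion_coefficients, which is stored as
# [radial2, radial4, tangential_y, tangential_x, pinwheel1, pinwheel2]:
# e1 = pinwheel1 (index 4), e2 = radial2 (0), e3 = pinwheel2 (5),
# e4 = radial4 (1), e5 = tangential_y (2), e6 = tangential_x (3),
# as the paired getter/setter tests below exercise.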
def test_e1(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 0, 1, 0]))
self.assertEqual(model.e1, 1)
model.e1 = 100
self.assertEqual(model.e1, 100)
self.assertEqual(model.distortion_coefficients[4], 100)
def test_e2(self):
model = self.Class(distortion_coefficients=np.array([1, 0, 0, 0, 0, 0]))
self.assertEqual(model.e2, 1)
model.e2 = 100
self.assertEqual(model.e2, 100)
self.assertEqual(model.distortion_coefficients[0], 100)
def test_e3(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 0, 0, 1]))
self.assertEqual(model.e3, 1)
model.e3 = 100
self.assertEqual(model.e3, 100)
self.assertEqual(model.distortion_coefficients[5], 100)
def test_e4(self):
model = self.Class(distortion_coefficients=np.array([0, 1, 0, 0, 0, 0]))
self.assertEqual(model.e4, 1)
model.e4 = 100
self.assertEqual(model.e4, 100)
self.assertEqual(model.distortion_coefficients[1], 100)
def test_e5(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 1, 0, 0, 0]))
self.assertEqual(model.e5, 1)
model.e5 = 100
self.assertEqual(model.e5, 100)
self.assertEqual(model.distortion_coefficients[2], 100)
def test_e6(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 1, 0, 0]))
self.assertEqual(model.e6, 1)
model.e6 = 100
self.assertEqual(model.e6, 100)
self.assertEqual(model.distortion_coefficients[3], 100)
def test_pinwheel1(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 0, 1, 0]))
self.assertEqual(model.pinwheel1, 1)
model.pinwheel1 = 100
self.assertEqual(model.pinwheel1, 100)
self.assertEqual(model.distortion_coefficients[4], 100)
def test_radial2(self):
model = self.Class(distortion_coefficients=np.array([1, 0, 0, 0, 0, 0]))
self.assertEqual(model.radial2, 1)
model.radial2 = 100
self.assertEqual(model.radial2, 100)
self.assertEqual(model.distortion_coefficients[0], 100)
def test_pinwheel2(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 0, 0, 1]))
self.assertEqual(model.pinwheel2, 1)
model.pinwheel2 = 100
self.assertEqual(model.pinwheel2, 100)
self.assertEqual(model.distortion_coefficients[5], 100)
def test_radial4(self):
model = self.Class(distortion_coefficients=np.array([0, 1, 0, 0, 0, 0]))
self.assertEqual(model.radial4, 1)
model.radial4 = 100
self.assertEqual(model.radial4, 100)
self.assertEqual(model.distortion_coefficients[1], 100)
def test_tangential_y(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 1, 0, 0, 0]))
self.assertEqual(model.tangential_y, 1)
model.tangential_y = 100
self.assertEqual(model.tangential_y, 100)
self.assertEqual(model.distortion_coefficients[2], 100)
def test_tangential_x(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 1, 0, 0]))
self.assertEqual(model.tangential_x, 1)
model.tangential_x = 100
self.assertEqual(model.tangential_x, 100)
self.assertEqual(model.distortion_coefficients[3], 100)
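# The expected solutions below are consistent with the Owen distortion model
# applied one coefficient at a time:
# delta = (radial2*r**2 + radial4*r**4)*[x, y]
#       + (tangential_y*y + tangential_x*x)*[x, y]
#       + (pinwheel1*r + pinwheel2*r**3)*[-y, x]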
def test_apply_distortion(self):
dist_coefs = [{"radial2": 1.5, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 1.5, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 1.5, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 1.5,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 1.5, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 1.5}]
inputs = [[0, 0], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [[1.5], [0]], [[1.5, -1], [0, 0]],
[0, 1], [0, -1], [0, 1.5], [0, -1.5], [[0], [1.5]], [[0, 0], [1.5, -1]], [1, 1]]
solus = [[[0, 0], [2.5, 0], [-2.5, 0], [1.5 + 1.5 ** 4, 0], [-(1.5 + 1.5 ** 4), 0],
[[(1.5 + 1.5 ** 4)], [0]], [[(1.5 + 1.5 ** 4), -2.5], [0, 0]],
[0, 2.5], [0, -2.5], [0, (1.5 + 1.5 ** 4)], [0, -(1.5 + 1.5 ** 4)], [[0], [(1.5 + 1.5 ** 4)]],
[[0, 0], [(1.5 + 1.5 ** 4), -2.5]], [1 + 2 * 1.5, 1 + 2 * 1.5]],
[[0, 0], [2.5, 0], [-2.5, 0], [(1.5 + 1.5 ** 6), 0], [-(1.5 + 1.5 ** 6), 0],
[[(1.5 + 1.5 ** 6)], [0]], [[(1.5 + 1.5 ** 6), -2.5], [0, 0]],
[0, 2.5], [0, -2.5], [0, (1.5 + 1.5 ** 6)], [0, -(1.5 + 1.5 ** 6)], [[0], [(1.5 + 1.5 ** 6)]],
[[0, 0], [(1.5 + 1.5 ** 6), -2.5]], [1 + 4 * 1.5, 1 + 4 * 1.5]],
[[0, 0], [2.5, 0], [0.5, 0], [(1.5 + 1.5 ** 3), 0], [-1.5 + 1.5 ** 3, 0],
[[(1.5 + 1.5 ** 3)], [0]], [[(1.5 + 1.5 ** 3), 0.5], [0, 0]],
[0, 1], [0, -1], [0, 1.5], [0, -1.5], [[0], [1.5]], [[0, 0], [1.5, -1]], [2.5, 2.5]],
[[0, 0], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [[1.5], [0]], [[1.5, -1], [0, 0]],
[0, 2.5], [0, 0.5], [0, 1.5 + 1.5 ** 3], [0, -1.5 + 1.5 ** 3], [[0], [1.5 + 1.5 ** 3]],
[[0, 0], [1.5 + 1.5 ** 3, 0.5]], [2.5, 2.5]],
[[0, 0], [1, 1.5], [-1, -1.5], [1.5, 1.5 ** 3], [-1.5, -1.5 ** 3],
[[1.5], [1.5 ** 3]], [[1.5, -1], [1.5 ** 3, -1.5]],
[-1.5, 1], [1.5, -1], [-1.5 ** 3, 1.5], [1.5 ** 3, -1.5], [[-1.5 ** 3], [1.5]],
[[-1.5 ** 3, 1.5], [1.5, -1]],
[1 - np.sqrt(2) * 1.5, 1 + np.sqrt(2) * 1.5]],
[[0, 0], [1, 1.5], [-1, -1.5], [1.5, 1.5 ** 5], [-1.5, -1.5 ** 5],
[[1.5], [1.5 ** 5]], [[1.5, -1], [1.5 ** 5, -1.5]],
[-1.5, 1], [1.5, -1], [-1.5 ** 5, 1.5], [1.5 ** 5, -1.5], [[-1.5 ** 5], [1.5]],
[[-1.5 ** 5, 1.5], [1.5, -1]],
[1 - 2 * np.sqrt(2) * 1.5, 1 + 2 * np.sqrt(2) * 1.5]]]
for dist, sols in zip(dist_coefs, solus):
with self.subTest(**dist):
model = self.Class(**dist)
for inp, solu in zip(inputs, sols):
gnom_dist = model.apply_distortion(np.array(inp))
np.testing.assert_array_almost_equal(gnom_dist, solu)
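# get_projections returns the pinhole (gnomic) location, the distorted location,
# and the pixel location. The truth chain below reproduces it: pin = f * [x, y] / z,
# then apply_distortion, then the temperature scale, then the intrinsic matrix.
# Misalignment is modeled as a rotation of the camera-frame point before
# projection, so [0, 0, pi] about z negates both gnomic components.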
def test_get_projections(self):
points = [[0, 0, 1], [-0.1, 0.2, 2.2], [[-0.1], [0.2], [2.2]], [[-0.1, 0], [0.2, 0], [2.2, 1]]]
model = self.Class(focal_length=8.7, kx=500, ky=500.5, kxy=1.5, kyx=-1.5, px=1500, py=1500.5,
radial2=1e-3, radial4=-2.2e-5, tangential_y=1e-3, tangential_x=1e-6,
pinwheel1=1e-6, pinwheel2=-2.23e-8, a1=1e-1, a2=1e-6, a3=-3e-7)
temps = [0, 1, -1, 10, -10]
for temp in temps:
with self.subTest(misalignment=None, temp=temp):
for point in points:
pin, dist, pix = model.get_projections(point, temperature=temp)
pin_true = model.focal_length * np.array(point[:2]) / point[2]
dist_true = model.apply_distortion(pin_true)
dist_true *= model.get_temperature_scale(temp)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], dist_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_equal(pin, pin_true)
np.testing.assert_array_equal(dist, dist_true)
np.testing.assert_array_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, kxy=1.5, kyx=-1.5, px=1500, py=1500.5,
radial2=1e-3, radial4=-2.2e-5, tangential_y=1e-3, tangential_x=1e-6,
pinwheel1=1e-6, pinwheel2=-2.23e-8, misalignment=[0, 0, np.pi])
with self.subTest(misalignment=[0, 0, np.pi]):
for point in points:
pin, dist, pix = model.get_projections(point)
pin_true = -model.focal_length * np.array(point[:2]) / point[2]
dist_true = model.apply_distortion(pin_true)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], dist_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pin, pin_true)
np.testing.assert_array_almost_equal(dist, dist_true)
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, kxy=1.5, kyx=-1.5, px=1500, py=1500.5,
radial2=1e-3, radial4=-2.2e-5, tangential_y=1e-3, tangential_x=1e-6,
pinwheel1=1e-6, pinwheel2=-2.23e-8, misalignment=[np.pi, 0, 0])
with self.subTest(misalignment=[np.pi, 0, 0]):
for point in points:
pin, dist, pix = model.get_projections(point)
pin_true = model.focal_length * np.array(point[:2]) / point[2]
pin_true[0] *= -1
dist_true = model.apply_distortion(pin_true)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], dist_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pin, pin_true)
np.testing.assert_array_almost_equal(dist, dist_true)
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, kxy=1.5, kyx=-1.5, px=1500, py=1500.5,
radial2=1e-3, radial4=-2.2e-5, tangential_y=1e-3, tangential_x=1e-6,
pinwheel1=1e-6, pinwheel2=-2.23e-8, misalignment=[0, np.pi, 0])
with self.subTest(misalignment=[0, np.pi, 0]):
for point in points:
pin, dist, pix = model.get_projections(point)
pin_true = model.focal_length * np.array(point[:2]) / point[2]
pin_true[1] *= -1
dist_true = model.apply_distortion(pin_true)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], dist_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pin, pin_true)
np.testing.assert_array_almost_equal(dist, dist_true)
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, kxy=1.5, kyx=-1.5, px=1500, py=1500.5,
radial2=1e-3, radial4=-2.2e-5, tangential_y=1e-3, tangential_x=1e-6,
pinwheel1=1e-6, pinwheel2=-2.23e-8, misalignment=[1, 0.2, 0.3])
with self.subTest(misalignment=[1, 0.2, 0.3]):
rot_mat = at.rotvec_to_rotmat([1, 0.2, 0.3]).squeeze()
for point in points:
point_new = rot_mat @ point
pin, dist, pix = model.get_projections(point)
pin_true = model.focal_length * np.array(point_new[:2]) / np.array(point_new[2])
dist_true = model.apply_distortion(pin_true)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], dist_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pin, pin_true)
np.testing.assert_array_almost_equal(dist, dist_true)
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, kxy=1.5, kyx=-1.5, px=1500, py=1500.5,
radial2=1e-3, radial4=-2.2e-5, tangential_y=1e-3, tangential_x=1e-6,
pinwheel1=1e-6, pinwheel2=-2.23e-8, misalignment=[[1, 0.2, 0.3], [0, 0, np.pi]])
model.estimate_multiple_misalignments = True
with self.subTest(misalignment=[[1, 0.2, 0.3], [0, 0, np.pi]]):
rot_mat = at.rotvec_to_rotmat([1, 0.2, 0.3]).squeeze()
for point in points:
point_new = rot_mat @ point
pin, dist, pix = model.get_projections(point, image=0)
pin_true = model.focal_length * np.array(point_new[:2]) / np.array(point_new[2])
dist_true = model.apply_distortion(pin_true)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], dist_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pin, pin_true)
np.testing.assert_array_almost_equal(dist, dist_true)
np.testing.assert_array_almost_equal(pix, pix_true)
pin, dist, pix = model.get_projections(point, image=1)
pin_true = -model.focal_length * np.array(point[:2]) / point[2]
dist_true = model.apply_distortion(pin_true)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], dist_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pin, pin_true)
np.testing.assert_array_almost_equal(dist, dist_true)
np.testing.assert_array_almost_equal(pix, pix_true)
def test_project_onto_image(self):
points = [[0, 0, 1], [-0.1, 0.2, 2.2], [[-0.1], [0.2], [2.2]], [[-0.1, 0], [0.2, 0], [2.2, 1]]]
model = self.Class(focal_length=8.7, kx=500, ky=500.5, kxy=1.5, kyx=-1.5, px=1500, py=1500.5,
radial2=1e-3, radial4=-2.2e-5, tangential_y=1e-3, tangential_x=1e-6,
pinwheel1=1e-6, pinwheel2=-2.23e-8, a1=1, a2=2, a3=-3)
temps = [0, 1, -1, 10, -10]
for temp in temps:
with self.subTest(misalignment=None, temp=temp):
for point in points:
_, __, pix = model.get_projections(point, temperature=temp)
pix_proj = model.project_onto_image(point, temperature=temp)
np.testing.assert_array_equal(pix, pix_proj)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, kxy=1.5, kyx=-1.5, px=1500, py=1500.5,
radial2=1e-3, radial4=-2.2e-5, tangential_y=1e-3, tangential_x=1e-6,
pinwheel1=1e-6, pinwheel2=-2.23e-8, misalignment=[[1, 0.2, 0.3], [0, 0, np.pi]])
model.estimate_multiple_misalignments = True
with self.subTest(misalignment=[[1, 0.2, 0.3], [0, 0, np.pi]]):
for point in points:
_, __, pix = model.get_projections(point, image=0)
pix_proj = model.project_onto_image(point, image=0)
np.testing.assert_array_equal(pix, pix_proj)
_, __, pix = model.get_projections(point, image=1)
pix_proj = model.project_onto_image(point, image=1)
np.testing.assert_array_equal(pix, pix_proj)
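# compute_pixel_jacobian returns, for each input vector, the 2 x 3 partial of the
# projected pixel with respect to the camera-frame vector; num_deriv builds the
# same thing by central differences on each component and a swapaxes to put the
# point index first.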
def test_compute_pixel_jacobian(self):
def num_deriv(uvec, cmodel, delta=1e-8, image=0, temperature=0) -> np.ndarray:
uvec = np.array(uvec).reshape(3, -1)
pix_true = cmodel.project_onto_image(uvec, image=image, temperature=temperature)
uvec_pert = uvec + [[delta], [0], [0]]
pix_pert_x_f = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
uvec_pert = uvec + [[0], [delta], [0]]
pix_pert_y_f = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
uvec_pert = uvec + [[0], [0], [delta]]
pix_pert_z_f = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
uvec_pert = uvec - [[delta], [0], [0]]
pix_pert_x_b = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
uvec_pert = uvec - [[0], [delta], [0]]
pix_pert_y_b = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
uvec_pert = uvec - [[0], [0], [delta]]
pix_pert_z_b = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
return np.array([(pix_pert_x_f-pix_pert_x_b)/(2*delta),
(pix_pert_y_f-pix_pert_y_b)/(2*delta),
(pix_pert_z_f-pix_pert_z_b)/(2*delta)]).swapaxes(0, -1)
model_param = {"focal_length": 100.75, "radial2": 1.5e-5, "radial4": 1.5e-5, "tangential_x": 1.5e-5,
"tangential_y": 1.5e-5, "pinwheel1": 1.5e-5, "pinwheel2": 1.5e-5, "kx": 30, "ky": 40,
"kxy": 0.5, "kyx": -0.8, "px": 4005.23, "py": 4005.23,
"misalignment": [[1e-8, 1e-9, 1e-10], [-1e-8, 2e-9, -1e-11], [2e-10, -5e-12, 1e-9]],
"a1": 1e-6, "a2": 2e-7, "a3": 3e-8}
inputs = [np.array([[0.5, 0, 1]]).T,
np.array([[0, 0.5, 1], [0.5, 0.5, 1], [-0.5, 0, 1]]).T,
np.array([[0.1, -0.5, 1], [-0.5, -0.5, 1], [5, 10, 1000.23], [1, 2, 1200.23]]).T]
temperatures = [0, 1, -1, 10.5, -10.5]
model = self.Class(**model_param)
model.estimate_multiple_misalignments = True
for temp in temperatures:
for inp in inputs:
for image in range(3):
with self.subTest(image=image, temp=temp, inp=inp):
jac_ana = model.compute_pixel_jacobian(inp, image=image, temperature=temp)
jac_num = num_deriv(inp, model, image=image, temperature=temp, delta=1e-6)
np.testing.assert_allclose(jac_ana, jac_num, rtol=1e-3, atol=1e-10)
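# The g2u helper below maps a gnomic location to a unit vector by appending the
# focal length as the z component and normalizing; this matches the quantity whose
# partial _compute_dcamera_point_dgnomic returns (its second argument is the norm
# sqrt(x**2 + y**2 + f**2)).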
def test__compute_dcamera_point_dgnomic(self):
def num_deriv(gnomic_locations, cmodel, delta=1e-6) -> np.ndarray:
def g2u(g):
v = np.vstack([g, cmodel.focal_length*np.ones(g.shape[-1])])
return v/np.linalg.norm(v, axis=0, keepdims=True)
gnomic_locations = np.asarray(gnomic_locations).reshape(2, -1)
gnom_pert = gnomic_locations + [[delta], [0]]
cam_loc_pert_x_f = g2u(gnom_pert)
gnom_pert = gnomic_locations + [[0], [delta]]
cam_loc_pert_y_f = g2u(gnom_pert)
gnom_pert = gnomic_locations - [[delta], [0]]
cam_loc_pert_x_b = g2u(gnom_pert)
gnom_pert = gnomic_locations - [[0], [delta]]
cam_loc_pert_y_b = g2u(gnom_pert)
return np.array([(cam_loc_pert_x_f - cam_loc_pert_x_b)/(2*delta),
(cam_loc_pert_y_f - cam_loc_pert_y_b)/(2*delta)]).swapaxes(0, -1)
model_param = {"focal_length": 100.75, "radial2": 1.5e-8, "radial4": 1.5e-8, "tangential_x": 1.5e-8,
"tangential_y": 1.5e-8, "pinwheel1": 1.5e-8, "pinwheel2": 1.5e-8, "kx": 300, "ky": 400,
"kxy": 0.5, "kyx": -0.8, "px": 1005.23, "py": 1005.23,
"misalignment": [[1e-8, 1e-9, 1e-10], [-1e-8, 2e-9, -1e-11], [2e-10, -5e-12, 1e-9]],
"a1": 1e-6, "a2": 2e-7, "a3": 3e-8}
inputs = [np.array([[0, 0]]).T,
np.array([[0, 2000], [2000, 0], [2000, 2000]]).T,
np.array([[1000, 1000], [1000, 2000], [2000, 1000], [0, 1000], [1000, 0]]).T]
model = self.Class(**model_param)
model.estimate_multiple_misalignments = True
for inp in inputs:
with self.subTest(inp=inp):
jac_ana = []
for gnom in inp.T:
jac_ana.append(
model._compute_dcamera_point_dgnomic(gnom, np.sqrt(np.sum(gnom*gnom) + model.focal_length**2)))
jac_ana = np.array(jac_ana)
jac_num = num_deriv(inp, model)
np.testing.assert_almost_equal(jac_ana, jac_num)
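# The dg2g helper below numerically inverts apply_distortion, so its central
# differences give the partial of the undistorted gnomic point with respect to
# the distorted one.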
def test__compute_dgnomic_ddist_gnomic(self):
def num_deriv(dist_gnomic_locations, cmodel, delta=1e-6) -> np.ndarray:
def dg2g(dg):
gnomic_guess = dg.copy()
# perform the fixed-point iteration to invert the distortion
for _ in np.arange(20):
# get the distorted location assuming the current guess is correct
gnomic_guess_distorted = cmodel.apply_distortion(gnomic_guess)
# subtract off the residual distortion from the gnomic guess
gnomic_guess += dg - gnomic_guess_distorted
# check for convergence
if np.all(np.linalg.norm(gnomic_guess_distorted - dg, axis=0) <= 1e-15):
break
return gnomic_guess
dist_gnomic_locations = np.asarray(dist_gnomic_locations).reshape(2, -1)
dist_gnom_pert = dist_gnomic_locations + [[delta], [0]]
gnom_loc_pert_x_f = dg2g(dist_gnom_pert)
dist_gnom_pert = dist_gnomic_locations + [[0], [delta]]
gnom_loc_pert_y_f = dg2g(dist_gnom_pert)
dist_gnom_pert = dist_gnomic_locations - [[delta], [0]]
gnom_loc_pert_x_b = dg2g(dist_gnom_pert)
dist_gnom_pert = dist_gnomic_locations - [[0], [delta]]
gnom_loc_pert_y_b = dg2g(dist_gnom_pert)
return np.array([(gnom_loc_pert_x_f - gnom_loc_pert_x_b)/(2*delta),
(gnom_loc_pert_y_f - gnom_loc_pert_y_b)/(2*delta)]).swapaxes(0, -1)
model_param = {"focal_length": 100.75, "radial2": 1.5e-8, "radial4": 1.5e-8, "tangential_x": 1.5e-8,
"tangential_y": 1.5e-8, "pinwheel1": 1.5e-8, "pinwheel2": 1.5e-8, "kx": 300, "ky": 400,
"kxy": 0.5, "kyx": -0.8, "px": 1005.23, "py": 1005.23,
"misalignment": [[1e-8, 1e-9, 1e-10], [-1e-8, 2e-9, -1e-11], [2e-10, -5e-12, 1e-9]],
"a1": 1e-6, "a2": 2e-7, "a3": 3e-8}
inputs = [np.array([[0, 0]]).T,
np.array([[0, 0.1], [0.1, 0], [0.1, 0.1]]).T,
np.array([[-0.1, 0], [0, -0.1], [-0.1, -0.1], [0.1, -0.1], [-0.1, 0.1]]).T]
model = self.Class(**model_param)
model.estimate_multiple_misalignments = True
for inp in inputs:
with self.subTest(inp=inp):
jac_ana = []
for dist_gnom in inp.T:
jac_ana.append(model._compute_dgnomic_ddist_gnomic(dist_gnom))
jac_ana = np.array(jac_ana)
jac_num = num_deriv(inp, model)
np.testing.assert_almost_equal(jac_ana, jac_num)
def test_compute_unit_vector_jacobian(self):
def num_deriv(pixels, cmodel, delta=1e-6, image=0, temperature=0) -> np.ndarray:
pixels = np.array(pixels).reshape(2, -1)
pix_pert = pixels + [[delta], [0]]
uvec_pert_x_f = cmodel.pixels_to_unit(pix_pert, image=image, temperature=temperature)
pix_pert = pixels + [[0], [delta]]
uvec_pert_y_f = cmodel.pixels_to_unit(pix_pert, image=image, temperature=temperature)
pix_pert = pixels - [[delta], [0]]
uvec_pert_x_b = cmodel.pixels_to_unit(pix_pert, image=image, temperature=temperature)
pix_pert = pixels - [[0], [delta]]
uvec_pert_y_b = cmodel.pixels_to_unit(pix_pert, image=image, temperature=temperature)
return np.array([(uvec_pert_x_f-uvec_pert_x_b)/(2*delta),
(uvec_pert_y_f-uvec_pert_y_b)/(2*delta)]).swapaxes(0, -1)
model_param = {"focal_length": 100.75, "radial2": 1.5e-8, "radial4": 1.5e-8, "tangential_x": 1.5e-8,
"tangential_y": 1.5e-8, "pinwheel1": 1.5e-8, "pinwheel2": 1.5e-8, "kx": 300, "ky": 400,
"kxy": 0.5, "kyx": -0.8, "px": 1005.23, "py": 1005.23,
"misalignment": [[1e-8, 1e-9, 1e-10], [-1e-8, 2e-9, -1e-11], [2e-10, -5e-12, 1e-9]],
"a1": 1e-6, "a2": 2e-7, "a3": 3e-8}
inputs = [np.array([[0, 0]]).T,
np.array([[0, 2000], [2000, 0], [2000, 2000]]).T,
np.array([[1000, 1000], [1000, 2000], [2000, 1000], [0, 1000], [1000, 0]]).T]
temperatures = [0, 1, -1, 10.5, -10.5]
model = self.Class(**model_param)
model.estimate_multiple_misalignments = True
for temp in temperatures:
for inp in inputs:
for image in range(3):
with self.subTest(image=image, temp=temp, inp=inp):
jac_ana = model.compute_unit_vector_jacobian(inp, image=image, temperature=temp)
jac_num = num_deriv(inp, model, image=image, temperature=temp, delta=1e-2)
np.testing.assert_allclose(jac_ana, jac_num, rtol=1e-3, atol=1e-10)
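# The distortion Jacobian is taken with respect to the additive distortion
# delta(x) = apply_distortion(x) - x, so num_deriv subtracts the perturbed input
# back out before differencing. The powers of r are precomputed and passed
# through to the analytical form.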
def test__compute_ddistortion_dgnomic(self):
def num_deriv(loc, cmodel, delta=1e-8) -> np.ndarray:
loc_pert = np.array(loc) + [delta, 0]
dist_pert_x_f = cmodel.apply_distortion(loc_pert) - loc_pert
loc_pert = np.array(loc) + [0, delta]
dist_pert_y_f = cmodel.apply_distortion(loc_pert) - loc_pert
loc_pert = np.array(loc) - [delta, 0]
dist_pert_x_b = cmodel.apply_distortion(loc_pert) - loc_pert
loc_pert = np.array(loc) - [0, delta]
dist_pert_y_b = cmodel.apply_distortion(loc_pert) - loc_pert
return np.array(
[(dist_pert_x_f - dist_pert_x_b) / (2 * delta), (dist_pert_y_f - dist_pert_y_b) / (2 * delta)]).T
dist_coefs = [{"radial2": 1.5, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 1.5, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 1.5, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 1.5,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 1.5, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 1.5},
{"e1": -1.5, "e2": -1.5, "e3": -1.5, "e4": -1.5, "e5": -1.5, "e6": -1.5}]
inputs = [[1e-6, 1e-6], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [0, 1], [0, -1], [0, 1.5], [0, -1.5], [1, 1]]
for dist_coef in dist_coefs:
model = self.Class(**dist_coef)
with self.subTest(**dist_coef):
for inp in inputs:
r = np.sqrt(inp[0] ** 2 + inp[1] ** 2)
r2 = r ** 2
r3 = r ** 3
r4 = r ** 4
num = num_deriv(inp, model)
ana = model._compute_ddistortion_dgnomic(np.array(inp), r, r2, r3, r4)
np.testing.assert_allclose(num, ana, atol=1e-10)
def test__compute_dpixel_ddistorted_gnomic(self):
def num_deriv(loc, cmodel, delta=1e-8, temperature=0) -> np.ndarray:
loc_pert = np.array(loc) + [delta, 0]
loc_pert *= cmodel.get_temperature_scale(temperature)
pix_pert_x_f = cmodel.intrinsic_matrix[:, :2] @ loc_pert + cmodel.intrinsic_matrix[:, 2]
loc_pert = np.array(loc) + [0, delta]
loc_pert *= cmodel.get_temperature_scale(temperature)
pix_pert_y_f = cmodel.intrinsic_matrix[:, :2] @ loc_pert + cmodel.intrinsic_matrix[:, 2]
loc_pert = np.array(loc) - [delta, 0]
loc_pert *= cmodel.get_temperature_scale(temperature)
pix_pert_x_b = cmodel.intrinsic_matrix[:, :2] @ loc_pert + cmodel.intrinsic_matrix[:, 2]
loc_pert = np.array(loc) - [0, delta]
loc_pert *= cmodel.get_temperature_scale(temperature)
pix_pert_y_b = cmodel.intrinsic_matrix[:, :2] @ loc_pert + cmodel.intrinsic_matrix[:, 2]
return np.array(
[(pix_pert_x_f - pix_pert_x_b) / (2 * delta), (pix_pert_y_f - pix_pert_y_b) / (2 * delta)]).T
intrins_coefs = [{"kx": 1.5, "kxy": 0, "ky": 0, "kyx": 0, "px": 0, "py": 0},
{"kx": 0, "kxy": 1.5, "ky": 0, "kyx": 0, "px": 0, "py": 0},
{"kx": 0, "kxy": 0, "ky": 1.5, "kyx": 0, "px": 0, "py": 0},
{"kx": 0, "kxy": 0, "ky": 0, "kyx": 1.5, "px": 0, "py": 0},
{"kx": 0, "kxy": 0, "ky": 0, "kyx": 0, "px": 1.5, "py": 0},
{"kx": 0, "kxy": 0, "ky": 0, "kyx": 0, "px": 0, "py": 1.5},
{"kx": 0, "kxy": 0, "ky": 0, "kyx": 0, "px": 0, "py": 0, "a1": 1.5, "a2": 0, "a3": 0},
{"kx": 0, "kxy": 0, "ky": 0, "kyx": 0, "px": 0, "py": 0, "a1": 0, "a2": 1.5, "a3": 0},
{"kx": 0, "kxy": 0, "ky": 0, "kyx": 0, "px": 0, "py": 0, "a1": 0, "a2": 0, "a3": 1.5},
{"kx": 1.5, "kxy": 1.5, "ky": 1.5, "kyx": 1.5, "px": 1.5, "py": 1.5,
"a1": 1.5, "a2": 1.5, "a3": 1.5}]
inputs = [[1e-6, 1e-6], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [0, 1], [0, -1], [0, 1.5], [0, -1.5], [1, 1]]
temps = [0, 1, -1, 10.5, -10.5]
for temp in temps:
for intrins_coef in intrins_coefs:
model = self.Class(**intrins_coef)
with self.subTest(**intrins_coef, temp=temp):
for inp in inputs:
num = num_deriv(inp, model, temperature=temp)
ana = model._compute_dpixel_ddistorted_gnomic(temperature=temp)
np.testing.assert_allclose(num, ana, atol=1e-10)
def test__compute_dpixel_dintrinsic(self):
def num_deriv(loc, cmodel, delta=1e-6) -> np.ndarray:
model_pert = cmodel.copy()
model_pert.kx += delta
pix_pert_kx_f = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.kxy += delta
pix_pert_kxy_f = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.kyx += delta
pix_pert_kyx_f = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.ky += delta
pix_pert_ky_f = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.px += delta
pix_pert_px_f = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.py += delta
pix_pert_py_f = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.kx -= delta
pix_pert_kx_b = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.kxy -= delta
pix_pert_kxy_b = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.kyx -= delta
pix_pert_kyx_b = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.ky -= delta
pix_pert_ky_b = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.px -= delta
pix_pert_px_b = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.py -= delta
pix_pert_py_b = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
return np.array([(pix_pert_kx_f - pix_pert_kx_b) / (2 * delta),
(pix_pert_kxy_f - pix_pert_kxy_b) / (2 * delta),
(pix_pert_kyx_f - pix_pert_kyx_b) / (2 * delta),
(pix_pert_ky_f - pix_pert_ky_b) / (2 * delta),
(pix_pert_px_f - pix_pert_px_b) / (2 * delta),
(pix_pert_py_f - pix_pert_py_b) / (2 * delta)]).T
intrins_coefs = [{"kx": 1.5, "kxy": 0, "ky": 0, "kyx": 0, "px": 0, "py": 0},
{"kx": 0, "kxy": 1.5, "ky": 0, "kyx": 0, "px": 0, "py": 0},
{"kx": 0, "kxy": 0, "ky": 1.5, "kyx": 0, "px": 0, "py": 0},
{"kx": 0, "kxy": 0, "ky": 0, "kyx": 1.5, "px": 0, "py": 0},
{"kx": 0, "kxy": 0, "ky": 0, "kyx": 0, "px": 1.5, "py": 0},
{"kx": 0, "kxy": 0, "ky": 0, "kyx": 0, "px": 0, "py": 1.5},
{"kx": 1.5, "kxy": 1.5, "ky": 1.5, "kyx": 1.5, "px": 1.5, "py": 1.5}]
inputs = [[1e-6, 1e-6], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [0, 1], [0, -1], [0, 1.5], [0, -1.5], [1, 1]]
for intrins_coef in intrins_coefs:
model = self.Class(**intrins_coef)
with self.subTest(**intrins_coef):
for inp in inputs:
num = num_deriv(inp, model)
ana = model._compute_dpixel_dintrinsic(np.array(inp))
np.testing.assert_allclose(num, ana, atol=1e-10)
def test__compute_ddistorted_gnomic_ddistortion(self):
def num_deriv(loc, cmodel, delta=1e-8) -> np.ndarray:
model_pert = cmodel.copy()
model_pert.radial2 += delta
loc_pert_r2_f = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.radial4 += delta
loc_pert_r4_f = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.tangential_y += delta
loc_pert_ty_f = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.tangential_x += delta
loc_pert_tx_f = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.pinwheel1 += delta
loc_pert_p1_f = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.pinwheel2 += delta
loc_pert_p2_f = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.radial2 -= delta
loc_pert_r2_b = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.radial4 -= delta
loc_pert_r4_b = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.tangential_y -= delta
loc_pert_ty_b = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.tangential_x -= delta
loc_pert_tx_b = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.pinwheel1 -= delta
loc_pert_p1_b = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.pinwheel2 -= delta
loc_pert_p2_b = model_pert.apply_distortion(loc)
return np.array([(loc_pert_r2_f - loc_pert_r2_b) / (2 * delta),
(loc_pert_r4_f - loc_pert_r4_b) / (2 * delta),
(loc_pert_ty_f - loc_pert_ty_b) / (2 * delta),
(loc_pert_tx_f - loc_pert_tx_b) / (2 * delta),
(loc_pert_p1_f - loc_pert_p1_b) / (2 * delta),
(loc_pert_p2_f - loc_pert_p2_b) / (2 * delta)]).T
dist_coefs = [{"radial2": 1.5, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 1.5, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 1.5, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 1.5,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 1.5, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 1.5}]
inputs = [[1e-6, 1e-6], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [0, 1], [0, -1], [0, 1.5], [0, -1.5], [1, 1]]
for dist_coef in dist_coefs:
model = self.Class(**dist_coef)
with self.subTest(**dist_coef):
for inp in inputs:
r = np.sqrt(inp[0] ** 2 + inp[1] ** 2)
r2 = r ** 2
r3 = r ** 3
r4 = r ** 4
num = num_deriv(inp, model)
ana = model._compute_ddistorted_gnomic_ddistortion(np.array(inp), r, r2, r3, r4)
np.testing.assert_allclose(num, ana, atol=1e-10)
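# Owen-model override of test_get_jacobian_row: the parameter ordering now
# interleaves the skew terms and the six distortion coefficients, i.e.
# [f, kx, kxy, kyx, ky, px, py, radial2, radial4, tangential_y, tangential_x,
# pinwheel1, pinwheel2, a1, a2, a3, misalignments...].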
def test_get_jacobian_row(self):
def num_deriv(loc, cmodel, delta=1e-8, image=0, temperature=0) -> np.ndarray:
model_pert = cmodel.copy()
model_pert.focal_length += delta
pix_pert_f_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.focal_length -= delta
pix_pert_f_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.kx += delta
pix_pert_kx_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.kxy += delta
pix_pert_kxy_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.kyx += delta
pix_pert_kyx_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.ky += delta
pix_pert_ky_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.px += delta
pix_pert_px_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.py += delta
pix_pert_py_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.kx -= delta
pix_pert_kx_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.kxy -= delta
pix_pert_kxy_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.kyx -= delta
pix_pert_kyx_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.ky -= delta
pix_pert_ky_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.px -= delta
pix_pert_px_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.py -= delta
pix_pert_py_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.radial2 += delta
pix_pert_r2_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.radial4 += delta
pix_pert_r4_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.tangential_y += delta
pix_pert_ty_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.tangential_x += delta
pix_pert_tx_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.pinwheel1 += delta
pix_pert_p1_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.pinwheel2 += delta
pix_pert_p2_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.radial2 -= delta
pix_pert_r2_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.radial4 -= delta
pix_pert_r4_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.tangential_y -= delta
pix_pert_ty_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.tangential_x -= delta
pix_pert_tx_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.pinwheel1 -= delta
pix_pert_p1_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.pinwheel2 -= delta
pix_pert_p2_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a1 += delta
pix_pert_a1_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a2 += delta
pix_pert_a2_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a3 += delta
pix_pert_a3_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a1 -= delta
pix_pert_a1_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a2 -= delta
pix_pert_a2_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a3 -= delta
pix_pert_a3_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
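# Use a smaller step for the misalignment rotation vectors below: their nominal
# values (~1e-8 and smaller, see model_param) are orders of magnitude smaller
# than the other model parameters, so the same delta would swamp them.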
delta_m = 1e-6
model_pert = cmodel.copy()
model_pert.misalignment[image][0] += delta_m
pix_pert_mx_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][1] += delta_m
pix_pert_my_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][2] += delta_m
pix_pert_mz_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][0] -= delta_m
pix_pert_mx_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][1] -= delta_m
pix_pert_my_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][2] -= delta_m
pix_pert_mz_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
return np.vstack([(pix_pert_f_f - pix_pert_f_b) / (delta * 2),
(pix_pert_kx_f - pix_pert_kx_b) / (delta * 2),
(pix_pert_kxy_f - pix_pert_kxy_b) / (delta * 2),
(pix_pert_kyx_f - pix_pert_kyx_b) / (delta * 2),
(pix_pert_ky_f - pix_pert_ky_b) / (delta * 2),
(pix_pert_px_f - pix_pert_px_b) / (delta * 2),
(pix_pert_py_f - pix_pert_py_b) / (delta * 2),
(pix_pert_r2_f - pix_pert_r2_b) / (delta * 2),
(pix_pert_r4_f - pix_pert_r4_b) / (delta * 2),
(pix_pert_ty_f - pix_pert_ty_b) / (delta * 2),
(pix_pert_tx_f - pix_pert_tx_b) / (delta * 2),
(pix_pert_p1_f - pix_pert_p1_b) / (delta * 2),
(pix_pert_p2_f - pix_pert_p2_b) / (delta * 2),
(pix_pert_a1_f - pix_pert_a1_b) / (delta * 2),
(pix_pert_a2_f - pix_pert_a2_b) / (delta * 2),
(pix_pert_a3_f - pix_pert_a3_b) / (delta * 2),
np.zeros((image * 3, 2)),
(pix_pert_mx_f - pix_pert_mx_b) / (delta_m * 2),
(pix_pert_my_f - pix_pert_my_b) / (delta_m * 2),
(pix_pert_mz_f - pix_pert_mz_b) / (delta_m * 2)]).T
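# The helper above approximates each Jacobian row by central differences:
# every parameter p is perturbed by +/-delta and the derivative is taken as
# (f(p + delta) - f(p - delta)) / (2 * delta), which is second-order accurate
# in the step size.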
model_param = {"focal_length": 100.75, "radial2": 1.5e-5, "radial4": 1.5e-5, "tangential_x": 1.5e-5,
"tangential_y": 1.5e-5, "pinwheel1": 1.5e-5, "pinwheel2": 1.5e-5, "kx": 30, "ky": 40,
"kxy": 0.5, "kyx": -0.8, "px": 4005.23, "py": 4005.23,
"misalignment": [[1e-8, 1e-9, 1e-10], [-1e-8, 2e-9, -1e-11]],
"a1": 1e-6, "a2": 2e-7, "a3": 3e-8}
inputs = [[0.1, 0, 1], [0, 0.1, 1], [0.1, 0.1, 1], [-0.1, 0, 1], [0, -0.1, 1], [-0.1, -0.1, 1],
[5, 10, 1000.23], [[1], [2], [1200.23]]]
temps = [0, 1, -1, 10.5, -10.5]
model = self.Class(**model_param)
model.estimate_multiple_misalignments = True
for temp in temps:
for inp in inputs:
with self.subTest(temp=temp, inp=inp):
num = num_deriv(inp, model, delta=1e-3, temperature=temp)  # image=0 by default
ana = model._get_jacobian_row(np.array(inp), 0, 1, temperature=temp)
np.testing.assert_allclose(ana, num, rtol=1e-3, atol=1e-10)
num = num_deriv(inp, model, delta=1e-3, image=1, temperature=temp)
ana = model._get_jacobian_row(np.array(inp), 1, 2, temperature=temp)
np.testing.assert_allclose(ana, num, atol=1e-10, rtol=1e-3)
def test_compute_jacobian(self):
def num_deriv(loc, cmodel, delta=1e-8, image=0, nimages=1, temperature=0) -> np.ndarray:
model_pert = cmodel.copy()
model_pert.focal_length += delta
pix_pert_f_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.focal_length -= delta
pix_pert_f_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.kx += delta
pix_pert_kx_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.kxy += delta
pix_pert_kxy_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.kyx += delta
pix_pert_kyx_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.ky += delta
pix_pert_ky_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.px += delta
pix_pert_px_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.py += delta
pix_pert_py_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.kx -= delta
pix_pert_kx_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.kxy -= delta
pix_pert_kxy_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.kyx -= delta
pix_pert_kyx_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.ky -= delta
pix_pert_ky_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.px -= delta
pix_pert_px_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.py -= delta
pix_pert_py_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.radial2 += delta
pix_pert_r2_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.radial4 += delta
pix_pert_r4_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.tangential_y += delta
pix_pert_ty_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.tangential_x += delta
pix_pert_tx_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.pinwheel1 += delta
pix_pert_p1_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.pinwheel2 += delta
pix_pert_p2_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.radial2 -= delta
pix_pert_r2_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.radial4 -= delta
pix_pert_r4_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.tangential_y -= delta
pix_pert_ty_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.tangential_x -= delta
pix_pert_tx_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.pinwheel1 -= delta
pix_pert_p1_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.pinwheel2 -= delta
pix_pert_p2_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a1 += delta
pix_pert_a1_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a2 += delta
pix_pert_a2_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a3 += delta
pix_pert_a3_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a1 -= delta
pix_pert_a1_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a2 -= delta
pix_pert_a2_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a3 -= delta
pix_pert_a3_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][0] += delta
pix_pert_mx_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][1] += delta
pix_pert_my_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][2] += delta
pix_pert_mz_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][0] -= delta
pix_pert_mx_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][1] -= delta
pix_pert_my_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][2] -= delta
pix_pert_mz_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
return np.vstack([(pix_pert_f_f - pix_pert_f_b) / (delta * 2),
(pix_pert_kx_f - pix_pert_kx_b) / (delta * 2),
(pix_pert_kxy_f - pix_pert_kxy_b) / (delta * 2),
(pix_pert_kyx_f - pix_pert_kyx_b) / (delta * 2),
(pix_pert_ky_f - pix_pert_ky_b) / (delta * 2),
(pix_pert_px_f - pix_pert_px_b) / (delta * 2),
(pix_pert_py_f - pix_pert_py_b) / (delta * 2),
(pix_pert_r2_f - pix_pert_r2_b) / (delta * 2),
(pix_pert_r4_f - pix_pert_r4_b) / (delta * 2),
(pix_pert_ty_f - pix_pert_ty_b) / (delta * 2),
(pix_pert_tx_f - pix_pert_tx_b) / (delta * 2),
(pix_pert_p1_f - pix_pert_p1_b) / (delta * 2),
(pix_pert_p2_f - pix_pert_p2_b) / (delta * 2),
(pix_pert_a1_f - pix_pert_a1_b) / (delta * 2),
(pix_pert_a2_f - pix_pert_a2_b) / (delta * 2),
(pix_pert_a3_f - pix_pert_a3_b) / (delta * 2),
np.zeros((image * 3, 2)),
(pix_pert_mx_f - pix_pert_mx_b) / (delta * 2),
(pix_pert_my_f - pix_pert_my_b) / (delta * 2),
(pix_pert_mz_f - pix_pert_mz_b) / (delta * 2),
np.zeros(((nimages - image - 1) * 3, 2))]).T
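# The zero blocks keep each image's misalignment parameters in their own
# columns of the full Jacobian: image * 3 zero rows (columns after the
# transpose) stand in for the images preceding this one, and
# (nimages - image - 1) * 3 for the images that follow.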
model_param = {"focal_length": 100.75, "radial2": 1.5e-5, "radial4": 1.5e-5, "tangential_x": 1.5e-5,
"tangential_y": 1.5e-5, "pinwheel1": 1.5e-5, "pinwheel2": 1.5e-5, "kx": 30, "ky": 40,
"kxy": 0.5, "kyx": -0.8, "px": 4005.23, "py": 4005.23,
"misalignment": [[1e-8, 1e-9, 1e-10], [-1e-8, 2e-9, -1e-11], [2e-10, -5e-12, 1e-9]],
"a1": 1e-6, "a2": 2e-7, "a3": 3e-8}
inputs = [np.array([[0.5, 0, 1]]).T,
np.array([[0, 0.5, 1], [0.5, 0.5, 1], [-0.5, 0, 1]]).T,
np.array([[0.1, -0.5, 1], [-0.5, -0.5, 1], [5, 10, 1000.23], [1, 2, 1200.23]]).T]
temperatures = [0, 1, -1, 10.5, -10.5, [1, -10, 10]]
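# Temperature may be a scalar applied to every image or a per-image list; the
# numeric check below selects the matching entry for each image.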
model = self.Class(**model_param, estimation_parameters=['intrinsic', 'temperature dependence',
'multiple misalignments'])
for temp in temperatures:
with self.subTest(temp=temp):
model.use_a_priori = False
jac_ana = model.compute_jacobian(inputs, temperature=temp)
jac_num = []
numim = len(inputs)
for ind, inp in enumerate(inputs):
if isinstance(temp, list):
templ = temp[ind]
else:
templ = temp
for vec in inp.T:
jac_num.append(num_deriv(vec.T, model, delta=1e-4, image=ind, nimages=numim, temperature=templ))
np.testing.assert_allclose(jac_ana, np.vstack(jac_num), rtol=1e-3, atol=1e-10)
model.use_a_priori = True
jac_ana = model.compute_jacobian(inputs, temperature=temp)
jac_num = []
numim = len(inputs)
for ind, inp in enumerate(inputs):
if isinstance(temp, list):
templ = temp[ind]
else:
templ = temp
for vec in inp.T:
jac_num.append(num_deriv(vec.T, model, delta=1e-4, image=ind, nimages=numim, temperature=templ))
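# Mirror the a priori formulation: pad the numeric Jacobian with zero rows and
# set the trailing square block to the identity, matching the extra rows
# compute_jacobian appends when use_a_priori is True.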
jac_num = np.vstack(jac_num)
jac_num = np.pad(jac_num, [(0, jac_num.shape[1]), (0, 0)], 'constant', constant_values=0)
jac_num[-jac_num.shape[1]:] = np.eye(jac_num.shape[1])
np.testing.assert_allclose(jac_ana, jac_num, rtol=1e-3, atol=1e-10)
def test_apply_update(self):
model_param = {"focal_length": 0, "radial2": 0, "radial4": 0, "tangential_x": 0,
"tangential_y": 0, "pinwheel1": 0, "pinwheel2": 0, "kx": 0, "ky": 0,
"kxy": 0, "kyx": 0, "px": 0,
"misalignment": [[0, 0, 0], [0, 0, 0]],
"a1": 0, "a2": 0, "a3": 0}
model = self.Class(**model_param, estimation_parameters=['intrinsic', "temperature dependence",
'multiple misalignments'])
update_vec = np.arange(22)
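# 22 elements: the leading entries update the scalar parameters checked below
# through element_dict, while indices 16-21 hold the two 3-component
# misalignment rotation vectors, compared through their quaternions.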
model.apply_update(update_vec)
keys = list(model_param.keys())
keys.remove('misalignment')
for key in keys:
self.assertEqual(getattr(model, key), update_vec[model.element_dict[key][0]])
for ind, vec in enumerate(update_vec[16:].reshape(-1, 3)):
np.testing.assert_array_almost_equal(at.Rotation(vec).q, at.Rotation(model.misalignment[ind]).q)
def test_pixels_to_gnomic(self):
intrins_param = {"kx": 3000, "ky": 4000, "kxy": 0.5, "kyx": -0.8, "px": 4005.23, 'py': 2000.33,
'a1': 1e-6, 'a2': 1e-7, 'a3': 1e-8}
dist_coefs = [{"radial2": 1.5e-3, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 1.5e-3, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 1.5e-3, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 1.5e-3,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 1.5e-3, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 1.5e-3},
{"radial2": 1.5e-3, "radial4": 1.5e-3, "tangential_x": 1.5e-3, "tangential_y": 1.5e-3,
"pinwheel1": 1.5e-3, "pinwheel2": 1.5e-3}]
pinhole = [[0, 0], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [[1.5], [0]], [[1.5, -1], [0, 0]],
[0, 1], [0, -1], [0, 1.5], [0, -1.5], [[0], [1.5]], [[0, 0], [1.5, -1]], [1, 1]]
temperatures = [0, 1, -1, 10.5, -10.5]
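# Round trip: apply the distortion, scale by the temperature model, and map
# through the intrinsic matrix, then check that pixels_to_gnomic recovers the
# original undistorted gnomic location.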
for temp in temperatures:
for dist in dist_coefs:
model = self.Class(**dist, **intrins_param)
for gnoms in pinhole:
with self.subTest(**dist, temp=temp, gnoms=gnoms):
mm_dist = model.apply_distortion(np.array(gnoms))
mm_dist *= model.get_temperature_scale(temp)
pix_dist = ((model.intrinsic_matrix[:, :2] @ mm_dist).T + model.intrinsic_matrix[:, 2]).T
mm_undist = model.pixels_to_gnomic(pix_dist, temperature=temp)
np.testing.assert_allclose(mm_undist, gnoms, atol=1e-13)
def test_undistort_pixels(self):
intrins_param = {"kx": 3000, "ky": 4000, "kxy": 0.5, "kyx": -0.8, "px": 4005.23, 'py': 2000.33,
"a1": 1e-3, "a2": 1e-4, "a3": 1e-5}
dist_coefs = [{"radial2": 1.5e-3, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 1.5e-3, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 1.5e-3, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 1.5e-3,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 1.5e-3, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 1.5e-3},
{"radial2": 1.5e-3, "radial4": 1.5e-3, "tangential_x": 1.5e-3, "tangential_y": 1.5e-3,
"pinwheel1": 1.5e-3, "pinwheel2": 1.5e-3}]
pinhole = [[0, 0], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [[1.5], [0]], [[1.5, -1], [0, 0]],
[0, 1], [0, -1], [0, 1.5], [0, -1.5], [[0], [1.5]], [[0, 0], [1.5, -1]], [1, 1]]
temperatures = [0, 1, -1, 10.5, -10.5]
for temp in temperatures:
for dist in dist_coefs:
model = self.Class(**dist, **intrins_param)
for gnoms in pinhole:
with self.subTest(**dist, temp=temp, gnoms=gnoms):
gnoms = np.array(gnoms).astype(np.float64)
mm_dist = model.apply_distortion(np.array(gnoms))
mm_dist *= model.get_temperature_scale(temp)
pix_dist = ((model.intrinsic_matrix[:, :2] @ mm_dist).T + model.intrinsic_matrix[:, 2]).T
pix_undist = model.undistort_pixels(pix_dist, temperature=temp)
gnoms *= model.get_temperature_scale(temp)
pix_pinhole = ((model.intrinsic_matrix[:, :2] @ gnoms).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_allclose(pix_undist, pix_pinhole, atol=1e-13)
def test_pixels_to_unit(self):
intrins_param = {"focal_length": 32.7, "kx": 3000, "ky": 4000, "kxy": 0.5, "kyx": -0.8,
"px": 4005.23, 'py': 2000.33, "a1": 1e-6, "a2": 1e-7, "a3": -3e-5}
dist_coefs = [{"radial2": 1.5e-3, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 1.5e-3, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 1.5e-3, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 1.5e-3,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 1.5e-3, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 1.5e-3},
{"radial2": 1.5e-3, "radial4": 1.5e-3, "tangential_x": 1.5e-3, "tangential_y": 1.5e-3,
"pinwheel1": 1.5e-3, "pinwheel2": 1.5e-3},
{"misalignment": np.array([1e-11, 2e-12, -1e-10])},
{"misalignment": np.array([[1e-11, 2e-12, -1e-10], [-1e-13, 1e-11, 2e-12]]),
"estimation_parameters": "multiple misalignments"}]
camera_vecs = [[0, 0, 1], [0.01, 0, 1], [-0.01, 0, 1], [0, 0.01, 1], [0, -0.01, 1], [0.01, 0.01, 1],
[-0.01, -0.01, 1], [[0.01, -0.01], [-0.01, 0.01], [1, 1]]]
temperatures = [0, 1, -1, 10.5, -10.5]
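# project_onto_image followed by pixels_to_unit should return the original
# camera-frame direction, so the expected value is just the input vector
# normalized to unit length.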
for temp in temperatures:
for dist in dist_coefs:
model = self.Class(**dist, **intrins_param)
for vec in camera_vecs:
with self.subTest(**dist, temp=temp, vec=vec):
pixel_loc = model.project_onto_image(vec, image=-1, temperature=temp)
unit_vec = model.pixels_to_unit(pixel_loc, image=-1, temperature=temp)
unit_true = np.array(vec).astype(np.float64)
unit_true /= np.linalg.norm(unit_true, axis=0, keepdims=True)
np.testing.assert_allclose(unit_vec, unit_true, atol=1e-13)
def test_overwrite(self):
model1 = self.Class(field_of_view=10, intrinsic_matrix=np.array([[1, 2, 3], [4, 5, 6]]),
distortion_coefficients=np.array([1, 2, 3, 4, 5, 6]), focal_length=60,
misalignment=[[1, 2, 3], [4, 5, 6]], use_a_priori=False,
estimation_parameters=['multiple misalignments'], a1=0, a2=3, a3=5)
model2 = self.Class(field_of_view=20, intrinsic_matrix=np.array([[11, 12, 13], [14, 15, 16]]),
distortion_coefficients=np.array([11, 12, 13, 14, 15, 16]), focal_length=160,
misalignment=[[11, 12, 13], [14, 15, 16]], use_a_priori=True,
estimation_parameters=['single misalignment'], a1=-100, a2=-200, a3=-300)
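# overwrite should make the receiving model compare equal to its argument;
# both orderings are checked so neither model is privileged.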
modeltest = model1.copy()
modeltest.overwrite(model2)
self.assertEqual(modeltest, model2)
modeltest = model2.copy()
modeltest.overwrite(model1)
self.assertEqual(modeltest, model1)
def test_intrinsic_matrix_inv(self):
model = self.Class(kx=5, ky=10, kxy=20, kyx=-30.4, px=100, py=-5)
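# The 2x3 intrinsic matrix is an affine map; appending the row [0, 0, 1]
# promotes it (or its inverse) to homogeneous form, so each product below
# should equal the top two rows of the 3x3 identity.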
np.testing.assert_array_almost_equal(
model.intrinsic_matrix @ np.vstack([model.intrinsic_matrix_inv, [0, 0, 1]]),
[[1, 0, 0], [0, 1, 0]])
np.testing.assert_array_almost_equal(
model.intrinsic_matrix_inv @ np.vstack([model.intrinsic_matrix, [0, 0, 1]]),
[[1, 0, 0], [0, 1, 0]])
def test_distort_pixels(self):
model = self.Class(kx=1000, ky=-950.5, px=4500, py=139.32, a1=1e-3, a2=1e-4, a3=1e-5,
kxy=0.5, kyx=-8, radial2=1e-5, radial4=1e-5, pinwheel2=1e-7, pinwheel1=-1e-12,
tangential_x=1e-6, tangential_y=2e-12)
pixels = [[0, 1], [1, 0], [-1, 0], [0, -1], [9000., 200.2],
[[4500, 100, 10.98], [0, 139.23, 200.3]]]
temperatures = [0, 1, -1, 10.5, -10.5]
for pix in pixels:
for temp in temperatures:
with self.subTest(pix=pix, temp=temp):
undist_pix = model.undistort_pixels(pix, temperature=temp)
dist_pix = model.distort_pixels(undist_pix, temperature=temp)
np.testing.assert_allclose(dist_pix, pix, atol=1e-10)
def test_distortion_map(self):
model = self.Class(kx=100, ky=-985.234, px=1000, py=1095, kxy=10, kyx=-5,
e1=1e-6, e2=1e-12, e3=-4e-10, e5=6e-7, e6=-1e-5, e4=1e-7,
a1=1e-6, a2=-1e-7, a3=4e-12)
rows, cols, dist = model.distortion_map((2000, 250), step=10)
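# Rebuild the sample grid with the same step and 'ij' indexing, then confirm
# the returned distortion equals distort_pixels minus the pinhole locations.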
rl, cl = np.arange(0, 2000, 10), np.arange(0, 250, 10)
rs, cs = np.meshgrid(rl, cl, indexing='ij')
np.testing.assert_array_equal(rows, rs)
np.testing.assert_array_equal(cols, cs)
distl = model.distort_pixels(np.vstack([cs.flatten(), rs.flatten()]))
np.testing.assert_array_equal(distl - np.vstack([cs.flatten(), rs.flatten()]), dist)
class TestBrownModel(TestPinholeModel):
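# Reuses the pinhole test suite, overriding the tests whose expected values
# differ under the Brown distortion model; inherited tests that do not apply
# are disabled by assigning None (see below).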
def setUp(self):
self.Class = BrownModel
# Not supported for this model
test__compute_dgnomic_dfocal_length = None
def test___init__(self):
model = self.Class(intrinsic_matrix=np.array([[1, 2, 3], [4, 5, 6]]), field_of_view=20.5,
use_a_priori=True, misalignment=[np.zeros(3), np.ones(3)],
distortion_coefficients=np.array([1, 2, 3, 4, 5]),
estimation_parameters='basic intrinsic',
a1=1, a2=2, a3=3)
np.testing.assert_array_equal(model.intrinsic_matrix, [[1, 2, 3], [4, 5, 6]])
self.assertEqual(model.focal_length, 1)
self.assertEqual(model.field_of_view, 20.5)
self.assertTrue(model.use_a_priori)
self.assertEqual(model.estimation_parameters, ['basic intrinsic'])
self.assertEqual(model.a1, 1)
self.assertEqual(model.a2, 2)
self.assertEqual(model.a3, 3)
np.testing.assert_array_equal(model.distortion_coefficients, np.arange(1, 6))
model = self.Class(kx=1, fy=2, px=4, py=5, field_of_view=20.5,
use_a_priori=True, misalignment=[np.zeros(3), np.ones(3)], kxy=80,
estimation_parameters=['kx', 'px'], n_rows=500, n_cols=600,
radial2=1, radial4=2, k3=3, p1=4, tiptilt_x=5)
np.testing.assert_array_equal(model.intrinsic_matrix, [[1, 80, 4], [0, 2, 5]])
self.assertEqual(model.focal_length, 1)
self.assertEqual(model.field_of_view, 20.5)
self.assertTrue(model.use_a_priori)
self.assertEqual(model.estimation_parameters, ['kx', 'px'])
self.assertEqual(model.n_rows, 500)
self.assertEqual(model.n_cols, 600)
np.testing.assert_array_equal(model.distortion_coefficients, np.arange(1, 6))
def test_fx(self):
model = self.Class(intrinsic_matrix=np.array([[1, 0, 0], [0, 0, 0]]))
self.assertEqual(model.kx, 1)
model.kx = 100
self.assertEqual(model.kx, 100)
self.assertEqual(model.intrinsic_matrix[0, 0], 100)
def test_fy(self):
model = self.Class(intrinsic_matrix=np.array([[0, 0, 0], [0, 1, 0]]))
self.assertEqual(model.ky, 1)
model.ky = 100
self.assertEqual(model.ky, 100)
self.assertEqual(model.intrinsic_matrix[1, 1], 100)
def test_kxy(self):
model = self.Class(intrinsic_matrix=np.array([[0, 1, 0], [0, 0, 0]]))
self.assertEqual(model.kxy, 1)
model.kxy = 100
self.assertEqual(model.kxy, 100)
self.assertEqual(model.intrinsic_matrix[0, 1], 100)
def test_alpha(self):
model = self.Class(intrinsic_matrix=np.array([[0, 1, 0], [0, 0, 0]]))
self.assertEqual(model.alpha, 1)
model.alpha = 100
self.assertEqual(model.alpha, 100)
self.assertEqual(model.intrinsic_matrix[0, 1], 100)
def test_k1(self):
model = self.Class(distortion_coefficients=np.array([1, 0, 0, 0, 0]))
self.assertEqual(model.k1, 1)
model.k1 = 100
self.assertEqual(model.k1, 100)
self.assertEqual(model.distortion_coefficients[0], 100)
def test_k2(self):
model = self.Class(distortion_coefficients=np.array([0, 1, 0, 0, 0]))
self.assertEqual(model.k2, 1)
model.k2 = 100
self.assertEqual(model.k2, 100)
self.assertEqual(model.distortion_coefficients[1], 100)
def test_k3(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 1, 0, 0]))
self.assertEqual(model.k3, 1)
model.k3 = 100
self.assertEqual(model.k3, 100)
self.assertEqual(model.distortion_coefficients[2], 100)
def test_p1(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 1, 0]))
self.assertEqual(model.p1, 1)
model.p1 = 100
self.assertEqual(model.p1, 100)
self.assertEqual(model.distortion_coefficients[3], 100)
def test_p2(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 0, 1]))
self.assertEqual(model.p2, 1)
model.p2 = 100
self.assertEqual(model.p2, 100)
self.assertEqual(model.distortion_coefficients[4], 100)
def test_radial2(self):
model = self.Class(distortion_coefficients=np.array([1, 0, 0, 0, 0]))
self.assertEqual(model.radial2, 1)
model.radial2 = 100
self.assertEqual(model.radial2, 100)
self.assertEqual(model.distortion_coefficients[0], 100)
def test_radial4(self):
model = self.Class(distortion_coefficients=np.array([0, 1, 0, 0, 0]))
self.assertEqual(model.radial4, 1)
model.radial4 = 100
self.assertEqual(model.radial4, 100)
self.assertEqual(model.distortion_coefficients[1], 100)
def test_radial6(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 1, 0, 0]))
self.assertEqual(model.radial6, 1)
model.radial6 = 100
self.assertEqual(model.radial6, 100)
self.assertEqual(model.distortion_coefficients[2], 100)
def test_tiptilt_y(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 1, 0]))
self.assertEqual(model.tiptilt_y, 1)
model.tiptilt_y = 100
self.assertEqual(model.tiptilt_y, 100)
self.assertEqual(model.distortion_coefficients[3], 100)
def test_tiptilt_x(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 0, 1]))
self.assertEqual(model.tiptilt_x, 1)
model.tiptilt_x = 100
self.assertEqual(model.tiptilt_x, 100)
self.assertEqual(model.distortion_coefficients[4], 100)
def test_apply_distortion(self):
dist_coefs = [{"k1": 1.5, "k2": 0, "k3": 0, "p1": 0, "p2": 0},
{"k1": 0, "k2": 1.5, "k3": 0, "p1": 0, "p2": 0},
{"k1": 0, "k2": 0, "k3": 1.5, "p1": 0, "p2": 0},
{"k1": 0, "k2": 0, "k3": 0, "p1": 1.5, "p2": 0},
{"k1": 0, "k2": 0, "k3": 0, "p1": 0, "p2": 1.5}]
inputs = [[0, 0], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [[1.5], [0]], [[1.5, -1], [0, 0]],
[0, 1], [0, -1], [0, 1.5], [0, -1.5], [[0], [1.5]], [[0, 0], [1.5, -1]], [1, 1]]
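# Expected values are hand-computed from the Brown distortion model as
# exercised by the coefficient sets above:
#   dx = x*(1 + k1*r**2 + k2*r**4 + k3*r**6) + 2*p1*x*y + p2*(r**2 + 2*x**2)
#   dy = y*(1 + k1*r**2 + k2*r**4 + k3*r**6) + p1*(r**2 + 2*y**2) + 2*p2*x*y
# e.g. with k1 = 1.5 the input (1.5, 0) has r**2 = 2.25 and maps to
# (1.5 + 1.5**4, 0).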
solus = [[[0, 0], [2.5, 0], [-2.5, 0], [1.5 + 1.5 ** 4, 0], [-(1.5 + 1.5 ** 4), 0],
[[(1.5 + 1.5 ** 4)], [0]], [[(1.5 + 1.5 ** 4), -2.5], [0, 0]],
[0, 2.5], [0, -2.5], [0, (1.5 + 1.5 ** 4)], [0, -(1.5 + 1.5 ** 4)], [[0], [(1.5 + 1.5 ** 4)]],
[[0, 0], [(1.5 + 1.5 ** 4), -2.5]], [1 + 2 * 1.5, 1 + 2 * 1.5]],
[[0, 0], [2.5, 0], [-2.5, 0], [(1.5 + 1.5 ** 6), 0], [-(1.5 + 1.5 ** 6), 0],
[[(1.5 + 1.5 ** 6)], [0]], [[(1.5 + 1.5 ** 6), -2.5], [0, 0]],
[0, 2.5], [0, -2.5], [0, (1.5 + 1.5 ** 6)], [0, -(1.5 + 1.5 ** 6)], [[0], [(1.5 + 1.5 ** 6)]],
[[0, 0], [(1.5 + 1.5 ** 6), -2.5]], [1 + 4 * 1.5, 1 + 4 * 1.5]],
[[0, 0], [2.5, 0], [-2.5, 0], [(1.5 + 1.5 ** 8), 0], [-(1.5 + 1.5 ** 8), 0],
[[(1.5 + 1.5 ** 8)], [0]], [[(1.5 + 1.5 ** 8), -2.5], [0, 0]],
[0, 2.5], [0, -2.5], [0, (1.5 + 1.5 ** 8)], [0, -(1.5 + 1.5 ** 8)], [[0], [(1.5 + 1.5 ** 8)]],
[[0, 0], [(1.5 + 1.5 ** 8), -2.5]], [1 + 8 * 1.5, 1 + 8 * 1.5]],
[[0, 0], [1, 1.5], [-1, 1.5], [1.5, 1.5 ** 3], [-1.5, 1.5 ** 3], [[1.5], [1.5 ** 3]],
[[1.5, -1], [1.5 ** 3, 1.5]],
[0, 1 + 3 * 1.5], [0, -1 + 3 * 1.5], [0, 1.5 + 3 * 1.5 ** 3], [0, -1.5 + 3 * 1.5 ** 3],
[[0], [1.5 + 3 * 1.5 ** 3]],
[[0, 0], [1.5 + 3 * 1.5 ** 3, -1 + 3 * 1.5]], [1 + 2 * 1.5, 1 + 4 * 1.5]],
[[0, 0], [1 + 3 * 1.5, 0], [-1 + 3 * 1.5, 0], [1.5 + 3 * 1.5 ** 3, 0], [-1.5 + 3 * 1.5 ** 3, 0],
[[1.5 + 3 * 1.5 ** 3], [0]], [[1.5 + 3 * 1.5 ** 3, -1 + 3 * 1.5], [0, 0]],
[1.5, 1], [1.5, -1], [1.5 ** 3, 1.5], [1.5 ** 3, -1.5], [[1.5 ** 3], [1.5]],
[[1.5 ** 3, 1.5], [1.5, -1]],
[1 + 4 * 1.5, 1 + 2 * 1.5]]]
for dist, sols in zip(dist_coefs, solus):
with self.subTest(**dist):
model = self.Class(**dist)
for inp, solu in zip(inputs, sols):
gnom_dist = model.apply_distortion(np.array(inp))
np.testing.assert_array_almost_equal(gnom_dist, solu)
def test_get_projections(self):
points = [[0, 0, 1], [-0.1, 0.2, 2.2], [[-0.1], [0.2], [2.2]], [[-0.1, 0], [0.2, 0], [2.2, 1]]]
model = self.Class(fx=4050.5, fy=3050.25, alpha=1.5, px=1500, py=1500.5,
k1=0.5, k2=-0.3, k3=0.15, p1=1e-7, p2=1e-6, a1=1e-1, a2=-1e-6, a3=3e-7)
temps = [0, 1, -1, 10, -10]
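# get_projections exposes each stage of the projection chain: the pinhole
# (gnomic) location, the distorted location (including temperature scaling),
# and the final pixel coordinates from the intrinsic matrix. Each stage is
# checked against an explicit reconstruction.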
for temp in temps:
with self.subTest(misalignment=None, temp=temp):
for point in points:
pin, dist, pix = model.get_projections(point, temperature=temp)
pin_true = np.array(point[:2]) / point[2]
dist_true = model.apply_distortion(pin_true)
dist_true *= model.get_temperature_scale(temp)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], dist_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_equal(pin, pin_true)
np.testing.assert_array_equal(dist, dist_true)
np.testing.assert_array_equal(pix, pix_true)
model = self.Class(fx=4050.5, fy=3050.25, alpha=1.5, px=1500, py=1500.5,
k1=0.5, k2=-0.3, k3=0.15, p1=1e-7, p2=1e-6, misalignment=[0, 0, np.pi])
with self.subTest(misalignment=[0, 0, np.pi]):
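# A rotation of pi about the boresight negates both camera-frame components,
# so the expected pinhole projection is the sign-flipped input.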
for point in points:
pin, dist, pix = model.get_projections(point)
pin_true = -np.array(point[:2]) / point[2]
dist_true = model.apply_distortion(pin_true)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], dist_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pin, pin_true)
np.testing.assert_array_almost_equal(dist, dist_true)
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(fx=4050.5, fy=3050.25, alpha=1.5, px=1500, py=1500.5,
k1=0.5, k2=-0.3, k3=0.15, p1=1e-7, p2=1e-6, misalignment=[np.pi, 0, 0])
with self.subTest(misalignment=[np.pi, 0, 0]):
for point in points:
pin, dist, pix = model.get_projections(point)
pin_true = np.array(point[:2]) / point[2]
pin_true[0] *= -1
dist_true = model.apply_distortion(pin_true)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], dist_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pin, pin_true)
np.testing.assert_array_almost_equal(dist, dist_true)
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(fx=4050.5, fy=3050.25, alpha=1.5, px=1500, py=1500.5,
k1=0.5, k2=-0.3, k3=0.15, p1=1e-7, p2=1e-6, misalignment=[0, np.pi, 0])
with self.subTest(misalignment=[0, np.pi, 0]):
for point in points:
pin, dist, pix = model.get_projections(point)
pin_true = np.array(point[:2]) / point[2]
pin_true[1] *= -1
dist_true = model.apply_distortion(pin_true)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], dist_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pin, pin_true)
np.testing.assert_array_almost_equal(dist, dist_true)
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(fx=4050.5, fy=3050.25, alpha=1.5, px=1500, py=1500.5,
k1=0.5, k2=-0.3, k3=0.15, p1=1e-7, p2=1e-6, misalignment=[1, 0.2, 0.3])
with self.subTest(misalignment=[1, 0.2, 0.3]):
rot_mat = at.rotvec_to_rotmat([1, 0.2, 0.3]).squeeze()
for point in points:
point_new = rot_mat @ point
pin, dist, pix = model.get_projections(point)
pin_true = np.array(point_new[:2]) / np.array(point_new[2])
dist_true = model.apply_distortion(pin_true)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], dist_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pin, pin_true)
np.testing.assert_array_almost_equal(dist, dist_true)
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(fx=4050.5, fy=3050.25, alpha=1.5, px=1500, py=1500.5,
k1=0.5, k2=-0.3, k3=0.15, p1=1e-7, p2=1e-6, misalignment=[[1, 0.2, 0.3], [0, 0, np.pi]])
model.estimate_multiple_misalignments = True
with self.subTest(misalignment=[[1, 0.2, 0.3], [0, 0, np.pi]]):
rot_mat = at.rotvec_to_rotmat([1, 0.2, 0.3]).squeeze()
for point in points:
point_new = rot_mat @ point
pin, dist, pix = model.get_projections(point, image=0)
pin_true = np.array(point_new[:2]) / np.array(point_new[2])
dist_true = model.apply_distortion(pin_true)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], dist_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pin, pin_true)
np.testing.assert_array_almost_equal(dist, dist_true)
np.testing.assert_array_almost_equal(pix, pix_true)
pin, dist, pix = model.get_projections(point, image=1)
pin_true = -np.array(point[:2]) / point[2]
dist_true = model.apply_distortion(pin_true)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], dist_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pin, pin_true)
np.testing.assert_array_almost_equal(dist, dist_true)
np.testing.assert_array_almost_equal(pix, pix_true)
def test_project_onto_image(self):
points = [[0, 0, 1], [-0.1, 0.2, 2.2], [[-0.1], [0.2], [2.2]], [[-0.1, 0], [0.2, 0], [2.2, 1]]]
model = self.Class(fx=4050.5, fy=3050.25, alpha=1.5, px=1500, py=1500.5,
k1=0.5, k2=-0.3, k3=0.15, p1=1e-7, p2=1e-6, a1=1, a2=2, a3=-3)
temps = [0, 1, -1, 10, -10]
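# project_onto_image should return exactly the pixel stage of get_projections,
# so the two are compared directly for every point and temperature.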
for temp in temps:
with self.subTest(temp=temp, misalignment=None):
for point in points:
_, __, pix = model.get_projections(point, temperature=temp)
pix_proj = model.project_onto_image(point, temperature=temp)
np.testing.assert_array_equal(pix, pix_proj)
model = self.Class(fx=4050.5, fy=3050.25, alpha=1.5, px=1500, py=1500.5,
k1=0.5, k2=-0.3, k3=0.15, p1=1e-7, p2=1e-6, misalignment=[[1, 0.2, 0.3], [0, 0, np.pi]])
model.estimate_multiple_misalignments = True
with self.subTest(misalignment=[[1, 0.2, 0.3], [0, 0, np.pi]]):
for point in points:
_, __, pix = model.get_projections(point, image=0)
pix_proj = model.project_onto_image(point, image=0)
np.testing.assert_array_equal(pix, pix_proj)
_, __, pix = model.get_projections(point, image=1)
pix_proj = model.project_onto_image(point, image=1)
np.testing.assert_array_equal(pix, pix_proj)
def test_compute_pixel_jacobian(self):
def num_deriv(uvec, cmodel, delta=1e-8, image=0, temperature=0) -> np.ndarray:
uvec = np.array(uvec).reshape(3, -1)
pix_true = cmodel.project_onto_image(uvec, image=image, temperature=temperature)
uvec_pert = uvec + [[delta], [0], [0]]
pix_pert_x_f = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
uvec_pert = uvec + [[0], [delta], [0]]
pix_pert_y_f = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
uvec_pert = uvec + [[0], [0], [delta]]
pix_pert_z_f = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
uvec_pert = uvec - [[delta], [0], [0]]
pix_pert_x_b = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
uvec_pert = uvec - [[0], [delta], [0]]
pix_pert_y_b = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
uvec_pert = uvec - [[0], [0], [delta]]
pix_pert_z_b = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
return np.array([(pix_pert_x_f-pix_pert_x_b)/(2*delta),
(pix_pert_y_f-pix_pert_y_b)/(2*delta),
(pix_pert_z_f-pix_pert_z_b)/(2*delta)]).swapaxes(0, -1)
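# Stacking the three central differences gives shape (3, 2, n); swapaxes(0, -1)
# reorders this to (n, 2, 3) so each slice is the 2x3 Jacobian of pixel
# location with respect to the unit vector components.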
inputs = [np.array([[0.5, 0, 1]]).T,
np.array([[0, 0.5, 1], [0.5, 0.5, 1], [-0.5, 0, 1]]).T,
np.array([[0.1, -0.5, 1], [-0.5, -0.5, 1], [5, 10, 1000.23], [1, 2, 1200.23]]).T]
temperatures = [0, 1, -1, 10.5, -10.5]
model = self.Class(fx=4050.5, fy=5050.25, alpha=1.5, px=1500, py=1500.5,
a1=0.15e-7, a2=-0.01e-8, a3=1e-9,
k1=0.5, k2=-0.3, k3=0.15, p1=1e-7, p2=1e-6,
misalignment=[[1e-9, -1e-9, 2e-9], [-1e-9, 2e-9, -1e-9]])
model.estimate_multiple_misalignments = True
for temp in temperatures:
for input in inputs:
for image in range(2):
with self.subTest(image=image, temp=temp, input=input):
jac_ana = model.compute_pixel_jacobian(input, image=image, temperature=temp)
jac_num = num_deriv(input, model, image=image, temperature=temp, delta=1e-2)
np.testing.assert_allclose(jac_ana, jac_num, rtol=1e-3, atol=1e-10)
def test__compute_dcamera_point_dgnomic(self):
def num_deriv(gnomic_locations, cmodel, delta=1e-6) -> np.ndarray:
def g2u(g):
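# lift the gnomic point onto the focal plane at z = focal_length and
# normalize each column to a unit vector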
v = np.vstack([g, cmodel.focal_length*np.ones(g.shape[-1])])
return v/np.linalg.norm(v, axis=0, keepdims=True)
gnomic_locations = np.asarray(gnomic_locations).reshape(2, -1)
gnom_pert = gnomic_locations + [[delta], [0]]
cam_loc_pert_x_f = g2u(gnom_pert)
gnom_pert = gnomic_locations + [[0], [delta]]
cam_loc_pert_y_f = g2u(gnom_pert)
gnom_pert = gnomic_locations - [[delta], [0]]
cam_loc_pert_x_b = g2u(gnom_pert)
gnom_pert = gnomic_locations - [[0], [delta]]
cam_loc_pert_y_b = g2u(gnom_pert)
return np.array([(cam_loc_pert_x_f -cam_loc_pert_x_b)/(2*delta),
(cam_loc_pert_y_f -cam_loc_pert_y_b)/(2*delta)]).swapaxes(0, -1)
inputs = [np.array([[0, 0]]).T,
np.array([[0, 2000], [2000, 0], [2000, 2000]]).T,
np.array([[1000, 1000], [1000, 2000], [2000, 1000], [0, 1000], [1000, 0]]).T]
model = self.Class(fx=4050.5, fy=5050.25, alpha=1.5, px=1500, py=1500.5,
a1=0.15e-7, a2=-0.01e-8, a3=1e-9,
k1=0.5, k2=-0.3, k3=0.15, p1=1e-7, p2=1e-6,
misalignment=[[1e-9, -1e-9, 2e-9], [-1e-9, 2e-9, -1e-9]])
model.estimate_multiple_misalignments = True
for input in inputs:
with self.subTest(input=input):
jac_ana = []
for gnom in input.T:
jac_ana.append(
model._compute_dcamera_point_dgnomic(gnom, np.sqrt(np.sum(gnom*gnom) + model.focal_length**2)))
jac_ana = np.array(jac_ana)
jac_num = num_deriv(input, model)
np.testing.assert_almost_equal(jac_ana, jac_num)
def test__compute_dgnomic_ddist_gnomic(self):
def num_deriv(dist_gnomic_locations, cmodel, delta=1e-6) -> np.ndarray:
def dg2g(dg):
gnomic_guess = dg.copy()
# perform the fixed-point iteration (fpa) to invert the distortion
for _ in np.arange(20):
# get the distorted location assuming the current guess is correct
gnomic_guess_distorted = cmodel.apply_distortion(gnomic_guess)
# subtract off the residual distortion from the gnomic guess
gnomic_guess += dg - gnomic_guess_distorted
# check for convergence
if np.all(np.linalg.norm(gnomic_guess_distorted - dg, axis=0) <= 1e-15):
break
return gnomic_guess
dist_gnomic_locations = np.asarray(dist_gnomic_locations).reshape(2, -1)
dist_gnom_pert = dist_gnomic_locations + [[delta], [0]]
gnom_loc_pert_x_f = dg2g(dist_gnom_pert)
dist_gnom_pert = dist_gnomic_locations + [[0], [delta]]
gnom_loc_pert_y_f = dg2g(dist_gnom_pert)
dist_gnom_pert = dist_gnomic_locations - [[delta], [0]]
gnom_loc_pert_x_b = dg2g(dist_gnom_pert)
dist_gnom_pert = dist_gnomic_locations - [[0], [delta]]
gnom_loc_pert_y_b = dg2g(dist_gnom_pert)
return np.array([(gnom_loc_pert_x_f - gnom_loc_pert_x_b)/(2*delta),
(gnom_loc_pert_y_f - gnom_loc_pert_y_b)/(2*delta)]).swapaxes(0, -1)
inputs = [np.array([[0, 0]]).T,
np.array([[0, 0.1], [0.1, 0], [0.1, 0.1]]).T,
np.array([[-0.1, 0], [0, -0.1], [-0.1, -0.1], [0.1, -0.1], [-0.1, 0.1]]).T]
model = self.Class(fx=4050.5, fy=5050.25, alpha=1.5, px=1500, py=1500.5,
a1=0.15e-7, a2=-0.01e-8, a3=1e-9,
k1=0.5, k2=-0.3, k3=0.15, p1=1e-7, p2=1e-6,
misalignment=[[1e-9, -1e-9, 2e-9], [-1e-9, 2e-9, -1e-9]])
model.estimate_multiple_misalignments = True
for input in inputs:
with self.subTest(input=input):
jac_ana = []
for dist_gnom in input.T:
jac_ana.append(model._compute_dgnomic_ddist_gnomic(dist_gnom))
jac_ana = np.array(jac_ana)
jac_num = num_deriv(input, model, delta=1e-8)
np.testing.assert_allclose(jac_ana, jac_num, rtol=1e-1, atol=1e-10)
def test_compute_unit_vector_jacobian(self):
def num_deriv(pixels, cmodel, delta=1e-6, image=0, temperature=0) -> np.ndarray:
pixels = np.array(pixels).reshape(2, -1)
pix_pert = pixels + [[delta], [0]]
uvec_pert_x_f = cmodel.pixels_to_unit(pix_pert, image=image, temperature=temperature)
pix_pert = pixels + [[0], [delta]]
uvec_pert_y_f = cmodel.pixels_to_unit(pix_pert, image=image, temperature=temperature)
pix_pert = pixels - [[delta], [0]]
uvec_pert_x_b = cmodel.pixels_to_unit(pix_pert, image=image, temperature=temperature)
pix_pert = pixels - [[0], [delta]]
uvec_pert_y_b = cmodel.pixels_to_unit(pix_pert, image=image, temperature=temperature)
return np.array([(uvec_pert_x_f-uvec_pert_x_b)/(2*delta),
(uvec_pert_y_f-uvec_pert_y_b)/(2*delta)]).swapaxes(0, -1)
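# Note the pixel inputs and distortion coefficients below are scaled down
# relative to the pixel-Jacobian test, presumably to keep the iterative
# undistortion inside pixels_to_unit well within its convergence region.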
inputs = [np.array([[0, 0]]).T,
np.array([[0, 2000], [2000, 0], [2000, 2000]]).T/10,
np.array([[1000, 1000], [1000, 2000], [2000, 1000], [0, 1000], [1000, 0]]).T/10]
temperatures = [0, 1, -1, 10.5, -10.5]
model = self.Class(fx=4050.5, fy=5050.25, alpha=1.5, px=100, py=100.5,
a1=0.15e-7, a2=-0.01e-8, a3=1e-9,
k1=0.05, k2=-0.03, k3=0.015, p1=1e-7, p2=1e-6,
misalignment=[[1e-9, -1e-9, 2e-9], [-1e-9, 2e-9, -1e-9]])
model.estimate_multiple_misalignments = True
for temp in temperatures:
for input in inputs:
for image in range(2):
with self.subTest(image=image, temp=temp, input=input):
jac_ana = model.compute_unit_vector_jacobian(input, image=image, temperature=temp)
jac_num = num_deriv(input, model, image=image, temperature=temp, delta=1e-2)
np.testing.assert_allclose(jac_ana, jac_num, rtol=1e-3, atol=1e-10)
def test__compute_ddistorted_gnomic_dgnomic(self):
def num_deriv(loc, cmodel, delta=1e-8) -> np.ndarray:
loc_pert = np.array(loc) + [delta, 0]
dist_pert_x_f = cmodel.apply_distortion(loc_pert)
loc_pert = np.array(loc) + [0, delta]
dist_pert_y_f = cmodel.apply_distortion(loc_pert)
loc_pert = np.array(loc) - [delta, 0]
dist_pert_x_b = cmodel.apply_distortion(loc_pert)
loc_pert = np.array(loc) - [0, delta]
dist_pert_y_b = cmodel.apply_distortion(loc_pert)
return np.array(
[(dist_pert_x_f - dist_pert_x_b) / (2 * delta), (dist_pert_y_f - dist_pert_y_b) / (2 * delta)]).T
dist_coefs = [{"k1": 1.5, "k2": 0, "k3": 0, "p1": 0, "p2": 0},
{"k1": 0, "k2": 1.5, "k3": 0, "p1": 0, "p2": 0},
{"k1": 0, "k2": 0, "k3": 1.5, "p1": 0, "p2": 0},
{"k1": 0, "k2": 0, "k3": 0, "p1": 1.5, "p2": 0},
{"k1": 0, "k2": 0, "k3": 0, "p1": 0, "p2": 1.5},
{"k1": -1.5, "k2": -1.5, "k3": -1.5, "p1": -1.5, "p2": -1.5}]
inputs = [[0, 0], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [0, 1], [0, -1], [0, 1.5], [0, -1.5], [1, 1]]
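# The analytic helper takes the radial powers as arguments, so r**2, r**4, and
# r**6 are precomputed for each input before comparing against the numeric
# central differences.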
for dist_coef in dist_coefs:
model = self.Class(**dist_coef)
with self.subTest(**dist_coef):
for inp in inputs:
r = np.sqrt(inp[0] ** 2 + inp[1] ** 2)
r2 = r ** 2
r4 = r ** 4
r6 = r ** 6
num = num_deriv(inp, model)
ana = model._compute_ddistorted_gnomic_dgnomic(np.array(inp), r2, r4, r6)
np.testing.assert_allclose(num, ana, atol=1e-14)
def test__compute_dpixel_ddistorted_gnomic(self):
def num_deriv(loc, cmodel, delta=1e-8, temperature=0) -> np.ndarray:
loc_pert = np.array(loc) + [delta, 0]
loc_pert *= cmodel.get_temperature_scale(temperature)
pix_pert_x_f = cmodel.intrinsic_matrix[:, :2] @ loc_pert + cmodel.intrinsic_matrix[:, 2]
loc_pert = np.array(loc) + [0, delta]
loc_pert *= cmodel.get_temperature_scale(temperature)
pix_pert_y_f = cmodel.intrinsic_matrix[:, :2] @ loc_pert + cmodel.intrinsic_matrix[:, 2]
loc_pert = np.array(loc) - [delta, 0]
loc_pert *= cmodel.get_temperature_scale(temperature)
pix_pert_x_b = cmodel.intrinsic_matrix[:, :2] @ loc_pert + cmodel.intrinsic_matrix[:, 2]
loc_pert = np.array(loc) - [0, delta]
loc_pert *= cmodel.get_temperature_scale(temperature)
pix_pert_y_b = cmodel.intrinsic_matrix[:, :2] @ loc_pert + cmodel.intrinsic_matrix[:, 2]
return np.array(
[(pix_pert_x_f - pix_pert_x_b) / (2 * delta), (pix_pert_y_f - pix_pert_y_b) / (2 * delta)]).T
intrins_coefs = [{"fx": 1.5, "fy": 0, "alpha": 0, "px": 0, "py": 0},
{"fx": 0, "fy": 1.5, "alpha": 0, "px": 0, "py": 0},
{"fx": 0, "fy": 0, "alpha": 1.5, "px": 0, "py": 0},
{"fx": 0, "fy": 0, "alpha": 0, "px": 1.5, "py": 0},
{"fx": 0, "fy": 0, "alpha": 0, "px": 0, "py": 1.5},
{"fx": -1.5, "fy": -1.5, "alpha": -1.5, "px": 1.5, "py": 1.5}]
inputs = [[0, 0], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [0, 1], [0, -1], [0, 1.5], [0, -1.5], [1, 1]]
temps = [0, 1, -1, 10.5, -10.5]
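# The temperature scale multiplies the distorted gnomic location before the
# intrinsic matrix is applied, so it enters this Jacobian only as a constant
# factor; the analytic helper needs just the temperature, not the location.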
for temp in temps:
for intrins_coef in intrins_coefs:
model = self.Class(**intrins_coef)
with self.subTest(**intrins_coef, temp=temp):
for inp in inputs:
num = num_deriv(np.array(inp), model, temperature=temp)
ana = model._compute_dpixel_ddistorted_gnomic(temperature=temp)
np.testing.assert_allclose(num, ana, atol=1e-14)
def test__compute_dpixel_dintrinsic(self):
def num_deriv(loc, cmodel, delta=1e-6) -> np.ndarray:
model_pert = cmodel.copy()
model_pert.kx += delta
pix_pert_kx_f = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.kxy += delta
pix_pert_kxy_f = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.ky += delta
pix_pert_ky_f = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.px += delta
pix_pert_px_f = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.py += delta
pix_pert_py_f = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.kx -= delta
pix_pert_kx_b = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.kxy -= delta
pix_pert_kxy_b = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.ky -= delta
pix_pert_ky_b = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.px -= delta
pix_pert_px_b = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.py -= delta
pix_pert_py_b = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
return np.array([(pix_pert_kx_f - pix_pert_kx_b) / (2 * delta),
(pix_pert_ky_f - pix_pert_ky_b) / (2 * delta),
(pix_pert_kxy_f - pix_pert_kxy_b) / (2 * delta),
(pix_pert_px_f - pix_pert_px_b) / (2 * delta),
(pix_pert_py_f - pix_pert_py_b) / (2 * delta)]).T
intrins_coefs = [{"fx": 1.5, "fy": 0, "alpha": 0, "px": 0, "py": 0},
{"fx": 0, "fy": 1.5, "alpha": 0, "px": 0, "py": 0},
{"fx": 0, "fy": 0, "alpha": 1.5, "px": 0, "py": 0},
{"fx": 0, "fy": 0, "alpha": 0, "px": 1.5, "py": 0},
{"fx": 0, "fy": 0, "alpha": 0, "px": 0, "py": 1.5},
{"fx": -1.5, "fy": -1.5, "alpha": -1.5, "px": 1.5, "py": 1.5}]
inputs = [[1e-6, 1e-6], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [0, 1], [0, -1], [0, 1.5], [0, -1.5], [1, 1]]
for intrins_coef in intrins_coefs:
model = self.Class(**intrins_coef)
for inp in inputs:
with self.subTest(**intrins_coef, inp=inp):
num = num_deriv(inp, model, delta=1e-5)
ana = model._compute_dpixel_dintrinsic(np.array(inp))
np.testing.assert_allclose(num, ana, atol=1e-14, rtol=1e-5)
def test__compute_ddistorted_gnomic_ddistortion(self):
def num_deriv(loc, cmodel, delta=1e-8) -> np.ndarray:
model_pert = cmodel.copy()
model_pert.k1 += delta
loc_pert_k1_f = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.k2 += delta
loc_pert_k2_f = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.k3 += delta
loc_pert_k3_f = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.p1 += delta
loc_pert_p1_f = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.p2 += delta
loc_pert_p2_f = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.k1 -= delta
loc_pert_k1_b = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.k2 -= delta
loc_pert_k2_b = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.k3 -= delta
loc_pert_k3_b = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.p1 -= delta
loc_pert_p1_b = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.p2 -= delta
loc_pert_p2_b = model_pert.apply_distortion(loc)
return np.array([(loc_pert_k1_f - loc_pert_k1_b) / (2 * delta),
(loc_pert_k2_f - loc_pert_k2_b) / (2 * delta),
(loc_pert_k3_f - loc_pert_k3_b) / (2 * delta),
(loc_pert_p1_f - loc_pert_p1_b) / (2 * delta),
(loc_pert_p2_f - loc_pert_p2_b) / (2 * delta)]).T
dist_coefs = [{"k1": 1.5, "k2": 0, "k3": 0, "p1": 0, "p2": 0},
{"k1": 0, "k2": 1.5, "k3": 0, "p1": 0, "p2": 0},
{"k1": 0, "k2": 0, "k3": 1.5, "p1": 0, "p2": 0},
{"k1": 0, "k2": 0, "k3": 0, "p1": 1.5, "p2": 0},
{"k1": 0, "k2": 0, "k3": 0, "p1": 0, "p2": 1.5}]
inputs = [[0, 0], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [0, 1], [0, -1], [0, 1.5], [0, -1.5], [1, 1]]
for dist_coef in dist_coefs:
model = self.Class(**dist_coef)
with self.subTest(**dist_coef):
for inp in inputs:
r = np.sqrt(inp[0] ** 2 + inp[1] ** 2)
r2 = r ** 2
r4 = r ** 4
r6 = r ** 6
num = num_deriv(np.array(inp), model)
ana = model._compute_ddistorted_gnomic_ddistortion(np.array(inp), r2, r4, r6)
np.testing.assert_allclose(num, ana, atol=1e-14)
def test__compute_dgnomic_dcamera_point(self):
def num_deriv(loc, cmodel, delta=1e-8) -> np.ndarray:
loc_pert = np.array(loc) + [delta, 0, 0]
gnom_pert_x_f = cmodel.get_projections(loc_pert)[0]
loc_pert = np.array(loc) + [0, delta, 0]
gnom_pert_y_f = cmodel.get_projections(loc_pert)[0]
loc_pert = np.array(loc) + [0, 0, delta]
gnom_pert_z_f = cmodel.get_projections(loc_pert)[0]
loc_pert = np.array(loc) - [delta, 0, 0]
gnom_pert_x_b = cmodel.get_projections(loc_pert)[0]
loc_pert = np.array(loc) - [0, delta, 0]
gnom_pert_y_b = cmodel.get_projections(loc_pert)[0]
loc_pert = np.array(loc) - [0, 0, delta]
gnom_pert_z_b = cmodel.get_projections(loc_pert)[0]
return np.array([(gnom_pert_x_f - gnom_pert_x_b) / (2 * delta),
(gnom_pert_y_f - gnom_pert_y_b) / (2 * delta),
(gnom_pert_z_f - gnom_pert_z_b) / (2 * delta)]).T
inputs = [[0, 0, 1], [0.5, 0, 1], [0, 0.5, 1], [0.5, 0.5, 1], [-0.5, 0, 1], [0, -0.5, 1], [-0.5, -0.5, 1],
[5, 10, 1000.23], [0.5, 1e-14, 1]]
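# The final input has a near-zero y component, exercising the derivative close
# to the axis where precision loss is most likely.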
model = self.Class()
for inp in inputs:
num = num_deriv(inp, model)
ana = model._compute_dgnomic_dcamera_point(np.array(inp))
np.testing.assert_allclose(num, ana, atol=1e-9, rtol=1e-5)
def test_get_jacobian_row(self):
def num_deriv(loc, temp, cmodel, delta=1e-8, image=0) -> np.ndarray:
model_pert = cmodel.copy()
model_pert.fx += delta
pix_pert_fx_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.fy += delta
pix_pert_fy_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.alpha += delta
pix_pert_skew_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.px += delta
pix_pert_px_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.py += delta
pix_pert_py_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.a1 += delta
pix_pert_a1_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.a2 += delta
pix_pert_a2_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.a3 += delta
pix_pert_a3_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.fx -= delta
pix_pert_fx_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.fy -= delta
pix_pert_fy_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.alpha -= delta
pix_pert_skew_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.px -= delta
pix_pert_px_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.py -= delta
pix_pert_py_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.k1 += delta
pix_pert_k1_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.k2 += delta
pix_pert_k2_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.k3 += delta
pix_pert_k3_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.p1 += delta
pix_pert_p1_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.p2 += delta
pix_pert_p2_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.k1 -= delta
pix_pert_k1_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.k2 -= delta
pix_pert_k2_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.k3 -= delta
pix_pert_k3_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.p1 -= delta
pix_pert_p1_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.p2 -= delta
pix_pert_p2_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][0] += delta
pix_pert_mx_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][1] += delta
pix_pert_my_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][2] += delta
pix_pert_mz_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][0] -= delta
pix_pert_mx_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][1] -= delta
pix_pert_my_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][2] -= delta
pix_pert_mz_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.a1 -= delta
pix_pert_a1_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.a2 -= delta
pix_pert_a2_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.a3 -= delta
pix_pert_a3_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
return np.vstack([(pix_pert_fx_f - pix_pert_fx_b) / (delta * 2),
(pix_pert_fy_f - pix_pert_fy_b) / (delta * 2),
(pix_pert_skew_f - pix_pert_skew_b) / (delta * 2),
(pix_pert_px_f - pix_pert_px_b) / (delta * 2),
(pix_pert_py_f - pix_pert_py_b) / (delta * 2),
(pix_pert_k1_f - pix_pert_k1_b) / (delta * 2),
(pix_pert_k2_f - pix_pert_k2_b) / (delta * 2),
(pix_pert_k3_f - pix_pert_k3_b) / (delta * 2),
(pix_pert_p1_f - pix_pert_p1_b) / (delta * 2),
(pix_pert_p2_f - pix_pert_p2_b) / (delta * 2),
(pix_pert_a1_f - pix_pert_a1_b) / (delta * 2),
(pix_pert_a2_f - pix_pert_a2_b) / (delta * 2),
(pix_pert_a3_f - pix_pert_a3_b) / (delta * 2),
np.zeros((image * 3, 2)),
(pix_pert_mx_f - pix_pert_mx_b) / (delta * 2),
(pix_pert_my_f - pix_pert_my_b) / (delta * 2),
(pix_pert_mz_f - pix_pert_mz_b) / (delta * 2)]).T
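# Column ordering matches the parameter layout used above: five intrinsic
# terms (fx, fy, alpha, px, py), the five Brown coefficients (k1, k2, k3, p1,
# p2), the three temperature coefficients, then three misalignment components
# per image, zero-padded for earlier images.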
model = self.Class(fx=4050.5, fy=5050.25, alpha=1.5, px=1500, py=1500.5,
a1=0.15e-7, a2=-0.01e-8, a3=1e-9,
k1=0.5, k2=-0.3, k3=0.15, p1=1e-7, p2=1e-6, misalignment=[[1e-9, -1e-9, 2e-9],
[-1e-9, 2e-9, -1e-9]])
model.estimate_multiple_misalignments = True
inputs = [[0.5, 0, 1], [0, 0.5, 1], [0.5, 0.5, 1], [-0.5, 0, 1], [0, -0.5, 1], [-0.5, -0.5, 1],
[5, 10, 1000.23], [[1], [2], [1200.23]]]
temps = [0, 1.5, -10]
# TODO: investigate whether the loose step and tolerances used below (delta=1e-2, rtol=1e-1) are actually correct
for temperature in temps:
for inp in inputs:
with self.subTest(temperature=temperature, inp=inp):
num = num_deriv(inp, temperature, model, delta=1e-2)
ana = model._get_jacobian_row(np.array(inp), 0, 1, temperature=temperature)
np.testing.assert_allclose(ana, num, rtol=1e-1, atol=1e-10)
num = num_deriv(inp, temperature, model, delta=1e-2, image=1)
ana = model._get_jacobian_row(np.array(inp), 1, 2, temperature=temperature)
np.testing.assert_allclose(ana, num, atol=1e-10, rtol=1e-1)
def test_compute_jacobian(self):
def num_deriv(loc, temp, cmodel, delta=1e-8, image=0, nimages=2) -> np.ndarray:
model_pert = cmodel.copy()
model_pert.fx += delta
pix_pert_fx_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.fy += delta
pix_pert_fy_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.alpha += delta
pix_pert_skew_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.px += delta
pix_pert_px_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.py += delta
pix_pert_py_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.a1 += delta
pix_pert_a1_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.a2 += delta
pix_pert_a2_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.a3 += delta
pix_pert_a3_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.fx -= delta
pix_pert_fx_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.fy -= delta
pix_pert_fy_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.alpha -= delta
pix_pert_skew_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.px -= delta
pix_pert_px_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.py -= delta
pix_pert_py_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.k1 += delta
pix_pert_k1_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.k2 += delta
pix_pert_k2_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.k3 += delta
pix_pert_k3_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.p1 += delta
pix_pert_p1_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.p2 += delta
pix_pert_p2_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.k1 -= delta
pix_pert_k1_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.k2 -= delta
pix_pert_k2_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.k3 -= delta
pix_pert_k3_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.p1 -= delta
pix_pert_p1_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.p2 -= delta
pix_pert_p2_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][0] += delta
pix_pert_mx_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][1] += delta
pix_pert_my_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][2] += delta
pix_pert_mz_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][0] -= delta
pix_pert_mx_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][1] -= delta
pix_pert_my_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][2] -= delta
pix_pert_mz_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.a1 -= delta
pix_pert_a1_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.a2 -= delta
pix_pert_a2_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.a3 -= delta
pix_pert_a3_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
return np.vstack([(pix_pert_fx_f - pix_pert_fx_b) / (delta * 2),
(pix_pert_fy_f - pix_pert_fy_b) / (delta * 2),
(pix_pert_skew_f - pix_pert_skew_b) / (delta * 2),
(pix_pert_px_f - pix_pert_px_b) / (delta * 2),
(pix_pert_py_f - pix_pert_py_b) / (delta * 2),
(pix_pert_k1_f - pix_pert_k1_b) / (delta * 2),
(pix_pert_k2_f - pix_pert_k2_b) / (delta * 2),
(pix_pert_k3_f - pix_pert_k3_b) / (delta * 2),
(pix_pert_p1_f - pix_pert_p1_b) / (delta * 2),
(pix_pert_p2_f - pix_pert_p2_b) / (delta * 2),
(pix_pert_a1_f - pix_pert_a1_b) / (delta * 2),
(pix_pert_a2_f - pix_pert_a2_b) / (delta * 2),
(pix_pert_a3_f - pix_pert_a3_b) / (delta * 2),
np.zeros((image * 3, 2)),
(pix_pert_mx_f - pix_pert_mx_b) / (delta * 2),
(pix_pert_my_f - pix_pert_my_b) / (delta * 2),
(pix_pert_mz_f - pix_pert_mz_b) / (delta * 2),
np.zeros(((nimages - image - 1) * 3, 2))]).T
model = self.Class(fx=4050.5, fy=5050.25, alpha=1.5, px=1500, py=1500.5,
k1=0.5, k2=-0.3, k3=0.15, p1=1e-7, p2=1e-6, misalignment=[[1e-9, -1e-9, 2e-9],
[-1e-9, 2e-9, -1e-9],
[1e-10, 2e-11, 3e-12]],
a1=0.15e-6, a2=-0.01e-7, a3=0.5e-8,
estimation_parameters=['intrinsic', 'temperature dependence', 'multiple misalignments'])
inputs = [np.array([[0.5, 0, 1]]).T,
np.array([[0, 0.5, 1], [0.5, 0.5, 1], [-0.5, 0, 1]]).T,
np.array([[0.1, -0.5, 1], [-0.5, -0.5, 1], [5, 10, 1000.23], [1, 2, 1200.23]]).T]
model.use_a_priori = False
temps = [0, -20, 20.5]
jac_ana = model.compute_jacobian(inputs, temperature=temps)
jac_num = []
numim = len(inputs)
for ind, inp in enumerate(inputs):
temperature = temps[ind]
for vec in inp.T:
jac_num.append(num_deriv(vec.T, temperature, model, delta=1e-3, image=ind, nimages=numim))
np.testing.assert_allclose(jac_ana, np.vstack(jac_num), rtol=1e-1, atol=1e-9)
model.use_a_priori = True
jac_ana = model.compute_jacobian(inputs, temperature=temps)
jac_num = []
numim = len(inputs)
for ind, inp in enumerate(inputs):
temperature = temps[ind]
for vec in inp.T:
jac_num.append(num_deriv(vec.T, temperature, model, delta=1e-3, image=ind, nimages=numim))
jac_num = np.vstack(jac_num)
jac_num = np.pad(jac_num, [(0, jac_num.shape[1]), (0, 0)], 'constant', constant_values=0)
jac_num[-jac_num.shape[1]:] = np.eye(jac_num.shape[1])
np.testing.assert_allclose(jac_ana, jac_num, rtol=1e-1, atol=1e-9)
def test_apply_update(self):
model_param = {"fx": 0, "fy": 0, "alpha": 0, "k1": 0,
"k2": 0, "k3": 0, "p1": 0, "p2": 0, 'a1': 0, 'a2': 0, 'a3': 0,
"px": 0, "py": 0,
"misalignment": [[0, 0, 0], [0, 0, 0]]}
model = self.Class(**model_param,
estimation_parameters=['intrinsic', 'temperature dependence', 'multiple misalignments'])
update_vec = np.arange(19)
model.apply_update(update_vec)
keys = list(model_param.keys())
keys.remove('misalignment')
for key in keys:
self.assertEqual(getattr(model, key), update_vec[model.element_dict[key][0]])
for ind, vec in enumerate(update_vec[13:].reshape(-1, 3)):
np.testing.assert_array_almost_equal(at.Rotation(vec).q, at.Rotation(model.misalignment[ind]).q)
def test_pixels_to_gnomic(self):
intrins_param = {"fx": 3000, "fy": 4000, "alpha": 0.5,
"px": 4005.23, 'py': 2000.33, 'a1': 1e-5, 'a2': 1e-6, 'a3': -1e-7}
dist_coefs = [{"k1": 1.5e-1, "k2": 0, "k3": 0, "p1": 0, "p2": 0},
{"k1": 0, "k2": 1.5e-1, "k3": 0, "p1": 0, "p2": 0},
{"k1": 0, "k2": 0, "k3": 1.5e-1, "p1": 0, "p2": 0},
{"k1": 0, "k2": 0, "k3": 0, "p1": 1.5e-6, "p2": 0},
{"k1": 0, "k2": 0, "k3": 0, "p1": 0, "p2": 1.5e-6}]
pinhole = [[0, 0], [0.1, 0], [-0.1, 0], [0.15, 0], [-0.15, 0], [[0.15], [0]], [[0.15, -0.1], [0, 0]],
[0, 0.1], [0, -0.1], [0, 0.15], [0, -0.15], [[0], [0.15]], [[0, 0], [0.15, -0.1]], [0.1, 0.1]]
temperatures = [0, 1, -1, 10.5, -10.5]
for temp in temperatures:
for dist in dist_coefs:
model = self.Class(**dist, **intrins_param)
for fp_pinhole in pinhole:
with self.subTest(**dist, temp=temp, fp_pinhole=fp_pinhole):
fp_dist = model.apply_distortion(np.array(fp_pinhole))
fp_dist *= model.get_temperature_scale(temp)
pix_dist = ((model.intrinsic_matrix[:, :2] @ fp_dist).T + model.intrinsic_matrix[:, 2]).T
fp_undist = model.pixels_to_gnomic(pix_dist, temperature=temp)
np.testing.assert_allclose(fp_undist, fp_pinhole, atol=1e-13)
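# The round trip above exercises the full forward pipeline -- gnomic ->
# apply_distortion -> temperature scale -> intrinsic matrix -> pixels -- so
# pixels_to_gnomic must undo all three stages to recover fp_pinhole to 1e-13.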
def test_undistort_pixels(self):
intrins_param = {"fx": 3000, "fy": 4000, "alpha": 0.5,
"px": 4005.23, 'py': 2000.33, 'a1': 1e-5, 'a2': 1e-6, 'a3': -1e-7}
dist_coefs = [{"k1": 1.5e-1, "k2": 0, "k3": 0, "p1": 0, "p2": 0},
{"k1": 0, "k2": 1.5e-1, "k3": 0, "p1": 0, "p2": 0},
{"k1": 0, "k2": 0, "k3": 1.5e-1, "p1": 0, "p2": 0},
{"k1": 0, "k2": 0, "k3": 0, "p1": 1.5e-6, "p2": 0},
{"k1": 0, "k2": 0, "k3": 0, "p1": 0, "p2": 1.5e-6}]
pinhole = [[0, 0], [0.1, 0], [-0.1, 0], [0.15, 0], [-0.15, 0], [[0.15], [0]], [[0.15, -0.1], [0, 0]],
[0, 0.1], [0, -0.1], [0, 0.15], [0, -0.15], [[0], [0.15]], [[0, 0], [0.15, -0.1]], [0.1, 0.1]]
temperatures = [0, 1, -1, 10.5, -10.5]
for temp in temperatures:
for dist in dist_coefs:
model = self.Class(**dist, **intrins_param)
with self.subTest(**dist, temp=temp):
for fp_pinhole in pinhole:
fp_pinhole = np.array(fp_pinhole).astype(np.float64)
fp_dist = model.apply_distortion(fp_pinhole)
fp_dist *= model.get_temperature_scale(temp)
pix_dist = ((model.intrinsic_matrix[:, :2] @ fp_dist).T + model.intrinsic_matrix[:, 2]).T
pix_undist = model.undistort_pixels(pix_dist, temperature=temp)
fp_pinhole *= model.get_temperature_scale(temp)
pix_pinhole = ((model.intrinsic_matrix[:, :2] @ fp_pinhole).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_allclose(pix_undist, pix_pinhole, atol=1e-13)
def test_pixels_to_unit(self):
intrins_param = {"fx": 3000, "fy": 4000, "alpha": 0.5,
"px": 4005.23, 'py': 2000.33, 'a1': 1e-6, 'a2': -2e-7, 'a3': 4.5e-8}
dist_coefs = [{"k1": 1.5e-1, "k2": 0, "k3": 0, "p1": 0, "p2": 0},
{"k1": 0, "k2": 1.5e-1, "k3": 0, "p1": 0, "p2": 0},
{"k1": 0, "k2": 0, "k3": 1.5e-1, "p1": 0, "p2": 0},
{"k1": 0, "k2": 0, "k3": 0, "p1": 1.5e-6, "p2": 0},
{"k1": 0, "k2": 0, "k3": 0, "p1": 0, "p2": 1.5e-6},
{"misalignment": np.array([1e-11, 2e-12, -1e-10])},
{"misalignment": np.array([[1e-11, 2e-12, -1e-10], [-1e-13, 1e-11, 2e-12]]),
"estimation_parameters": "multiple misalignments"}]
camera_vecs = [[0, 0, 1], [0.1, 0, 1], [-0.1, 0, 1], [0, 0.1, 1], [0, -0.1, 1], [0.1, 0.1, 1],
[-0.1, -0.1, 1], [[0.1, -0.1], [-0.1, 0.1], [1, 1]]]
temperatures = [0, 1, -1, 10.5, -10.5]
for temp in temperatures:
for dist in dist_coefs:
model = self.Class(**dist, **intrins_param)
with self.subTest(**dist, temp=temp):
for vec in camera_vecs:
pixel_loc = model.project_onto_image(vec, image=-1, temperature=temp)
unit_vec = model.pixels_to_unit(pixel_loc, image=-1, temperature=temp)
unit_true = np.array(vec).astype(np.float64)
unit_true /= np.linalg.norm(unit_true, axis=0, keepdims=True)
np.testing.assert_allclose(unit_vec, unit_true, atol=1e-13)
def test_overwrite(self):
model1 = self.Class(field_of_view=10, intrinsic_matrix=np.array([[1, 2, 3], [0, 5, 6]]),
distortion_coefficients=np.array([1, 2, 3, 4, 5]),
misalignment=[[1, 2, 3], [4, 5, 6]], use_a_priori=False,
estimation_parameters=['multiple misalignments'])
model2 = self.Class(field_of_view=20, intrinsic_matrix=np.array([[11, 12, 13], [0, 15, 16]]),
distortion_coefficients=np.array([11, 12, 13, 14, 15]),
misalignment=[[11, 12, 13], [14, 15, 16]], use_a_priori=True,
estimation_parameters=['single misalignment'])
modeltest = model1.copy()
modeltest.overwrite(model2)
self.assertEqual(model2.field_of_view, modeltest.field_of_view)
self.assertEqual(model2.use_a_priori, modeltest.use_a_priori)
self.assertEqual(model2.estimate_multiple_misalignments, modeltest.estimate_multiple_misalignments)
np.testing.assert_array_equal(model2.intrinsic_matrix, modeltest.intrinsic_matrix)
np.testing.assert_array_equal(model2.distortion_coefficients, modeltest.distortion_coefficients)
np.testing.assert_array_equal(model2.misalignment, modeltest.misalignment)
np.testing.assert_array_equal(model2.estimation_parameters, modeltest.estimation_parameters)
modeltest = model2.copy()
modeltest.overwrite(model1)
self.assertEqual(model1.field_of_view, modeltest.field_of_view)
self.assertEqual(model1.use_a_priori, modeltest.use_a_priori)
self.assertEqual(model1.estimate_multiple_misalignments, modeltest.estimate_multiple_misalignments)
np.testing.assert_array_equal(model1.intrinsic_matrix, modeltest.intrinsic_matrix)
np.testing.assert_array_equal(model1.distortion_coefficients, modeltest.distortion_coefficients)
np.testing.assert_array_equal(model1.misalignment, modeltest.misalignment)
np.testing.assert_array_equal(model1.estimation_parameters, modeltest.estimation_parameters)
def test_distort_pixels(self):
model = self.Class(kx=1000, ky=-950.5, px=4500, py=139.32, a1=1e-3, a2=1e-4, a3=1e-5,
kxy=0.5, radial2=1e-5, radial4=1e-5, radial6=1e-7,
tiptilt_x=1e-6, tiptilt_y=2e-12)
pixels = [[0, 1], [1, 0], [-1, 0], [0, -1], [9000., 200.2],
[[4500, 100, 10.98], [0, 139.23, 200.3]]]
temperatures = [0, 1, -1, 10.5, -10.5]
for pix in pixels:
for temp in temperatures:
with self.subTest(pix=pix, temp=temp):
undist_pix = model.undistort_pixels(pix, temperature=temp)
dist_pix = model.distort_pixels(undist_pix, temperature=temp)
np.testing.assert_allclose(dist_pix, pix, atol=1e-10)
def test_to_from_elem(self):
element = etree.Element(self.Class.__name__)
model = self.Class(field_of_view=5, use_a_priori=True,
misalignment=[1, 2, 3], kx=2, ky=200, px=50, py=300, kxy=12123,
a1=37, a2=1, a3=-1230, k1=5, k2=10, k3=20, p1=-10, p2=35,
estimation_parameters=['kx', 'multiple misalignments'], n_rows=20, n_cols=30)
model_copy = model.copy()
with self.subTest(misalignment=True):
element = model.to_elem(element, misalignment=True)
self.assertEqual(model, model_copy)
model_new = self.Class.from_elem(element)
self.assertEqual(model, model_new)
with self.subTest(misalignment=False):
element = model.to_elem(element, misalignment=False)
self.assertEqual(model, model_copy)
model_new = self.Class.from_elem(element)
model.estimation_parameters[-1] = 'single misalignment'
model.estimate_multiple_misalignments = False
model.misalignment = np.zeros(3)
self.assertEqual(model, model_new)
def test_distortion_map(self):
model = self.Class(kx=100, ky=-985.234, px=1000, py=1095, kxy=10,
k1=1e-6, k2=1e-12, k3=-4e-10, p1=6e-7, p2=-1e-5,
a1=1e-6, a2=-1e-7, a3=4e-12)
rows, cols, dist = model.distortion_map((2000, 250), step=10)
rl, cl = np.arange(0, 2000, 10), np.arange(0, 250, 10)
rs, cs = np.meshgrid(rl, cl, indexing='ij')
np.testing.assert_array_equal(rows, rs)
np.testing.assert_array_equal(cols, cs)
distl = model.distort_pixels(np.vstack([cs.flatten(), rs.flatten()]))
np.testing.assert_array_equal(distl - np.vstack([cs.flatten(), rs.flatten()]), dist)
class TestOpenCVModel(TestPinholeModel):
def setUp(self):
self.Class = OpenCVModel
# Not supported for this model
test__compute_dgnomic_dfocal_length = None
def test___init__(self):
model = self.Class(intrinsic_matrix=np.array([[1, 2, 3], [4, 5, 6]]), field_of_view=20.5,
use_a_priori=True, misalignment=[np.zeros(3), np.ones(3)],
distortion_coefficients=np.array([1, 2, 3, 4, 5]),
estimation_parameters='basic intrinsic',
a1=1, a2=2, a3=3)
np.testing.assert_array_equal(model.intrinsic_matrix, [[1, 2, 3], [4, 5, 6]])
self.assertEqual(model.focal_length, 1)
self.assertEqual(model.field_of_view, 20.5)
self.assertTrue(model.use_a_priori)
self.assertEqual(model.estimation_parameters, ['basic intrinsic'])
self.assertEqual(model.a1, 1)
self.assertEqual(model.a2, 2)
self.assertEqual(model.a3, 3)
np.testing.assert_array_equal(model.distortion_coefficients, np.arange(1, 6))
model = self.Class(kx=1, fy=2, px=4, py=5, field_of_view=20.5,
use_a_priori=True, misalignment=[np.zeros(3), np.ones(3)], kxy=80,
estimation_parameters=['kx', 'px'], n_rows=500, n_cols=600,
radial2n=1, radial4n=2, k3=3, p1=4, tiptilt_x=5, radial2d=9, k5=100, k6=-90,
s1=400, thinprism_2=-500, s3=600, s4=5)
np.testing.assert_array_equal(model.intrinsic_matrix, [[1, 80, 4], [0, 2, 5]])
self.assertEqual(model.focal_length, 1)
self.assertEqual(model.field_of_view, 20.5)
self.assertTrue(model.use_a_priori)
self.assertEqual(model.estimation_parameters, ['kx', 'px'])
self.assertEqual(model.n_rows, 500)
self.assertEqual(model.n_cols, 600)
np.testing.assert_array_equal(model.distortion_coefficients, [1, 2, 3, 9, 100, -90, 4, 5, 400, -500, 600, 5])
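# For this model the 12-element distortion_coefficients vector is ordered
# [k1, k2, k3, k4, k5, k6, p1, p2, s1, s2, s3, s4]; the alias properties
# exercised below (radial2n/radial2d, tiptilt_x/tiptilt_y, thinprism_1..4)
# read and write those same slots, as the index checks confirm.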
def test_fx(self):
model = self.Class(intrinsic_matrix=np.array([[1, 0, 0], [0, 0, 0]]))
self.assertEqual(model.kx, 1)
model.kx = 100
self.assertEqual(model.kx, 100)
self.assertEqual(model.intrinsic_matrix[0, 0], 100)
def test_fy(self):
model = self.Class(intrinsic_matrix=np.array([[0, 0, 0], [0, 1, 0]]))
self.assertEqual(model.ky, 1)
model.ky = 100
self.assertEqual(model.ky, 100)
self.assertEqual(model.intrinsic_matrix[1, 1], 100)
def test_kxy(self):
model = self.Class(intrinsic_matrix=np.array([[0, 1, 0], [0, 0, 0]]))
self.assertEqual(model.kxy, 1)
model.kxy = 100
self.assertEqual(model.kxy, 100)
self.assertEqual(model.intrinsic_matrix[0, 1], 100)
def test_alpha(self):
model = self.Class(intrinsic_matrix=np.array([[0, 1, 0], [0, 0, 0]]))
self.assertEqual(model.alpha, 1)
model.alpha = 100
self.assertEqual(model.alpha, 100)
self.assertEqual(model.intrinsic_matrix[0, 1], 100)
def test_k1(self):
model = self.Class(distortion_coefficients=np.array([1, 0, 0, 0, 0]))
self.assertEqual(model.k1, 1)
model.k1 = 100
self.assertEqual(model.k1, 100)
self.assertEqual(model.distortion_coefficients[0], 100)
def test_k2(self):
model = self.Class(distortion_coefficients=np.array([0, 1, 0, 0, 0]))
self.assertEqual(model.k2, 1)
model.k2 = 100
self.assertEqual(model.k2, 100)
self.assertEqual(model.distortion_coefficients[1], 100)
def test_k3(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0]))
self.assertEqual(model.k3, 1)
model.k3 = 100
self.assertEqual(model.k3, 100)
self.assertEqual(model.distortion_coefficients[2], 100)
def test_k4(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0]))
self.assertEqual(model.k4, 1)
model.k4 = 100
self.assertEqual(model.k4, 100)
self.assertEqual(model.distortion_coefficients[3], 100)
def test_k5(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0]))
self.assertEqual(model.k5, 1)
model.k5 = 100
self.assertEqual(model.k5, 100)
self.assertEqual(model.distortion_coefficients[4], 100)
def test_k6(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0]))
self.assertEqual(model.k6, 1)
model.k6 = 100
self.assertEqual(model.k6, 100)
self.assertEqual(model.distortion_coefficients[5], 100)
def test_p1(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 0, 0, 0, 1, 0]))
self.assertEqual(model.p1, 1)
model.p1 = 100
self.assertEqual(model.p1, 100)
self.assertEqual(model.distortion_coefficients[6], 100)
def test_p2(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 0, 0, 0, 0, 1]))
self.assertEqual(model.p2, 1)
model.p2 = 100
self.assertEqual(model.p2, 100)
self.assertEqual(model.distortion_coefficients[7], 100)
def test_s1(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0]))
self.assertEqual(model.s1, 1)
model.s1 = 100
self.assertEqual(model.s1, 100)
self.assertEqual(model.distortion_coefficients[8], 100)
def test_s2(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0]))
self.assertEqual(model.s2, 1)
model.s2 = 100
self.assertEqual(model.s2, 100)
self.assertEqual(model.distortion_coefficients[9], 100)
def test_s3(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0]))
self.assertEqual(model.s3, 1)
model.s3 = 100
self.assertEqual(model.s3, 100)
self.assertEqual(model.distortion_coefficients[10], 100)
def test_s4(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]))
self.assertEqual(model.s4, 1)
model.s4 = 100
self.assertEqual(model.s4, 100)
self.assertEqual(model.distortion_coefficients[11], 100)
def test_radial2n(self):
model = self.Class(distortion_coefficients=np.array([1, 0, 0, 0, 0]))
self.assertEqual(model.radial2n, 1)
model.radial2n = 100
self.assertEqual(model.radial2n, 100)
self.assertEqual(model.distortion_coefficients[0], 100)
def test_radial4n(self):
model = self.Class(distortion_coefficients=np.array([0, 1, 0, 0, 0]))
self.assertEqual(model.radial4n, 1)
model.radial4n = 100
self.assertEqual(model.radial4n, 100)
self.assertEqual(model.distortion_coefficients[1], 100)
def test_radial6n(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 1, 0, 0]))
self.assertEqual(model.radial6n, 1)
model.radial6n = 100
self.assertEqual(model.radial6n, 100)
self.assertEqual(model.distortion_coefficients[2], 100)
def test_radial2d(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0]))
self.assertEqual(model.radial2d, 1)
model.radial2d = 100
self.assertEqual(model.radial2d, 100)
self.assertEqual(model.distortion_coefficients[3], 100)
def test_radial4d(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0]))
self.assertEqual(model.radial4d, 1)
model.radial4d = 100
self.assertEqual(model.radial4d, 100)
self.assertEqual(model.distortion_coefficients[4], 100)
def test_radial6d(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0]))
self.assertEqual(model.radial6d, 1)
model.radial6d = 100
self.assertEqual(model.radial6d, 100)
self.assertEqual(model.distortion_coefficients[5], 100)
def test_tiptilt_y(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 0, 0, 0, 1, 0]))
self.assertEqual(model.tiptilt_y, 1)
model.tiptilt_y = 100
self.assertEqual(model.tiptilt_y, 100)
self.assertEqual(model.distortion_coefficients[6], 100)
def test_tiptilt_x(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 0, 0, 0, 0, 1]))
self.assertEqual(model.tiptilt_x, 1)
model.tiptilt_x = 100
self.assertEqual(model.tiptilt_x, 100)
self.assertEqual(model.distortion_coefficients[7], 100)
def test_thinprism_1(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0]))
self.assertEqual(model.thinprism_1, 1)
model.thinprism_1 = 100
self.assertEqual(model.thinprism_1, 100)
self.assertEqual(model.distortion_coefficients[8], 100)
def test_thinprism_2(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0]))
self.assertEqual(model.thinprism_2, 1)
model.thinprism_2 = 100
self.assertEqual(model.thinprism_2, 100)
self.assertEqual(model.distortion_coefficients[9], 100)
def test_thinprism_3(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0]))
self.assertEqual(model.thinprism_3, 1)
model.thinprism_3 = 100
self.assertEqual(model.thinprism_3, 100)
self.assertEqual(model.distortion_coefficients[10], 100)
def test_thinprism_4(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]))
self.assertEqual(model.thinprism_4, 1)
model.thinprism_4 = 100
self.assertEqual(model.thinprism_4, 100)
self.assertEqual(model.distortion_coefficients[11], 100)
def test_apply_distortion(self):
dist_coefs = [{"k1": 1.5},
{"k2": 1.5},
{"k3": 1.5},
{"p1": 1.5},
{"p2": 1.5},
{"k4": 1.5},
{"k5": 1.5},
{"k6": 1.5},
{"s1": 1.5},
{"s2": 1.5},
{"s3": 1.5},
{"s4": 1.5}]
inputs = [[0, 0], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [[1.5], [0]], [[1.5, -1], [0, 0]],
[0, 1], [0, -1], [0, 1.5], [0, -1.5], [[0], [1.5]], [[0, 0], [1.5, -1]], [1, 1]]
solus = [
# k1
[[0, 0], [2.5, 0], [-2.5, 0], [1.5 + 1.5 ** 4, 0], [-(1.5 + 1.5 ** 4), 0],
[[(1.5 + 1.5 ** 4)], [0]], [[(1.5 + 1.5 ** 4), -2.5], [0, 0]],
[0, 2.5], [0, -2.5], [0, (1.5 + 1.5 ** 4)], [0, -(1.5 + 1.5 ** 4)], [[0], [(1.5 + 1.5 ** 4)]],
[[0, 0], [(1.5 + 1.5 ** 4), -2.5]], [1 + 2 * 1.5, 1 + 2 * 1.5]],
# k2
[[0, 0], [2.5, 0], [-2.5, 0], [(1.5 + 1.5 ** 6), 0], [-(1.5 + 1.5 ** 6), 0],
[[(1.5 + 1.5 ** 6)], [0]], [[(1.5 + 1.5 ** 6), -2.5], [0, 0]],
[0, 2.5], [0, -2.5], [0, (1.5 + 1.5 ** 6)], [0, -(1.5 + 1.5 ** 6)], [[0], [(1.5 + 1.5 ** 6)]],
[[0, 0], [(1.5 + 1.5 ** 6), -2.5]], [1 + 4 * 1.5, 1 + 4 * 1.5]],
# k3
[[0, 0], [2.5, 0], [-2.5, 0], [(1.5 + 1.5 ** 8), 0], [-(1.5 + 1.5 ** 8), 0],
[[(1.5 + 1.5 ** 8)], [0]], [[(1.5 + 1.5 ** 8), -2.5], [0, 0]],
[0, 2.5], [0, -2.5], [0, (1.5 + 1.5 ** 8)], [0, -(1.5 + 1.5 ** 8)], [[0], [(1.5 + 1.5 ** 8)]],
[[0, 0], [(1.5 + 1.5 ** 8), -2.5]], [1 + 8 * 1.5, 1 + 8 * 1.5]],
# p1
[[0, 0], [1, 1.5], [-1, 1.5], [1.5, 1.5 ** 3], [-1.5, 1.5 ** 3], [[1.5], [1.5 ** 3]],
[[1.5, -1], [1.5 ** 3, 1.5]],
[0, 1 + 3 * 1.5], [0, -1 + 3 * 1.5], [0, 1.5 + 3 * 1.5 ** 3], [0, -1.5 + 3 * 1.5 ** 3],
[[0], [1.5 + 3 * 1.5 ** 3]],
[[0, 0], [1.5 + 3 * 1.5 ** 3, -1 + 3 * 1.5]], [1 + 2 * 1.5, 1 + 4 * 1.5]],
# p2
[[0, 0], [1 + 3 * 1.5, 0], [-1 + 3 * 1.5, 0], [1.5 + 3 * 1.5 ** 3, 0], [-1.5 + 3 * 1.5 ** 3, 0],
[[1.5 + 3 * 1.5 ** 3], [0]], [[1.5 + 3 * 1.5 ** 3, -1 + 3 * 1.5], [0, 0]],
[1.5, 1], [1.5, -1], [1.5 ** 3, 1.5], [1.5 ** 3, -1.5], [[1.5 ** 3], [1.5]],
[[1.5 ** 3, 1.5], [1.5, -1]],
[1 + 4 * 1.5, 1 + 2 * 1.5]],
# k4
[[0, 0], [1 / 2.5, 0], [-1 / 2.5, 0], [1.5 / (1 + 1.5 ** 3), 0], [-1.5 / (1 + 1.5 ** 3), 0],
[[1.5 / (1 + 1.5 ** 3)], [0]], [[1.5 / (1 + 1.5 ** 3), -1 / 2.5], [0, 0]],
[0, 1 / 2.5], [0, -1 / 2.5], [0, 1.5 / (1 + 1.5 ** 3)], [0, -1.5 / (1 + 1.5 ** 3)],
[[0], [1.5 / (1 + 1.5 ** 3)]], [[0, 0], [1.5 / (1 + 1.5 ** 3), -1 / 2.5]], [1 / 4, 1 / 4]],
# k5
[[0, 0], [1 / 2.5, 0], [-1 / 2.5, 0], [1.5 / (1 + 1.5 ** 5), 0], [-1.5 / (1 + 1.5 ** 5), 0],
[[1.5 / (1 + 1.5 ** 5)], [0]], [[1.5 / (1 + 1.5 ** 5), -1 / 2.5], [0, 0]],
[0, 1 / 2.5], [0, -1 / 2.5], [0, 1.5 / (1 + 1.5 ** 5)], [0, -1.5 / (1 + 1.5 ** 5)],
[[0], [1.5 / (1 + 1.5 ** 5)]], [[0, 0], [1.5 / (1 + 1.5 ** 5), -1 / 2.5]], [1 / 7, 1 / 7]],
# k6
[[0, 0], [1 / 2.5, 0], [-1 / 2.5, 0], [1.5 / (1 + 1.5 ** 7), 0], [-1.5 / (1 + 1.5 ** 7), 0],
[[1.5 / (1 + 1.5 ** 7)], [0]], [[1.5 / (1 + 1.5 ** 7), -1 / 2.5], [0, 0]],
[0, 1 / 2.5], [0, -1 / 2.5], [0, 1.5 / (1 + 1.5 ** 7)], [0, -1.5 / (1 + 1.5 ** 7)],
[[0], [1.5 / (1 + 1.5 ** 7)]], [[0, 0], [1.5 / (1 + 1.5 ** 7), -1 / 2.5]], [1 / 13, 1 / 13]],
# s1
[[0, 0], [1 + 1.5, 0], [-1 + 1.5, 0], [1.5 + 1.5 ** 3, 0], [-1.5 + 1.5 ** 3, 0],
[[1.5 + 1.5 ** 3], [0]], [[1.5 + 1.5 ** 3, -1 + 1.5], [0, 0]],
[1.5, 1], [1.5, -1], [1.5 ** 3, 1.5], [1.5 ** 3, -1.5],
[[1.5 ** 3], [1.5]], [[1.5 ** 3, 1.5], [1.5, -1]], [1 + 2 * 1.5, 1]],
# s2
[[0, 0], [1 + 1.5, 0], [-1 + 1.5, 0], [1.5 + 1.5 ** 5, 0], [-1.5 + 1.5 ** 5, 0],
[[1.5 + 1.5 ** 5], [0]], [[1.5 + 1.5 ** 5, -1 + 1.5], [0, 0]],
[1.5, 1], [1.5, -1], [1.5 ** 5, 1.5], [1.5 ** 5, -1.5],
[[1.5 ** 5], [1.5]], [[1.5 ** 5, 1.5], [1.5, -1]], [1 + 4 * 1.5, 1]],
# s3
[[0, 0], [1, 1.5], [-1, 1.5], [1.5, 1.5 ** 3], [-1.5, 1.5 ** 3],
[[1.5], [1.5 ** 3]], [[1.5, -1], [1.5 ** 3, 1.5]],
[0, 1 + 1.5], [0, -1 + 1.5], [0, 1.5 + 1.5 ** 3], [0, -1.5 + 1.5 ** 3],
[[0], [1.5 + 1.5 ** 3]], [[0, 0], [1.5 + 1.5 ** 3, -1 + 1.5]], [1, 1 + 2 * 1.5]],
# s4
[[0, 0], [1, 1.5], [-1, 1.5], [1.5, 1.5 ** 5], [-1.5, 1.5 ** 5],
[[1.5], [1.5 ** 5]], [[1.5, -1], [1.5 ** 5, 1.5]],
[0, 1 + 1.5], [0, -1 + 1.5], [0, 1.5 + 1.5 ** 5], [0, -1.5 + 1.5 ** 5],
[[0], [1.5 + 1.5 ** 5]], [[0, 0], [1.5 + 1.5 ** 5, -1 + 1.5]], [1, 1 + 4 * 1.5]]
]
for dist, sols in zip(dist_coefs, solus):
model = self.Class(**dist)
for inp, solu in zip(inputs, sols):
with self.subTest(**dist, inp=inp):
gnom_dist = model.apply_distortion(np.array(inp))
np.testing.assert_array_almost_equal(gnom_dist, solu)
def test_get_projections(self):
points = [[0, 0, 1], [-0.1, 0.2, 2.2], [[-0.1], [0.2], [2.2]], [[-0.1, 0], [0.2, 0], [2.2, 1]]]
model = self.Class(fx=4050.5, fy=3050.25, alpha=1.5, px=1500, py=1500.5,
k1=0.5, k2=-0.3, k3=0.15, p1=1e-7, p2=1e-6, a1=1e-1, a2=-1e-6, a3=3e-7,
k4=1, k5=-5, k6=11, s1=1e-6, s2=1e2, s3=-3e-3, s4=5e-1)
temps = [0, 1, -1, 10, -10]
for temp in temps:
with self.subTest(misalignment=None, temp=temp):
for point in points:
pin, dist, pix = model.get_projections(point, temperature=temp)
pin_true = np.array(point[:2]) / point[2]
dist_true = model.apply_distortion(pin_true)
dist_true *= model.get_temperature_scale(temp)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], dist_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_equal(pin, pin_true)
np.testing.assert_array_equal(dist, dist_true)
np.testing.assert_array_equal(pix, pix_true)
model = self.Class(fx=4050.5, fy=3050.25, alpha=1.5, px=1500, py=1500.5,
k1=0.5, k2=-0.3, k3=0.15, p1=1e-7, p2=1e-6,
k4=1, k5=-5, k6=11, s1=1e-6, s2=1e2, s3=-3e-3, s4=5e-1,
misalignment=[0, 0, np.pi])
with self.subTest(misalignment=[0, 0, np.pi]):
for point in points:
pin, dist, pix = model.get_projections(point)
pin_true = -np.array(point[:2]) / point[2]
dist_true = model.apply_distortion(pin_true)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], dist_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pin, pin_true)
np.testing.assert_array_almost_equal(dist, dist_true)
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(fx=4050.5, fy=3050.25, alpha=1.5, px=1500, py=1500.5,
k1=0.5, k2=-0.3, k3=0.15, p1=1e-7, p2=1e-6,
k4=1, k5=-5, k6=11, s1=1e-6, s2=1e2, s3=-3e-3, s4=5e-1,
misalignment=[np.pi, 0, 0])
with self.subTest(misalignment=[np.pi, 0, 0]):
for point in points:
pin, dist, pix = model.get_projections(point)
pin_true = np.array(point[:2]) / point[2]
pin_true[0] *= -1
dist_true = model.apply_distortion(pin_true)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], dist_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pin, pin_true)
np.testing.assert_array_almost_equal(dist, dist_true)
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(fx=4050.5, fy=3050.25, alpha=1.5, px=1500, py=1500.5,
k1=0.5, k2=-0.3, k3=0.15, p1=1e-7, p2=1e-6,
k4=1, k5=-5, k6=11, s1=1e-6, s2=1e2, s3=-3e-3, s4=5e-1,
misalignment=[0, np.pi, 0])
with self.subTest(misalignment=[0, np.pi, 0]):
for point in points:
pin, dist, pix = model.get_projections(point)
pin_true = np.array(point[:2]) / point[2]
pin_true[1] *= -1
dist_true = model.apply_distortion(pin_true)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], dist_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pin, pin_true)
np.testing.assert_array_almost_equal(dist, dist_true)
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(fx=4050.5, fy=3050.25, alpha=1.5, px=1500, py=1500.5,
k1=0.5, k2=-0.3, k3=0.15, p1=1e-7, p2=1e-6,
k4=1, k5=-5, k6=11, s1=1e-6, s2=1e2, s3=-3e-3, s4=5e-1,
misalignment=[1, 0.2, 0.3])
with self.subTest(misalignment=[1, 0.2, 0.3]):
rot_mat = at.rotvec_to_rotmat([1, 0.2, 0.3]).squeeze()
for point in points:
point_new = rot_mat @ point
pin, dist, pix = model.get_projections(point)
pin_true = np.array(point_new[:2]) / np.array(point_new[2])
dist_true = model.apply_distortion(pin_true)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], dist_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pin, pin_true)
np.testing.assert_array_almost_equal(dist, dist_true)
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(fx=4050.5, fy=3050.25, alpha=1.5, px=1500, py=1500.5,
k1=0.5, k2=-0.3, k3=0.15, p1=1e-7, p2=1e-6,
k4=1, k5=-5, k6=11, s1=1e-6, s2=1e2, s3=-3e-3, s4=5e-1,
misalignment=[[1, 0.2, 0.3], [0, 0, np.pi]])
model.estimate_multiple_misalignments = True
with self.subTest(misalignment=[[1, 0.2, 0.3], [0, 0, np.pi]]):
rot_mat = at.rotvec_to_rotmat([1, 0.2, 0.3]).squeeze()
for point in points:
point_new = rot_mat @ point
pin, dist, pix = model.get_projections(point, image=0)
pin_true = np.array(point_new[:2]) / np.array(point_new[2])
dist_true = model.apply_distortion(pin_true)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], dist_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pin, pin_true)
np.testing.assert_array_almost_equal(dist, dist_true)
np.testing.assert_array_almost_equal(pix, pix_true)
pin, dist, pix = model.get_projections(point, image=1)
pin_true = -np.array(point[:2]) / point[2]
dist_true = model.apply_distortion(pin_true)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], dist_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pin, pin_true)
np.testing.assert_array_almost_equal(dist, dist_true)
np.testing.assert_array_almost_equal(pix, pix_true)
def test_project_onto_image(self):
points = [[0, 0, 1], [-0.1, 0.2, 2.2], [[-0.1], [0.2], [2.2]], [[-0.1, 0], [0.2, 0], [2.2, 1]]]
model = self.Class(fx=4050.5, fy=3050.25, alpha=1.5, px=1500, py=1500.5,
k1=0.5, k2=-0.3, k3=0.15, p1=1e-7, p2=1e-6, a1=1, a2=2, a3=-3,
k4=1, k5=-5, k6=11, s1=1e-6, s2=1e2, s3=-3e-3, s4=5e-1
)
temps = [0, 1, -1, 10, -10]
for temp in temps:
with self.subTest(temp=temp, misalignment=None):
for point in points:
_, __, pix = model.get_projections(point, temperature=temp)
pix_proj = model.project_onto_image(point, temperature=temp)
np.testing.assert_array_equal(pix, pix_proj)
model = self.Class(fx=4050.5, fy=3050.25, alpha=1.5, px=1500, py=1500.5,
k1=0.5, k2=-0.3, k3=0.15, p1=1e-7, p2=1e-6,
k4=1, k5=-5, k6=11, s1=1e-6, s2=1e2, s3=-3e-3, s4=5e-1,
misalignment=[[1, 0.2, 0.3], [0, 0, np.pi]])
model.estimate_multiple_misalignments = True
with self.subTest(misalignment=[[1, 0.2, 0.3], [0, 0, np.pi]]):
for point in points:
_, __, pix = model.get_projections(point, image=0)
pix_proj = model.project_onto_image(point, image=0)
np.testing.assert_array_equal(pix, pix_proj)
_, __, pix = model.get_projections(point, image=1)
pix_proj = model.project_onto_image(point, image=1)
np.testing.assert_array_equal(pix, pix_proj)
def test_compute_pixel_jacobian(self):
def num_deriv(uvec, cmodel, delta=1e-8, image=0, temperature=0) -> np.ndarray:
uvec = np.array(uvec).reshape(3, -1)
pix_true = cmodel.project_onto_image(uvec, image=image, temperature=temperature)
uvec_pert = uvec + [[delta], [0], [0]]
pix_pert_x_f = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
uvec_pert = uvec + [[0], [delta], [0]]
pix_pert_y_f = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
uvec_pert = uvec + [[0], [0], [delta]]
pix_pert_z_f = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
uvec_pert = uvec - [[delta], [0], [0]]
pix_pert_x_b = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
uvec_pert = uvec - [[0], [delta], [0]]
pix_pert_y_b = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
uvec_pert = uvec - [[0], [0], [delta]]
pix_pert_z_b = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
return np.array([(pix_pert_x_f-pix_pert_x_b)/(2*delta),
(pix_pert_y_f-pix_pert_y_b)/(2*delta),
(pix_pert_z_f-pix_pert_z_b)/(2*delta)]).swapaxes(0, -1)
inputs = [np.array([[0.5, 0, 1]]).T,
np.array([[0, 0.5, 1], [0.5, 0.5, 1], [-0.5, 0, 1]]).T,
np.array([[0.1, -0.5, 1], [-0.5, -0.5, 1], [5, 10, 1000.23], [1, 2, 1200.23]]).T]
temperatures = [0, 1, -1, 10.5, -10.5]
model = self.Class(fx=4050.5, fy=5050.25, alpha=1.5, px=1500, py=1500.5,
a1=0.15e-7, a2=-0.01e-8, a3=1e-9,
k1=0.5, k2=-0.3, k3=0.15, p1=1e-7, p2=1e-6,
k4=-0.5, k5=0.02, k6=0.01, s1=-0.45, s2=0.0045, s3=-0.8, s4=0.9,
misalignment=[[1e-9, -1e-9, 2e-9], [-1e-9, 2e-9, -1e-9]])
model.estimate_multiple_misalignments = True
for temp in temperatures:
for input in inputs:
for image in range(2):
with self.subTest(image=image, temp=temp, input=input):
jac_ana = model.compute_pixel_jacobian(input, image=image, temperature=temp)
jac_num = num_deriv(input, model, image=image, temperature=temp, delta=1e-2)
np.testing.assert_allclose(jac_ana, jac_num, rtol=1e-3, atol=1e-10)
def test__compute_dcamera_point_dgnomic(self):
def num_deriv(gnomic_locations, cmodel, delta=1e-6) -> np.ndarray:
def g2u(g):
v = np.vstack([g, cmodel.focal_length*np.ones(g.shape[-1])])
return v/np.linalg.norm(v, axis=0, keepdims=True)
gnomic_locations = np.asarray(gnomic_locations).reshape(2, -1)
gnom_pert = gnomic_locations + [[delta], [0]]
cam_loc_pert_x_f = g2u(gnom_pert)
gnom_pert = gnomic_locations + [[0], [delta]]
cam_loc_pert_y_f = g2u(gnom_pert)
gnom_pert = gnomic_locations - [[delta], [0]]
cam_loc_pert_x_b = g2u(gnom_pert)
gnom_pert = gnomic_locations - [[0], [delta]]
cam_loc_pert_y_b = g2u(gnom_pert)
return np.array([(cam_loc_pert_x_f - cam_loc_pert_x_b)/(2*delta),
                 (cam_loc_pert_y_f - cam_loc_pert_y_b)/(2*delta)]).swapaxes(0, -1)
inputs = [np.array([[0, 0]]).T,
np.array([[0, 2000], [2000, 0], [2000, 2000]]).T,
np.array([[1000, 1000], [1000, 2000], [2000, 1000], [0, 1000], [1000, 0]]).T]
model = self.Class(fx=4050.5, fy=5050.25, alpha=1.5, px=1500, py=1500.5,
a1=0.15e-7, a2=-0.01e-8, a3=1e-9,
k1=0.5, k2=-0.3, k3=0.15, p1=1e-7, p2=1e-6,
k4=-0.5, k5=0.02, k6=0.01, s1=-0.45, s2=0.0045, s3=-0.8, s4=0.9,
misalignment=[[1e-9, -1e-9, 2e-9], [-1e-9, 2e-9, -1e-9]])
model.estimate_multiple_misalignments = True
for input in inputs:
with self.subTest(input=input):
jac_ana = []
for gnom in input.T:
jac_ana.append(
model._compute_dcamera_point_dgnomic(gnom, np.sqrt(np.sum(gnom*gnom) + model.focal_length**2)))
jac_ana = np.array(jac_ana)
jac_num = num_deriv(input, model)
np.testing.assert_almost_equal(jac_ana, jac_num)
def test__compute_dgnomic_ddist_gnomic(self):
def num_deriv(dist_gnomic_locations, cmodel, delta=1e-6) -> np.ndarray:
def dg2g(dg):
gnomic_guess = dg.copy()
# perform the fixed-point iteration to invert the distortion (at most 20 passes)
for _ in np.arange(20):
# get the distorted location assuming the current guess is correct
gnomic_guess_distorted = cmodel.apply_distortion(gnomic_guess)
# subtract off the residual distortion from the gnomic guess
gnomic_guess += dg - gnomic_guess_distorted
# check for convergence
if np.all(np.linalg.norm(gnomic_guess_distorted - dg, axis=0) <= 1e-15):
break
return gnomic_guess
dist_gnomic_locations = np.asarray(dist_gnomic_locations).reshape(2, -1)
dist_gnom_pert = dist_gnomic_locations + [[delta], [0]]
gnom_loc_pert_x_f = dg2g(dist_gnom_pert)
dist_gnom_pert = dist_gnomic_locations + [[0], [delta]]
gnom_loc_pert_y_f = dg2g(dist_gnom_pert)
dist_gnom_pert = dist_gnomic_locations - [[delta], [0]]
gnom_loc_pert_x_b = dg2g(dist_gnom_pert)
dist_gnom_pert = dist_gnomic_locations - [[0], [delta]]
gnom_loc_pert_y_b = dg2g(dist_gnom_pert)
return np.array([(gnom_loc_pert_x_f - gnom_loc_pert_x_b)/(2*delta),
(gnom_loc_pert_y_f - gnom_loc_pert_y_b)/(2*delta)]).swapaxes(0, -1)
inputs = [np.array([[0, 0]]).T,
np.array([[0, 0.1], [0.1, 0], [0.1, 0.1]]).T/100,
np.array([[-0.1, 0], [0, -0.1], [-0.1, -0.1], [0.1, -0.1], [-0.1, 0.1]]).T/100]
model = self.Class(fx=4050.5, fy=5050.25, alpha=1.5, px=1500, py=1500.5,
a1=0.15e-7, a2=-0.01e-8, a3=1e-9,
k1=0.005, k2=-0.003, k3=0.0015, p1=1e-7, p2=1e-6,
k4=-0.005, k5=0.0002, k6=0.0001, s1=-0.0045, s2=0.000045, s3=-0.008, s4=0.009,
misalignment=[[1e-9, -1e-9, 2e-9], [-1e-9, 2e-9, -1e-9]])
model.estimate_multiple_misalignments = True
for input in inputs:
with self.subTest(input=input):
jac_ana = []
for dist_gnom in input.T:
jac_ana.append(model._compute_dgnomic_ddist_gnomic(dist_gnom))
jac_ana = np.array(jac_ana)
jac_num = num_deriv(input, model, delta=1e-4)
np.testing.assert_allclose(jac_ana, jac_num, rtol=1e-1, atol=1e-10)
def test_compute_unit_vector_jacobian(self):
def num_deriv(pixels, cmodel, delta=1e-6, image=0, temperature=0) -> np.ndarray:
pixels = np.array(pixels).reshape(2, -1)
pix_pert = pixels + [[delta], [0]]
uvec_pert_x_f = cmodel.pixels_to_unit(pix_pert, image=image, temperature=temperature)
pix_pert = pixels + [[0], [delta]]
uvec_pert_y_f = cmodel.pixels_to_unit(pix_pert, image=image, temperature=temperature)
pix_pert = pixels - [[delta], [0]]
uvec_pert_x_b = cmodel.pixels_to_unit(pix_pert, image=image, temperature=temperature)
pix_pert = pixels - [[0], [delta]]
uvec_pert_y_b = cmodel.pixels_to_unit(pix_pert, image=image, temperature=temperature)
return np.array([(uvec_pert_x_f-uvec_pert_x_b)/(2*delta),
(uvec_pert_y_f-uvec_pert_y_b)/(2*delta)]).swapaxes(0, -1)
inputs = [np.array([[0, 0]]).T,
np.array([[0, 2000], [2000, 0], [2000, 2000]]).T/10,
np.array([[1000, 1000], [1000, 2000], [2000, 1000], [0, 1000], [1000, 0]]).T/10]
temperatures = [0, 1, -1, 10.5, -10.5]
model = self.Class(fx=4050.5, fy=5050.25, alpha=1.5, px=100, py=100.5,
a1=0.15e-7, a2=-0.01e-8, a3=1e-9,
k1=0.005, k2=-0.003, k3=0.0015, p1=1e-7, p2=1e-6,
k4=-0.005, k5=0.0002, k6=0.0001, s1=-0.0045, s2=0.000045, s3=-0.008, s4=0.009,
misalignment=[[1e-9, -1e-9, 2e-9], [-1e-9, 2e-9, -1e-9]])
model.estimate_multiple_misalignments = True
for temp in temperatures:
for input in inputs:
for image in range(2):
with self.subTest(image=image, temp=temp, input=input):
jac_ana = model.compute_unit_vector_jacobian(input, image=image, temperature=temp)
jac_num = num_deriv(input, model, image=image, temperature=temp, delta=1)
np.testing.assert_allclose(jac_ana, jac_num, rtol=1e-3, atol=1e-10)
def test__compute_ddistorted_gnomic_dgnomic(self):
def num_deriv(loc, cmodel, delta=1e-8) -> np.ndarray:
loc_pert = np.array(loc) + [delta, 0]
dist_pert_x_f = cmodel.apply_distortion(loc_pert)
loc_pert = np.array(loc) + [0, delta]
dist_pert_y_f = cmodel.apply_distortion(loc_pert)
loc_pert = np.array(loc) - [delta, 0]
dist_pert_x_b = cmodel.apply_distortion(loc_pert)
loc_pert = np.array(loc) - [0, delta]
dist_pert_y_b = cmodel.apply_distortion(loc_pert)
return np.array(
[(dist_pert_x_f - dist_pert_x_b) / (2 * delta), (dist_pert_y_f - dist_pert_y_b) / (2 * delta)]).T
dist_coefs = [{"k1": 1.5},
{"k2": 1.5},
{"k3": 1.5},
{"p1": 1.5},
{"p2": 1.5},
{"k4": 1.5},
{"k5": 1.5},
{"k6": 1.5},
{"s1": 1.5},
{"s2": 1.5},
{"s3": 1.5},
{"s4": 1.5},
{"k1": -1.5, "k2": -1.5, "k3": -1.5, "p1": -1.5, "p2": -1.5,
"k4": -1.5, "k5": -1.5, "k6": -1.5, "s1": -1.5, "s2": -1.5, "s3": -1.5, "s4": -1.5}]
inputs = [[0, 0], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [0, 1], [0, -1], [0, 1.5], [0, -1.5], [1, 1]]
for dist_coef in dist_coefs:
model = self.Class(**dist_coef)
for inp in inputs:
with self.subTest(**dist_coef, inp=inp):
r = np.sqrt(inp[0] ** 2 + inp[1] ** 2)
r2 = r ** 2
r4 = r ** 4
r6 = r ** 6
num = num_deriv(inp, model)
ana = model._compute_ddistorted_gnomic_dgnomic(np.array(inp), r2, r4, r6)
np.testing.assert_allclose(num, ana, atol=1e-14)
def test__compute_dpixel_ddistorted_gnomic(self):
def num_deriv(loc, cmodel, delta=1e-8, temperature=0) -> np.ndarray:
loc_pert = np.array(loc) + [delta, 0]
loc_pert *= cmodel.get_temperature_scale(temperature)
pix_pert_x_f = cmodel.intrinsic_matrix[:, :2] @ loc_pert + cmodel.intrinsic_matrix[:, 2]
loc_pert = np.array(loc) + [0, delta]
loc_pert *= cmodel.get_temperature_scale(temperature)
pix_pert_y_f = cmodel.intrinsic_matrix[:, :2] @ loc_pert + cmodel.intrinsic_matrix[:, 2]
loc_pert = np.array(loc) - [delta, 0]
loc_pert *= cmodel.get_temperature_scale(temperature)
pix_pert_x_b = cmodel.intrinsic_matrix[:, :2] @ loc_pert + cmodel.intrinsic_matrix[:, 2]
loc_pert = np.array(loc) - [0, delta]
loc_pert *= cmodel.get_temperature_scale(temperature)
pix_pert_y_b = cmodel.intrinsic_matrix[:, :2] @ loc_pert + cmodel.intrinsic_matrix[:, 2]
return np.array(
[(pix_pert_x_f - pix_pert_x_b) / (2 * delta), (pix_pert_y_f - pix_pert_y_b) / (2 * delta)]).T
intrins_coefs = [{"fx": 1.5, "fy": 0, "alpha": 0, "px": 0, "py": 0},
{"fx": 0, "fy": 1.5, "alpha": 0, "px": 0, "py": 0},
{"fx": 0, "fy": 0, "alpha": 1.5, "px": 0, "py": 0},
{"fx": 0, "fy": 0, "alpha": 0, "px": 1.5, "py": 0},
{"fx": 0, "fy": 0, "alpha": 0, "px": 0, "py": 1.5},
{"fx": -1.5, "fy": -1.5, "alpha": -1.5, "px": 1.5, "py": 1.5}]
inputs = [[0, 0], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [0, 1], [0, -1], [0, 1.5], [0, -1.5], [1, 1]]
temps = [0, 1, -1, 10.5, -10.5]
for temp in temps:
for intrins_coef in intrins_coefs:
model = self.Class(**intrins_coef)
with self.subTest(**intrins_coef, temp=temp):
for inp in inputs:
num = num_deriv(np.array(inp), model, temperature=temp)
ana = model._compute_dpixel_ddistorted_gnomic(temperature=temp)
np.testing.assert_allclose(num, ana, atol=1e-14)
def test__compute_dpixel_dintrinsic(self):
def num_deriv(loc, cmodel, delta=1e-6) -> np.ndarray:
model_pert = cmodel.copy()
model_pert.kx += delta
pix_pert_kx_f = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.kxy += delta
pix_pert_kxy_f = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.ky += delta
pix_pert_ky_f = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.px += delta
pix_pert_px_f = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.py += delta
pix_pert_py_f = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.kx -= delta
pix_pert_kx_b = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.kxy -= delta
pix_pert_kxy_b = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.ky -= delta
pix_pert_ky_b = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.px -= delta
pix_pert_px_b = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.py -= delta
pix_pert_py_b = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
return np.array([(pix_pert_kx_f - pix_pert_kx_b) / (2 * delta),
(pix_pert_ky_f - pix_pert_ky_b) / (2 * delta),
(pix_pert_kxy_f - pix_pert_kxy_b) / (2 * delta),
(pix_pert_px_f - pix_pert_px_b) / (2 * delta),
(pix_pert_py_f - pix_pert_py_b) / (2 * delta)]).T
intrins_coefs = [{"fx": 1.5, "fy": 0, "alpha": 0, "px": 0, "py": 0},
{"fx": 0, "fy": 1.5, "alpha": 0, "px": 0, "py": 0},
{"fx": 0, "fy": 0, "alpha": 1.5, "px": 0, "py": 0},
{"fx": 0, "fy": 0, "alpha": 0, "px": 1.5, "py": 0},
{"fx": 0, "fy": 0, "alpha": 0, "px": 0, "py": 1.5},
{"fx": -1.5, "fy": -1.5, "alpha": -1.5, "px": 1.5, "py": 1.5}]
inputs = [[1e-6, 1e-6], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [0, 1], [0, -1], [0, 1.5], [0, -1.5], [1, 1]]
for intrins_coef in intrins_coefs:
model = self.Class(**intrins_coef)
for inp in inputs:
with self.subTest(**intrins_coef, inp=inp):
num = num_deriv(inp, model, delta=1e-5)
ana = model._compute_dpixel_dintrinsic(
|
np.array(inp)
|
numpy.array
|
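The num_deriv helpers above all validate an analytic Jacobian against a central-difference approximation. Below is a minimal, self-contained sketch of that pattern; the two-parameter toy projection and the function names are illustrative, not part of the test suite above.

import numpy as np

def project(params, x):
    # toy "camera": maps a 3D point to 2D, linear in the two parameters
    fx, fy = params
    return np.array([fx * x[0] / x[2], fy * x[1] / x[2]])

def analytic_jacobian(params, x):
    # d(projection)/d(params) for the toy model above
    return np.array([[x[0] / x[2], 0.0],
                     [0.0, x[1] / x[2]]])

def numeric_jacobian(params, x, delta=1e-6):
    # central differences: one column per perturbed parameter
    cols = []
    for i in range(len(params)):
        p_f = np.array(params, dtype=float)
        p_f[i] += delta
        p_b = np.array(params, dtype=float)
        p_b[i] -= delta
        cols.append((project(p_f, x) - project(p_b, x)) / (2 * delta))
    return np.column_stack(cols)

params = [4050.5, 3050.25]
point = np.array([0.1, -0.2, 2.0])
np.testing.assert_allclose(analytic_jacobian(params, point),
                           numeric_jacobian(params, point), rtol=1e-6)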
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow as tf
import numpy as np
import nltk
import pickle
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
from create_sentiment_featuresets import create_feature_sets_and_labels
# the feature sets are required below (train_x/train_y feed the training loop)
train_x, train_y, test_x, test_y = create_feature_sets_and_labels('./files/pos.txt', './files/neg.txt')
n_nodes_hl1 = 500
n_nodes_hl2 = 500
n_nodes_hl3 = 500
n_classes = 2
batch_size = 100
long_training = len(train_x[0])  # feature-vector length of each training example (previously hardcoded as 423)
x = tf.placeholder('float', [None, long_training], name='x')
#x = tf.placeholder('float', name='x')
y = tf.placeholder('float', name='y')
hidden_1_layer = {'weights':tf.Variable(tf.random_normal([long_training, n_nodes_hl1])),
'biases':tf.Variable(tf.random_normal([n_nodes_hl1]))}
hidden_2_layer = {'weights':tf.Variable(tf.random_normal([n_nodes_hl1, n_nodes_hl2])),
'biases':tf.Variable(tf.random_normal([n_nodes_hl2]))}
hidden_3_layer = {'weights':tf.Variable(tf.random_normal([n_nodes_hl2, n_nodes_hl3])),
'biases':tf.Variable(tf.random_normal([n_nodes_hl3]))}
output_layer = {'weights':tf.Variable(tf.random_normal([n_nodes_hl3, n_classes])),
'biases':tf.Variable(tf.random_normal([n_classes]))}
def neural_network_model(data):
# y = (x * weights) + biases
l1 = tf.add(tf.matmul(data, hidden_1_layer['weights']), hidden_1_layer['biases'])
l1 = tf.nn.relu(l1) # f(x) = max(0, x)
l2 = tf.add(tf.matmul(l1, hidden_2_layer['weights']), hidden_2_layer['biases'])
l2 = tf.nn.relu(l2)
l3 = tf.add(tf.matmul(l2, hidden_3_layer['weights']), hidden_3_layer['biases'])
l3 = tf.nn.relu(l3)
output = tf.matmul(l3, output_layer['weights']) + output_layer['biases']
return output, hidden_1_layer['weights']
saver = tf.train.Saver()
def train_neural_network(x):
prediction, whl1 = neural_network_model(x)
cost = tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y) )
optimizer = tf.train.AdamOptimizer().minimize(cost)
f = open('./outputs/loss.txt', 'w')
f2 = open('./outputs/weights-epochs.csv', 'w')
# one epoch = one full forward + backpropagation pass over the training set
hm_epochs = 10
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for epoch in range(hm_epochs):
epoch_loss = 0
i = 0
while i < len(train_x):
start = i
end = i + batch_size
batch_x =
|
np.array(train_x[start:end])
|
numpy.array
|
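The training loop above consumes train_x in contiguous slices of batch_size. A minimal sketch of that batching pattern, with synthetic data standing in for the loaded feature sets (the shapes are illustrative):

import numpy as np

train_x = np.random.rand(250, 423)  # synthetic stand-in for the feature sets
batch_size = 100

i = 0
while i < len(train_x):
    batch_x = np.array(train_x[i:i + batch_size])  # the last batch may be short
    print(batch_x.shape)  # (100, 423), (100, 423), (50, 423)
    i += batch_size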
"""Functions for rendering the final result in PyVista."""
import time
import numpy as np
import pyvista as pv
from matplotlib.colors import LinearSegmentedColormap
from util import latlon2xyz, find_percent_val, find_val_percent
import cmocean
# zmaps = cmocean.tools.get_dict(cmocean.cm.thermal, N=9)
# zmaps = cmocean.cm.thermal
# zmaps = cmocean.cm.get_cmap("thermal").copy()
def add_points(pl, points):
# Is it strictly necessary that these be np.arrays?
for key, data in points.items():
dots = pv.PolyData(np.array(data))
pl.add_mesh(dots, point_size=10.0, color=key)
def add_lines(pl, radius, tilt):
x_axisline = pv.Line([-1.5 * radius, 0, 0], [1.5 * radius, 0, 0])
y_axisline = pv.Line([0, -1.5 * radius, 0], [0, 1.5 * radius, 0])
z_axisline = pv.Line([0, 0, -1.5 * radius], [0, 0, 1.5 * radius])
# Axial tilt line
# ax, ay, az = latlon2xyz(tilt, 45, radius)
ax, ay, az = latlon2xyz(tilt, 0, radius)
t_axisline = pv.Line([0,0,0], [ax * 1.5, ay * 1.5, az * 1.5])
# Sun tilt line (line that is perpendicular to the incoming solar flux)
# ax, ay, az = latlon2xyz(90-tilt, -135, radius)
ax, ay, az = latlon2xyz(90-tilt, 180, radius)
s_axisline = pv.Line([0,0,0], [ax * 1.5, ay * 1.5, az * 1.5])
pl.add_mesh(x_axisline, line_width=5, color="red")
pl.add_mesh(y_axisline, line_width=5, color="green")
pl.add_mesh(z_axisline, line_width=5, color="blue")
pl.add_mesh(t_axisline, line_width=5, color="magenta")
pl.add_mesh(s_axisline, line_width=5, color="yellow")
def visualize(verts, tris, heights=None, scalars=None, zero_level=0.0, surf_points=None, radius=1.0, tilt=0.0):
"""Visualize the output."""
pl = pv.Plotter()
if scalars["s-mode"] in ("insolation", "temperature"):
show_ocean_shell = False
else:
show_ocean_shell = True
# pyvista expects that faces have a leading number telling it how many
# vertices a face has, e.g. [3, 0, 11, 5] where 3 means triangle.
# https://docs.pyvista.org/examples/00-load/create-poly.html
# So we fill an array with the number '3' and merge it with the cells
# from meshzoo to get a 'proper' array for pyvista.
time_start = time.perf_counter()
tri_size = np.full((len(tris), 1), 3)
new_tris = np.hstack((tri_size, tris))
time_end = time.perf_counter()
print(f"Time to reshape triangle array: {time_end - time_start :.5f} sec")
tri_size = None
# Create pyvista mesh from our icosphere
time_start = time.perf_counter()
planet_mesh = pv.PolyData(verts, new_tris)
time_end = time.perf_counter()
print(f"Time to create the PyVista planet mesh: {time_end - time_start :.5f} sec")
# Separate mesh for ocean water
if show_ocean_shell:
ocean_shell = pv.ParametricEllipsoid(radius, radius, radius, u_res=300, v_res=300)
pl.add_mesh(ocean_shell, show_edges=False, smooth_shading=True, color="blue", opacity=0.15)
# Add any surface marker points or other floating marker points
if surf_points is not None:
add_points(pl, surf_points)
# Add axis lines
add_lines(pl, radius, tilt)
# minval = np.amin(scalars["scalars"])
# maxval = np.amax(scalars["scalars"])
# heights = np.clip(heights, minval*1.001, maxval)
# Prepare scalar gradient, scalar bar, and annotations
color_map, anno = make_scalars(scalars["s-mode"], scalars["scalars"])
sargs = dict(n_labels=0, label_font_size=12, position_y=0.07)
# ToDo: Add title to the scalar bar sargs and dynamically change it based on what is being visualized (e.g. Elevation, Surface Temperature, etc.)
# title="whatever" (remove the quotes and make 'whatever' into a variable, like the s-mode or whatever. like title=scalars["s-mode"])
# "Current"? ".items()"? https://stackoverflow.com/questions/3545331/how-can-i-get-dictionary-key-as-variable-directly-in-python-not-by-searching-fr
# https://stackoverflow.com/questions/16819222/how-to-return-dictionary-keys-as-a-list-in-python
# pl.add_mesh(planet_mesh, show_edges=False, smooth_shading=True, color="white", below_color="blue", culling="back", scalars=scalars["scalars"], cmap=custom_cmap, scalar_bar_args=sargs, annotations=anno)
pl.add_mesh(planet_mesh, show_edges=False, smooth_shading=True, color="white", culling="back", scalars=scalars["scalars"], cmap=color_map, scalar_bar_args=sargs, annotations=anno)
pl.show_axes()
pl.enable_terrain_style(mouse_wheel_zooms=True) # Use turntable style navigation
print("Sending to PyVista.")
pl.show()
def make_scalars(mode, scalars):
# Define the colors we want to use (NOT BEING USED AT THE MOMENT)
blue = np.array([12/256, 238/256, 246/256, 1])
black =
|
np.array([11/256, 11/256, 11/256, 1])
|
numpy.array
|
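visualize() above prepends the vertex count 3 to every triangle because PyVista encodes each face as [n, i0, ..., i(n-1)]. A minimal sketch of that face construction on a two-triangle square (assumes pyvista is installed; the mesh itself is illustrative):

import numpy as np
import pyvista as pv

# two triangles covering the unit square
verts = np.array([[0.0, 0.0, 0.0],
                  [1.0, 0.0, 0.0],
                  [1.0, 1.0, 0.0],
                  [0.0, 1.0, 0.0]])
tris = np.array([[0, 1, 2], [0, 2, 3]])

# prepend the vertex count (3) to each face, as in visualize() above
faces = np.hstack((np.full((len(tris), 1), 3), tris))
mesh = pv.PolyData(verts, faces)
print(mesh.n_cells)  # 2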
import pytest
import dpnp
import numpy
def test_choose():
a = numpy.r_[:4]
ia = dpnp.array(a)
b = numpy.r_[-4:0]
ib = dpnp.array(b)
c = numpy.r_[100:500:100]
ic = dpnp.array(c)
expected = numpy.choose([0, 0, 0, 0], [a, b, c])
result = dpnp.choose([0, 0, 0, 0], [ia, ib, ic])
numpy.testing.assert_array_equal(expected, result)
@pytest.mark.parametrize("offset",
[0, 1],
ids=['0', '1'])
@pytest.mark.parametrize("array",
[[[0, 0], [0, 0]],
[[1, 2], [1, 2]],
[[1, 2], [3, 4]],
[[0, 1, 2], [3, 4, 5], [6, 7, 8]],
[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]],
[[[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]]],
[[[[1, 2], [3, 4]], [[1, 2], [2, 1]]], [[[1, 3], [3, 1]], [[0, 1], [1, 3]]]],
[[[[1, 2, 3], [3, 4, 5]], [[1, 2, 3], [2, 1, 0]]], [
[[1, 3, 5], [3, 1, 0]], [[0, 1, 2], [1, 3, 4]]]],
[[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]], [[[13, 14, 15], [16, 17, 18]], [[19, 20, 21], [22, 23, 24]]]]],
ids=['[[0, 0], [0, 0]]',
'[[1, 2], [1, 2]]',
'[[1, 2], [3, 4]]',
'[[0, 1, 2], [3, 4, 5], [6, 7, 8]]',
'[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]',
'[[[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]]]',
'[[[[1, 2], [3, 4]], [[1, 2], [2, 1]]], [[[1, 3], [3, 1]], [[0, 1], [1, 3]]]]',
'[[[[1, 2, 3], [3, 4, 5]], [[1, 2, 3], [2, 1, 0]]], [[[1, 3, 5], [3, 1, 0]], [[0, 1, 2], [1, 3, 4]]]]',
'[[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]], [[[13, 14, 15], [16, 17, 18]], [[19, 20, 21], [22, 23, 24]]]]'])
def test_diagonal(array, offset):
a = numpy.array(array)
ia = dpnp.array(a)
expected = numpy.diagonal(a, offset)
result = dpnp.diagonal(ia, offset)
numpy.testing.assert_array_equal(expected, result)
@pytest.mark.parametrize("val",
[-1, 0, 1],
ids=['-1', '0', '1'])
@pytest.mark.parametrize("array",
[[[0, 0], [0, 0]],
[[1, 2], [1, 2]],
[[1, 2], [3, 4]],
[[0, 1, 2], [3, 4, 5], [6, 7, 8]],
[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]],
[[[[1, 2], [3, 4]], [[1, 2], [2, 1]]], [[[1, 3], [3, 1]], [[0, 1], [1, 3]]]]],
ids=['[[0, 0], [0, 0]]',
'[[1, 2], [1, 2]]',
'[[1, 2], [3, 4]]',
'[[0, 1, 2], [3, 4, 5], [6, 7, 8]]',
'[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]',
'[[[[1, 2], [3, 4]], [[1, 2], [2, 1]]], [[[1, 3], [3, 1]], [[0, 1], [1, 3]]]]'])
def test_fill_diagonal(array, val):
a = numpy.array(array)
ia = dpnp.array(a)
    # numpy.fill_diagonal works in place and returns None, so compare the
    # mutated arrays themselves rather than the (always-None) return values
    numpy.fill_diagonal(a, val)
    dpnp.fill_diagonal(ia, val)
    numpy.testing.assert_array_equal(a, ia)
@pytest.mark.parametrize("dimension",
[(1, ), (2, ), (1, 2), (2, 3), (3, 2), [1], [2], [1, 2], [2, 3], [3, 2]],
ids=['(1, )', '(2, )', '(1, 2)', '(2, 3)', '(3, 2)',
'[1]', '[2]', '[1, 2]', '[2, 3]', '[3, 2]'])
def test_indices(dimension):
expected = numpy.indices(dimension)
result = dpnp.indices(dimension)
numpy.testing.assert_array_equal(expected, result)
@pytest.mark.parametrize("array",
[[],
[[0, 0], [0, 0]],
[[1, 0], [1, 0]],
[[1, 2], [3, 4]],
[[0, 1, 2], [3, 0, 5], [6, 7, 0]],
[[0, 1, 0, 3, 0], [5, 0, 7, 0, 9]],
[[[1, 2], [0, 4]], [[0, 2], [0, 1]], [[0, 0], [3, 1]]],
[[[[1, 2, 3], [3, 4, 5]], [[1, 2, 3], [2, 1, 0]]], [
[[1, 3, 5], [3, 1, 0]], [[0, 1, 2], [1, 3, 4]]]]],
ids=['[]',
'[[0, 0], [0, 0]]',
'[[1, 0], [1, 0]]',
'[[1, 2], [3, 4]]',
'[[0, 1, 2], [3, 0, 5], [6, 7, 0]]',
'[[0, 1, 0, 3, 0], [5, 0, 7, 0, 9]]',
'[[[1, 2], [0, 4]], [[0, 2], [0, 1]], [[0, 0], [3, 1]]]',
'[[[[1, 2, 3], [3, 4, 5]], [[1, 2, 3], [2, 1, 0]]], [[[1, 3, 5], [3, 1, 0]], [[0, 1, 2], [1, 3, 4]]]]'])
def test_nonzero(array):
a = numpy.array(array)
ia = dpnp.array(array)
expected = numpy.nonzero(a)
result = dpnp.nonzero(ia)
numpy.testing.assert_array_equal(expected, result)
@pytest.mark.parametrize("vals",
[[100, 200],
(100, 200)],
ids=['[100, 200]',
'(100, 200)'])
@pytest.mark.parametrize("mask",
[[[True, False], [False, True]],
[[False, True], [True, False]],
[[False, False], [True, True]]],
ids=['[[True, False], [False, True]]',
'[[False, True], [True, False]]',
'[[False, False], [True, True]]'])
@pytest.mark.parametrize("arr",
[[[0, 0], [0, 0]],
[[1, 2], [1, 2]],
[[1, 2], [3, 4]]],
ids=['[[0, 0], [0, 0]]',
'[[1, 2], [1, 2]]',
'[[1, 2], [3, 4]]'])
def test_place1(arr, mask, vals):
a = numpy.array(arr)
ia = dpnp.array(a)
m = numpy.array(mask)
im = dpnp.array(m)
numpy.place(a, m, vals)
dpnp.place(ia, im, vals)
numpy.testing.assert_array_equal(a, ia)
@pytest.mark.parametrize("vals",
[[100, 200],
[100, 200, 300, 400, 500, 600],
[100, 200, 300, 400, 500, 600, 800, 900]],
ids=['[100, 200]',
'[100, 200, 300, 400, 500, 600]',
'[100, 200, 300, 400, 500, 600, 800, 900]'])
@pytest.mark.parametrize("mask",
[[[[True, False], [False, True]], [[False, True], [True, False]], [[False, False], [True, True]]]],
ids=['[[[True, False], [False, True]], [[False, True], [True, False]], [[False, False], [True, True]]]'])
@pytest.mark.parametrize("arr",
[[[[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]]]],
ids=['[[[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]]]'])
def test_place2(arr, mask, vals):
a = numpy.array(arr)
ia = dpnp.array(a)
m = numpy.array(mask)
im = dpnp.array(m)
numpy.place(a, m, vals)
dpnp.place(ia, im, vals)
numpy.testing.assert_array_equal(a, ia)
@pytest.mark.parametrize("vals",
[[100, 200],
[100, 200, 300, 400, 500, 600],
[100, 200, 300, 400, 500, 600, 800, 900]],
ids=['[100, 200]',
'[100, 200, 300, 400, 500, 600]',
'[100, 200, 300, 400, 500, 600, 800, 900]'])
@pytest.mark.parametrize("mask",
[[[[[False, False], [True, True]], [[True, True], [True, True]]], [
[[False, False], [True, True]], [[False, False], [False, False]]]]],
ids=['[[[[False, False], [True, True]], [[True, True], [True, True]]], [[[False, False], [True, True]], [[False, False], [False, False]]]]'])
@pytest.mark.parametrize("arr",
[[[[[1, 2], [3, 4]], [[1, 2], [2, 1]]], [[[1, 3], [3, 1]], [[0, 1], [1, 3]]]]],
ids=['[[[[1, 2], [3, 4]], [[1, 2], [2, 1]]], [[[1, 3], [3, 1]], [[0, 1], [1, 3]]]]'])
def test_place3(arr, mask, vals):
a = numpy.array(arr)
ia = dpnp.array(a)
m = numpy.array(mask)
im = dpnp.array(m)
numpy.place(a, m, vals)
dpnp.place(ia, im, vals)
numpy.testing.assert_array_equal(a, ia)
@pytest.mark.parametrize("v",
[0, 1, 2, 3, 4],
ids=['0', '1', '2', '3', '4'])
@pytest.mark.parametrize("ind",
[0, 1, 2, 3],
ids=['0', '1', '2', '3'])
@pytest.mark.parametrize("array",
[[[0, 0], [0, 0]],
[[1, 2], [1, 2]],
[[1, 2], [3, 4]],
[[[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]]],
[[[[1, 2], [3, 4]], [[1, 2], [2, 1]]], [[[1, 3], [3, 1]], [[0, 1], [1, 3]]]]],
ids=['[[0, 0], [0, 0]]',
'[[1, 2], [1, 2]]',
'[[1, 2], [3, 4]]',
'[[[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]]]',
'[[[[1, 2], [3, 4]], [[1, 2], [2, 1]]], [[[1, 3], [3, 1]], [[0, 1], [1, 3]]]]'])
def test_put(array, ind, v):
a = numpy.array(array)
ia = dpnp.array(a)
numpy.put(a, ind, v)
dpnp.put(ia, ind, v)
numpy.testing.assert_array_equal(a, ia)
@pytest.mark.parametrize("v",
[[10, 20], [30, 40]],
ids=['[10, 20]', '[30, 40]'])
@pytest.mark.parametrize("ind",
[[0, 1], [2, 3]],
ids=['[0, 1]', '[2, 3]'])
@pytest.mark.parametrize("array",
[[[0, 0], [0, 0]],
[[1, 2], [1, 2]],
[[1, 2], [3, 4]],
[[[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]]],
[[[[1, 2], [3, 4]], [[1, 2], [2, 1]]], [[[1, 3], [3, 1]], [[0, 1], [1, 3]]]]],
ids=['[[0, 0], [0, 0]]',
'[[1, 2], [1, 2]]',
'[[1, 2], [3, 4]]',
'[[[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]]]',
'[[[[1, 2], [3, 4]], [[1, 2], [2, 1]]], [[[1, 3], [3, 1]], [[0, 1], [1, 3]]]]'])
def test_put2(array, ind, v):
a = numpy.array(array)
ia = dpnp.array(a)
numpy.put(a, ind, v)
dpnp.put(ia, ind, v)
numpy.testing.assert_array_equal(a, ia)
def test_put3():
a = numpy.arange(5)
ia = dpnp.array(a)
dpnp.put(ia, [0, 2], [-44, -55])
numpy.put(a, [0, 2], [-44, -55])
numpy.testing.assert_array_equal(a, ia)
def test_put_along_axis_val_int():
a = numpy.arange(16).reshape(4, 4)
ai = dpnp.array(a)
ind_r = numpy.array([[3, 0, 2, 1]])
ind_r_i = dpnp.array(ind_r)
for axis in range(2):
numpy.put_along_axis(a, ind_r, 777, axis)
dpnp.put_along_axis(ai, ind_r_i, 777, axis)
numpy.testing.assert_array_equal(a, ai)
def test_put_along_axis1():
a = numpy.arange(64).reshape(4, 4, 4)
ai = dpnp.array(a)
ind_r = numpy.array([[[3, 0, 2, 1]]])
ind_r_i = dpnp.array(ind_r)
for axis in range(3):
numpy.put_along_axis(a, ind_r, 777, axis)
dpnp.put_along_axis(ai, ind_r_i, 777, axis)
numpy.testing.assert_array_equal(a, ai)
def test_put_along_axis2():
a = numpy.arange(64).reshape(4, 4, 4)
ai = dpnp.array(a)
ind_r = numpy.array([[[3, 0, 2, 1]]])
ind_r_i = dpnp.array(ind_r)
for axis in range(3):
numpy.put_along_axis(a, ind_r, [100, 200, 300, 400], axis)
dpnp.put_along_axis(ai, ind_r_i, [100, 200, 300, 400], axis)
numpy.testing.assert_array_equal(a, ai)
@pytest.mark.parametrize("vals",
[[100, 200]],
ids=['[100, 200]'])
@pytest.mark.parametrize("mask",
[[[True, False], [False, True]],
[[False, True], [True, False]],
[[False, False], [True, True]]],
ids=['[[True, False], [False, True]]',
'[[False, True], [True, False]]',
'[[False, False], [True, True]]'])
@pytest.mark.parametrize("arr",
[[[0, 0], [0, 0]],
[[1, 2], [1, 2]],
[[1, 2], [3, 4]]],
ids=['[[0, 0], [0, 0]]',
'[[1, 2], [1, 2]]',
'[[1, 2], [3, 4]]'])
def test_putmask1(arr, mask, vals):
a = numpy.array(arr)
ia = dpnp.array(a)
m = numpy.array(mask)
im = dpnp.array(m)
v = numpy.array(vals)
iv = dpnp.array(v)
numpy.putmask(a, m, v)
dpnp.putmask(ia, im, iv)
numpy.testing.assert_array_equal(a, ia)
@pytest.mark.parametrize("vals",
[[100, 200],
[100, 200, 300, 400, 500, 600],
[100, 200, 300, 400, 500, 600, 800, 900]],
ids=['[100, 200]',
'[100, 200, 300, 400, 500, 600]',
'[100, 200, 300, 400, 500, 600, 800, 900]'])
@pytest.mark.parametrize("mask",
[[[[True, False], [False, True]], [[False, True], [True, False]], [[False, False], [True, True]]]],
ids=['[[[True, False], [False, True]], [[False, True], [True, False]], [[False, False], [True, True]]]'])
@pytest.mark.parametrize("arr",
[[[[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]]]],
ids=['[[[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]]]'])
def test_putmask2(arr, mask, vals):
a = numpy.array(arr)
ia = dpnp.array(a)
m = numpy.array(mask)
im = dpnp.array(m)
v = numpy.array(vals)
iv = dpnp.array(v)
numpy.putmask(a, m, v)
dpnp.putmask(ia, im, iv)
numpy.testing.assert_array_equal(a, ia)
@pytest.mark.parametrize("vals",
[[100, 200],
[100, 200, 300, 400, 500, 600],
[100, 200, 300, 400, 500, 600, 800, 900]],
ids=['[100, 200]',
'[100, 200, 300, 400, 500, 600]',
'[100, 200, 300, 400, 500, 600, 800, 900]'])
@pytest.mark.parametrize("mask",
[[[[[False, False], [True, True]], [[True, True], [True, True]]], [
[[False, False], [True, True]], [[False, False], [False, False]]]]],
ids=['[[[[False, False], [True, True]], [[True, True], [True, True]]], [[[False, False], [True, True]], [[False, False], [False, False]]]]'])
@pytest.mark.parametrize("arr",
[[[[[1, 2], [3, 4]], [[1, 2], [2, 1]]], [[[1, 3], [3, 1]], [[0, 1], [1, 3]]]]],
ids=['[[[[1, 2], [3, 4]], [[1, 2], [2, 1]]], [[[1, 3], [3, 1]], [[0, 1], [1, 3]]]]'])
def test_putmask3(arr, mask, vals):
a = numpy.array(arr)
ia = dpnp.array(a)
m = numpy.array(mask)
im = dpnp.array(m)
v = numpy.array(vals)
iv = dpnp.array(v)
numpy.putmask(a, m, v)
dpnp.putmask(ia, im, iv)
numpy.testing.assert_array_equal(a, ia)
def test_select():
cond_val1 = numpy.array([True, True, True, False, False, False, False, False, False, False])
cond_val2 = numpy.array([False, False, False, False, False, True, True, True, True, True])
icond_val1 = dpnp.array(cond_val1)
icond_val2 = dpnp.array(cond_val2)
condlist = [cond_val1, cond_val2]
icondlist = [icond_val1, icond_val2]
choice_val1 = numpy.full(10, -2)
choice_val2 = numpy.full(10, -1)
ichoice_val1 = dpnp.array(choice_val1)
ichoice_val2 = dpnp.array(choice_val2)
choicelist = [choice_val1, choice_val2]
ichoicelist = [ichoice_val1, ichoice_val2]
expected = numpy.select(condlist, choicelist)
result = dpnp.select(icondlist, ichoicelist)
numpy.testing.assert_array_equal(expected, result)
@pytest.mark.parametrize("indices",
[[[0, 0], [0, 0]],
[[1, 2], [1, 2]],
[[1, 2], [3, 4]]],
ids=['[[0, 0], [0, 0]]',
'[[1, 2], [1, 2]]',
'[[1, 2], [3, 4]]'])
@pytest.mark.parametrize("array",
[[[0, 1, 2], [3, 4, 5], [6, 7, 8]],
[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]],
[[[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]]],
[[[[1, 2], [3, 4]], [[1, 2], [2, 1]]], [[[1, 3], [3, 1]], [[0, 1], [1, 3]]]],
[[[[1, 2, 3], [3, 4, 5]], [[1, 2, 3], [2, 1, 0]]], [
[[1, 3, 5], [3, 1, 0]], [[0, 1, 2], [1, 3, 4]]]],
[[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]], [[[13, 14, 15], [16, 17, 18]], [[19, 20, 21], [22, 23, 24]]]]],
ids=['[[0, 1, 2], [3, 4, 5], [6, 7, 8]]',
'[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]',
'[[[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]]]',
'[[[[1, 2], [3, 4]], [[1, 2], [2, 1]]], [[[1, 3], [3, 1]], [[0, 1], [1, 3]]]]',
'[[[[1, 2, 3], [3, 4, 5]], [[1, 2, 3], [2, 1, 0]]], [[[1, 3, 5], [3, 1, 0]], [[0, 1, 2], [1, 3, 4]]]]',
'[[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]], [[[13, 14, 15], [16, 17, 18]], [[19, 20, 21], [22, 23, 24]]]]'])
def test_take(array, indices):
a = numpy.array(array)
ind = numpy.array(indices)
ia = dpnp.array(a)
iind = dpnp.array(ind)
expected = numpy.take(a, ind)
result = dpnp.take(ia, iind)
numpy.testing.assert_array_equal(expected, result)
def test_take_along_axis():
a = numpy.arange(16).reshape(4, 4)
ai = dpnp.array(a)
ind_r = numpy.array([[3, 0, 2, 1]])
ind_r_i = dpnp.array(ind_r)
for axis in range(2):
        expected = numpy.take_along_axis(a, ind_r, axis)
        result = dpnp.take_along_axis(ai, ind_r_i, axis)
        numpy.testing.assert_array_equal(expected, result)
import numpy as np
#______________________________________Triangles__________________________________________________
def triangle_area(vertices, triangles):
"""
Compute the area of the given triangles.
Parameters:
vertices (Array (Nx3) type=float): The list of vertices of the triangles
triangles (Array (Nx3) type=int): The list of triangles
Return:
((float, float), Array): The first element is a tuple representing the range where the metric is defined
and the second element is the array of the area of the given triangles
"""
b = vertices[triangles][:,1] - vertices[triangles][:,0]
h = vertices[triangles][:,2] - vertices[triangles][:,1]
return ((None, None), 0.5 * np.linalg.norm(np.cross(b, h), axis = 1))
def triangle_aspect_ratio(vertices, triangles):
"""
Compute the aspect ratio of the given triangles.
Parameters:
vertices (Array (Nx3) type=float): The list of vertices of the triangles
triangles (Array (Nx3) type=int): The list of triangles
Return:
((float, float), Array): The first element is a tuple representing the range where the metric is defined
    and the second element is the array of the aspect ratio of the given triangles
"""
l1 = np.linalg.norm(vertices[triangles][:,0] - vertices[triangles][:,1], axis = 1)
l2 = np.linalg.norm(vertices[triangles][:,1] - vertices[triangles][:,2], axis = 1)
l3 = np.linalg.norm(vertices[triangles][:,2] - vertices[triangles][:,0], axis = 1)
a = triangle_area(vertices, triangles)[1]
r = (2 * a) / (l1 + l2 + l3)
l_max = np.max(np.c_[l1, l2, l3], axis = 1)
return ((None, None), l_max / (2 * np.sqrt(3) * r))
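# A minimal usage sketch (hypothetical data, not part of the original module):
# a right triangle in the z=0 plane has area 0.5, and any non-equilateral
# triangle yields an aspect ratio greater than 1.
if __name__ == "__main__":
    demo_vertices = np.array([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.]])
    demo_triangles = np.array([[0, 1, 2]])
    print(triangle_area(demo_vertices, demo_triangles)[1])          # [0.5]
    print(triangle_aspect_ratio(demo_vertices, demo_triangles)[1])  # ~[1.394]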
#______________________________________Quads__________________________________________________
def quad_area(vertices, quads):
"""
Compute the area of the given quadrilaterals.
Parameters:
vertices (Array (Nx3) type=float): The list of vertices of the quadrilaterals
quads (Array (Nx4) type=int): The list of quadrilaterals
Return:
((float, float), Array): The first element is a tuple representing the range where the metric is defined
and the second element is the array of the area of the given quadrilaterals
"""
tris = np.c_[quads[:,:3], quads[:,2:], quads[:,0]]
tris.shape = (-1, 3)
idx_1 = range(0, tris.shape[0], 2)
idx_2 = range(1, tris.shape[0], 2)
a_tri1 = triangle_area(vertices, tris[idx_1])[1]
a_tri2 = triangle_area(vertices, tris[idx_2])[1]
return ((None, None), a_tri1+a_tri2)
def quad_aspect_ratio(vertices, quads):
"""
Compute the aspect ratio of the given quadrilaterals.
Parameters:
vertices (Array (Nx3) type=float): The list of vertices of the quadrilaterals
quads (Array (Nx4) type=int): The list of quadrilaterals
Return:
((float, float), Array): The first element is a tuple representing the range where the metric is defined
and the second element is the array of the aspect ratio of the given quadrilaterals
"""
l1 = np.linalg.norm(vertices[quads][:,0] - vertices[quads][:,1], axis = 1)
l2 = np.linalg.norm(vertices[quads][:,1] - vertices[quads][:,2], axis = 1)
l3 = np.linalg.norm(vertices[quads][:,2] - vertices[quads][:,3], axis = 1)
l4 = np.linalg.norm(vertices[quads][:,3] - vertices[quads][:,0], axis = 1)
a = quad_area(vertices, quads)[1]
l_max = np.max(np.c_[l1, l2, l3, l4], axis = 1)
return ((None, None), l_max / (4*a))
#______________________________________Tets______________________________________________________
def tet_scaled_jacobian(vertices, tets):
"""
Compute the scaled jacobian of the given tetrahedra.
Parameters:
vertices (Array (Nx3) type=float): The list of vertices of the tetrahedra
tets (Array (Nx4) type=int): The list of tetrahedra
Return:
((float, float), Array): The first element is a tuple representing the range where the metric is defined
and the second element is the array of the scaled jacobian of the given tetrahedra
"""
p0 = vertices[tets[:,0]]
p1 = vertices[tets[:,1]]
p2 = vertices[tets[:,2]]
p3 = vertices[tets[:,3]]
l0 = p1 - p0
l1 = p2 - p1
l2 = p0 - p2
l3 = p3 - p0
l4 = p3 - p1
l5 = p3 - p2
l0_length = np.linalg.norm(l0, axis=1)
l1_length = np.linalg.norm(l1, axis=1)
l2_length = np.linalg.norm(l2, axis=1)
l3_length = np.linalg.norm(l3, axis=1)
l4_length = np.linalg.norm(l4, axis=1)
l5_length = np.linalg.norm(l5, axis=1)
J = np.einsum('ij,ij->i', np.cross(l2, l0, axis= 1), l3)
lambda_1 = np.expand_dims(l0_length * l2_length * l3_length, axis = 0).transpose()
lambda_2 = np.expand_dims(l0_length * l1_length * l4_length, axis = 0).transpose()
lambda_3 = np.expand_dims(l1_length * l2_length * l5_length, axis = 0).transpose()
lambda_4 = np.expand_dims(l3_length * l4_length * l5_length, axis = 0).transpose()
lambda_5 = np.expand_dims(J, axis = 0).transpose()
lambda_ = np.concatenate((lambda_1, lambda_2, lambda_3, lambda_4, lambda_5), axis = 1)
max_el = np.amax(lambda_, axis=1)
return ((-1., 1.), (J * np.sqrt(2)) / max_el)
def tet_volume(vertices, tets):
"""
Compute the volume of the given tetrahedra.
Parameters:
vertices (Array (Nx3) type=float): The list of vertices of the tetrahedra
tets (Array (Nx4) type=int): The list of tetrahedra
Return:
((float, float), Array): The first element is a tuple representing the range where the metric is defined
and the second element is the array of the volume of the given tetrahedra
"""
p0 = vertices[tets[:,0]]
p1 = vertices[tets[:,1]]
p2 = vertices[tets[:,2]]
p3 = vertices[tets[:,3]]
ones = np.ones(p0.shape[0])
p0 = np.c_[p0,ones]
p1 = np.c_[p1,ones]
p2 = np.c_[p2,ones]
p3 = np.c_[p3,ones]
matr = np.transpose(np.c_[p0, p1, p2, p3].reshape(-1,4,4), (0,2,1))
volume = np.abs(np.linalg.det(matr))/6
return ((None, None), volume)
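# Worked sanity check (hypothetical data, not part of the original module):
# the corner tetrahedron (0,0,0),(1,0,0),(0,1,0),(0,0,1) has volume 1/6 and a
# scaled jacobian of sqrt(2)/2.
if __name__ == "__main__":
    demo_verts = np.array([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.], [0., 0., 1.]])
    demo_tets = np.array([[0, 1, 2, 3]])
    print(tet_volume(demo_verts, demo_tets)[1])           # [0.1666...]
    print(tet_scaled_jacobian(demo_verts, demo_tets)[1])  # [0.7071...]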
#______________________________________Hexes______________________________________________________
def hex_scaled_jacobian(vertices, hexes):
"""
Compute the scaled jacobian of the given hexahedra.
Parameters:
vertices (Array (Nx3) type=float): The list of vertices of the hexahedra
hexes (Array (Nx8) type=int): The list of hexahedra
Return:
((float, float), Array): The first element is a tuple representing the range where the metric is defined
and the second element is the array of the scaled jacobian of the given hexahedra
"""
p0 = vertices[hexes[:,0]]
p1 = vertices[hexes[:,1]]
p2 = vertices[hexes[:,2]]
p3 = vertices[hexes[:,3]]
p4 = vertices[hexes[:,4]]
p5 = vertices[hexes[:,5]]
p6 = vertices[hexes[:,6]]
p7 = vertices[hexes[:,7]]
l0 = p1 - p0
l1 = p2 - p1
l2 = p3 - p2
l3 = p3 - p0
l4 = p4 - p0
l5 = p5 - p1
l6 = p6 - p2
l7 = p7 - p3
l8 = p5 - p4
l9 = p6 - p5
l10 = p7 - p6
l11 = p7 - p4
# cross-derivatives
x1 = (p1 - p0) + (p2 - p3) + (p5 - p4) + (p6 - p7)
x2 = (p3 - p0) + (p2 - p1) + (p7 - p4) + (p6 - p5)
x3 = (p4 - p0) + (p5 - p1) + (p6 - p2) + (p7 - p3)
l0norm = l0/np.linalg.norm(l0, axis=1).reshape(-1,1)
l1norm = l1/np.linalg.norm(l1, axis=1).reshape(-1,1)
l2norm = l2/np.linalg.norm(l2, axis=1).reshape(-1,1)
l3norm = l3/np.linalg.norm(l3, axis=1).reshape(-1,1)
l4norm = l4/np.linalg.norm(l4, axis=1).reshape(-1,1)
l5norm = l5/np.linalg.norm(l5, axis=1).reshape(-1,1)
l6norm = l6/np.linalg.norm(l6, axis=1).reshape(-1,1)
l7norm = l7/np.linalg.norm(l7, axis=1).reshape(-1,1)
l8norm = l8/np.linalg.norm(l8, axis=1).reshape(-1,1)
l9norm = l9/np.linalg.norm(l9, axis=1).reshape(-1,1)
l10norm = l10/np.linalg.norm(l10, axis=1).reshape(-1,1)
l11norm = l11/np.linalg.norm(l11, axis=1).reshape(-1,1)
x1norm = x1/np.linalg.norm(x1, axis=1).reshape(-1,1)
x2norm = x2/np.linalg.norm(x2, axis=1).reshape(-1,1)
x3norm = x3/np.linalg.norm(x3, axis=1).reshape(-1,1)
# normalized jacobian matrices determinants
alpha_1 = np.expand_dims(np.einsum('ij,ij->i', l0norm, np.cross(l3norm, l4norm, axis= 1)), axis = 0).transpose()
alpha_2 = np.expand_dims(np.einsum('ij,ij->i', l1norm, np.cross(-l0norm, l5norm, axis=1)), axis = 0).transpose()
alpha_3 = np.expand_dims(np.einsum('ij,ij->i', l2norm, np.cross(-l1norm, l6norm, axis=1)), axis = 0).transpose()
alpha_4 = np.expand_dims(np.einsum('ij,ij->i', -l3norm, np.cross(-l2norm, l7norm, axis=1)), axis = 0).transpose()
    alpha_5 = np.expand_dims(np.einsum('ij,ij->i', l11norm, np.cross(l8norm, -l4norm, axis=1)), axis = 0).transpose()
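    # The record is truncated here. A sketch of the remaining corner determinants,
    # following the standard (Verdict-style) hex scaled-jacobian definition; this
    # continuation is an assumption, not the original author's code.
    alpha_6 = np.expand_dims(np.einsum('ij,ij->i', -l8norm, np.cross(l9norm, -l5norm, axis=1)), axis = 0).transpose()
    alpha_7 = np.expand_dims(np.einsum('ij,ij->i', -l9norm, np.cross(l10norm, -l6norm, axis=1)), axis = 0).transpose()
    alpha_8 = np.expand_dims(np.einsum('ij,ij->i', -l10norm, np.cross(-l11norm, -l7norm, axis=1)), axis = 0).transpose()
    alpha_9 = np.expand_dims(np.einsum('ij,ij->i', x1norm, np.cross(x2norm, x3norm, axis=1)), axis = 0).transpose()
    alpha_ = np.concatenate((alpha_1, alpha_2, alpha_3, alpha_4, alpha_5,
                             alpha_6, alpha_7, alpha_8, alpha_9), axis = 1)
    # the scaled jacobian is the worst (minimum) of the nine normalized determinants
    return ((-1., 1.), np.amin(alpha_, axis=1))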
import numpy as np
import pytest
# pylint: disable=line-too-long
from tensornetwork.block_sparse.charge import U1Charge, fuse_charges, charge_equal, fuse_ndarrays
from tensornetwork.block_sparse.index import Index
# pylint: disable=line-too-long
from tensornetwork.block_sparse.block_tensor import flatten, get_flat_meta_data, fuse_stride_arrays, compute_sparse_lookup, _find_best_partition, compute_fused_charge_degeneracies, compute_unique_fused_charges, compute_num_nonzero
np_dtypes = [np.float64, np.complex128]
np_tensordot_dtypes = [np.float64, np.complex128]
def test_flatten():
listoflist = [[1, 2], [3, 4], [5]]
flat = flatten(listoflist)
np.testing.assert_allclose(flat, [1, 2, 3, 4, 5])
def test_flat_meta_data():
i1 = Index([U1Charge.random(-2, 2, 20),
U1Charge.random(-2, 2, 20)],
flow=[True, False])
i2 = Index([U1Charge.random(-2, 2, 20),
U1Charge.random(-2, 2, 20)],
flow=[False, True])
expected_charges = [
i1._charges[0], i1._charges[1], i2._charges[0], i2._charges[1]
]
expected_flows = [True, False, False, True]
charges, flows = get_flat_meta_data([i1, i2])
np.testing.assert_allclose(flows, expected_flows)
for n, c in enumerate(charges):
assert charge_equal(c, expected_charges[n])
def test_fuse_stride_arrays():
dims = np.asarray([2, 3, 4, 5])
    strides = np.asarray([120, 60, 20, 5, 1])
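    # The record is truncated here; a plausible continuation (assumption), checking
    # fuse_stride_arrays against an explicit construction via fuse_ndarrays:
    actual = fuse_stride_arrays(dims, strides)
    expected = fuse_ndarrays([
        np.arange(0, strides[n] * dims[n], strides[n], dtype=np.uint32)
        for n in range(len(dims))
    ])
    np.testing.assert_allclose(actual, expected)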
#!/usr/bin/env python
# Copyright (c) 2016, wradlib developers.
# Distributed under the MIT License. See LICENSE.txt for more info.
import os
import numpy as np
import wradlib.util as util
import unittest
import datetime as dt
from deprecation import fail_if_not_removed
class HelperFunctionsTest(unittest.TestCase):
@fail_if_not_removed
def test__get_func(self):
self.assertEqual(util._get_func('arange').__class__,
np.arange.__class__)
self.assertEqual(util._get_func('arange').__module__,
np.arange.__module__)
self.assertRaises(AttributeError, lambda: util._get_func('aranged'))
def test__shape2size(self):
self.assertEqual(util._shape2size((10, 10, 10)), 10 * 10 * 10)
@fail_if_not_removed
def test__tdelta2seconds(self):
self.assertEqual(util._tdelta2seconds(dt.datetime(2001, 1, 1, 1) -
dt.datetime(2000, 1, 1)),
366 * 24 * 60 * 60 + 3600)
self.assertEqual(util._tdelta2seconds(dt.datetime(2002, 1, 1, 1) -
dt.datetime(2001, 1, 1)),
365 * 24 * 60 * 60 + 3600)
@fail_if_not_removed
def test__get_tdelta(self):
tstart = dt.datetime(2000, 1, 1)
tend = dt.datetime(2001, 1, 1, 1)
tstart_str = "2000-01-01 00:00:00"
tend_str = "2001-01-01 01:00:00"
self.assertEqual(util._get_tdelta(tstart, tend), tend - tstart)
self.assertEqual(util._get_tdelta(tstart_str, tend_str), tend - tstart)
self.assertRaises(ValueError,
lambda: util._get_tdelta(tstart_str,
tend_str + ".00000"))
self.assertEqual(util._get_tdelta(tstart_str, tend_str, as_secs=True),
366 * 24 * 60 * 60 + 3600)
def test__idvalid(self):
data = np.array(
[np.inf, np.nan, -99., 99, -9999., -9999, -10., -5., 0., 5., 10.])
self.assertTrue(
np.allclose(util._idvalid(data), np.array([6, 7, 8, 9, 10])))
self.assertTrue(np.allclose(util._idvalid(data, minval=-5., maxval=5.),
np.array([7, 8, 9])))
        self.assertTrue(
            np.allclose(util._idvalid(data, isinvalid=[-9999], maxval=5.),
                        np.array([2, 6, 7, 8, 9])))
# Code adapted from Tensorflow Object Detection Framework
# https://github.com/tensorflow/models/blob/master/research/object_detection/object_detection_tutorial.ipynb
# Tensorflow Object Detection Detector
import numpy as np
import tensorflow as tf
import cv2
import time
import socket
import struct
import pickle
from PIL import Image
from scipy.spatial import distance as dist
from collections import OrderedDict
#from script.my_codes.centroidtracker import CentroidTracker
#from script.my_codes.nonmax_suppression import nms
class DetectorAPI:
def __init__(self, path_to_ckpt):
self.path_to_ckpt = path_to_ckpt
self.detection_graph = tf.Graph()
with self.detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(self.path_to_ckpt, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
self.default_graph = self.detection_graph.as_default()
self.sess = tf.Session(graph=self.detection_graph)
# Definite input and output Tensors for detection_graph
self.image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')
# Each box represents a part of the image where a particular object was detected.
self.detection_boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')
        # Each score represents the level of confidence for each of the objects.
# Score is shown on the result image, together with the class label.
self.detection_scores = self.detection_graph.get_tensor_by_name('detection_scores:0')
self.detection_classes = self.detection_graph.get_tensor_by_name('detection_classes:0')
self.num_detections = self.detection_graph.get_tensor_by_name('num_detections:0')
def processFrame(self, image):
# Expand dimensions since the trained_model expects images to have shape: [1, None, None, 3]
image_np_expanded = np.expand_dims(image, axis=0)
# Actual detection.
start_time = time.time()
(boxes, scores, classes, num) = self.sess.run(
[self.detection_boxes, self.detection_scores, self.detection_classes, self.num_detections],
feed_dict={self.image_tensor: image_np_expanded})
end_time = time.time()
print("Elapsed Time:", end_time-start_time)
im_height, im_width,_ = image.shape
boxes_list = [None for i in range(boxes.shape[1])]
for i in range(boxes.shape[1]):
boxes_list[i] = (int(boxes[0,i,0] * im_height),
int(boxes[0,i,1]*im_width),
int(boxes[0,i,2] * im_height),
int(boxes[0,i,3]*im_width))
return boxes_list, scores[0].tolist(), [int(x) for x in classes[0].tolist()], int(num[0])
def close(self):
self.sess.close()
self.default_graph.close()
def nms(boxes, overlap_threshold=0.2, mode='union'):
"""Non-maximum suppression.
Arguments:
boxes: a float numpy array of shape [n, 5],
where each row is (xmin, ymin, xmax, ymax, score).
overlap_threshold: a float number.
mode: 'union' or 'min'.
Returns:
list with indices of the selected boxes
"""
# if there are no boxes, return the empty list
if len(boxes) == 0:
return []
# list of picked indices
pick = []
# grab the coordinates of the bounding boxes
x1, y1, x2, y2, score = [boxes[:, i] for i in range(5)]
area = (x2 - x1 + 1.0)*(y2 - y1 + 1.0)
    ids = np.argsort(score)
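    # The record is truncated here; the standard greedy suppression loop
    # (a sketch, assuming the usual 'union'/'min' overlap formulations):
    while len(ids) > 0:
        # pick the box with the highest score
        last = len(ids) - 1
        i = ids[last]
        pick.append(i)
        # intersection of the picked box with all remaining boxes
        ix1 = np.maximum(x1[i], x1[ids[:last]])
        iy1 = np.maximum(y1[i], y1[ids[:last]])
        ix2 = np.minimum(x2[i], x2[ids[:last]])
        iy2 = np.minimum(y2[i], y2[ids[:last]])
        w = np.maximum(0.0, ix2 - ix1 + 1.0)
        h = np.maximum(0.0, iy2 - iy1 + 1.0)
        inter = w * h
        if mode == 'min':
            overlap = inter / np.minimum(area[i], area[ids[:last]])
        else:  # 'union': intersection over union
            overlap = inter / (area[i] + area[ids[:last]] - inter)
        # drop the picked box and every remaining box that overlaps it too much
        ids = np.delete(ids, np.concatenate(
            [[last], np.where(overlap > overlap_threshold)[0]]))
    return pick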
'''
This is an implementation of Quantum State Tomography for Qubits,
using techniques from the following papers:
'Iterative algorithm for reconstruction of entangled states (10.1103/PhysRevA.63.040303)'
'Diluted maximum-likelihood algorithm for quantum tomography (10.1103/PhysRevA.75.042108)'
'Qudit Quantum State Tomography (10.1103/PhysRevA.66.012303)'
'''
import numpy as np
from numpy import array, kron, trace, identity, sqrt, zeros, exp, pi, conjugate, random
from scipy.linalg import sqrtm
from datetime import datetime
from concurrent import futures
import os
from pathlib import Path
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import pickle
su2b = array([
[[ 1, 0], [ 0, 1]],
[[ 0, 1], [ 1, 0]],
[[ 0,-1j], [ 1j, 0]],
[[ 1, 0], [ 0, -1]]
]
)
su2Bases = []
newbases = su2b.copy()
def makeSU2Bases(numberOfQubits):
global newbases, su2Bases
for _ in range(numberOfQubits-1):
for i in range(len(newbases)):
su2Bases.extend([kron(newbases[i], su2b[j]) for j in range(4)])
newbases = su2Bases.copy()
su2Bases = []
su2Bases = array(newbases) / (2**numberOfQubits)
bH = array([[1,0],[0,0]])
bV = array([[0,0],[0,1]])
bD = array([[1/2,1/2],[1/2,1/2]])
bR = array([[1/2,1j/2],[-1j/2,1/2]])
bL = array([[1/2,-1j/2],[1j/2,1/2]])
initialBases = array([bH, bV, bR, bD])
cycleBases1 = array([bH, bV, bR, bD])
cycleBases2 = array([bD, bR, bV, bH])
def makeBases(numberOfQubits):
beforeBases = initialBases
for _ in range(numberOfQubits - 1):
afterBases = []
for i in range(len(beforeBases)):
if i % 2 == 0:
afterBases.extend([kron(beforeBases[i], cycleBase) for cycleBase in cycleBases1])
else:
afterBases.extend([kron(beforeBases[i], cycleBase) for cycleBase in cycleBases2])
beforeBases = afterBases
baseName = []
for i in range(2**numberOfQubits):
if i%4 == 0:
baseName.append("|"+ str(np.binary_repr(i, width=4)) +">")
return array(afterBases), baseName
def makeBMatrix(numberOfQubits, bases):
global su2Bases
B = np.zeros((4**numberOfQubits, 4**numberOfQubits))
for i in range(4**numberOfQubits):
for j in range(4**numberOfQubits):
B[i][j] = np.trace(bases[i] @ su2Bases[j])
return B
def makeMMatrix(numberOfQubits, bases):
global su2Bases
B = makeBMatrix(numberOfQubits, bases)
BInverse = np.linalg.inv(B)
M = []
for i in range(4**numberOfQubits):
M.append(sum([BInverse[j][i] * su2Bases[j] for j in range(4**numberOfQubits)]))
return array(M)
def makeInitialDensityMatrix(numberOfQubits, dataList, bases, M):
# M = makeMMatrix(numberOfQubits, bases)
N = sum([np.trace(M[i]) * dataList[i] for i in range(4**numberOfQubits)])
densityMatrix = sum([dataList[i] * M[i] for i in range(4**numberOfQubits)]) / N
initialDensityMatrix = choleskyDecomposition(numberOfQubits, densityMatrix)
return initialDensityMatrix
""" cholesky decomposition """
def choleskyDecomposition(numberOfQubits, matrix):
    L = np.zeros([2**numberOfQubits, 2**numberOfQubits], dtype=np.complex128)
for i in range(2**numberOfQubits-1, -1, -1):
s = matrix[i][i]
for k in range(2**numberOfQubits-1, i, -1):
s -= np.conjugate(L[k][i]) * L[k][i]
        # s may dip slightly below zero from rounding; the complex sqrt handles both signs
        L[i][i] = np.sqrt(s)
for j in range(i):
t = matrix[i][j]
for k in range(2**numberOfQubits-1, i, -1):
t -= (np.conjugate(L[k][i]) * L[k][j])
if L[i][i] != 0:
L[i][j] = t / np.conjugate(L[i][i])
else:
L[i][j] = t / 1e-9
for i in range(2**numberOfQubits):
L[i][i] = np.real(L[i][i])
return (np.conjugate(L).T @ L) / np.trace(np.conjugate(L).T @ L)
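if __name__ == "__main__":
    # Sanity sketch (not part of the original record): the projection should
    # return a unit-trace, positive-semidefinite density matrix.
    rho = choleskyDecomposition(1, array([[0.6, 0.1], [0.1, 0.4]]))
    print(np.trace(rho))  # ~1.0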
""" Get Experimental Data """
def getExperimentalData(pathOfExperimentalData):
"""
getExperimentalData(pathOfExperimentalData(string)):
This function is getting experimental data from file at "pathOfExperimentalData",
----------------------------------------------------------------------------------------
return:
np.array of them.
"""
with open(pathOfExperimentalData) as f:
experimentalData = []
for s in f.readlines():
experimentalData.extend(map(int, s.strip().split()))
return array(experimentalData)
def doIterativeAlgorithm(numberOfQubits, bases, listOfExperimentalDatas, MMatrix):
"""
doIterativeAlgorithm():
This function is to do iterative algorithm(10.1103/PhysRevA.63.040303) and diluted MLE algorithm(10.1103/PhysRevA.75.042108) to a set of datas given from a experiment.
This recieve four variables (numberOfQubits, bases, listAsExperimentalDatas),
and return most likely estimated density matrix (np.array) and total time of calculation(datetime.timedelta).
First quantum state matrix for this algorithm is a identity matrix.
--------------------------------------------------------------------------------------------------------------
Return:
most likely estimated density matrix(np.array),
"""
""" Setting initial parameters """
iter = 0
epsilon = 1000
endDiff = 1e-11
diff = 100
# TolFun = 10e-11
# traceDistance = 100
maxNumberOfIteration = 100000
dataList = listOfExperimentalDatas
totalCountOfData = sum(dataList)
    nDataList = dataList / totalCountOfData # nDataList is a list of normalized data
densityMatrix = makeInitialDensityMatrix(numberOfQubits, dataList, bases, MMatrix)
# densityMatrix = identity(2 ** numberOfQubits)
""" Start iteration """
# while traceDistance > TolFun and iter <= maxNumberOfIteration:
while diff > endDiff and iter <= maxNumberOfIteration and epsilon > 1e-6:
probList = [trace(base @ densityMatrix) for base in bases]
nProbList = probList / sum(probList)
rotationMatrix = sum([(nDataList[i] / probList[i]) * bases[i] for i in range(4 ** numberOfQubits) if probList[i] != 0])
""" Normalization of Matrices for Measurement Bases """
U = np.linalg.inv(sum(bases)) / sum(probList)
rotationMatrixLeft = (identity(2 ** numberOfQubits) + epsilon * U @ rotationMatrix) / (1 + epsilon)
rotationMatrixRight = (identity(2 ** numberOfQubits) + epsilon * rotationMatrix @ U) / (1 + epsilon)
""" Calculation of updated density matrix """
modifiedDensityMatrix = rotationMatrixLeft @ densityMatrix @ rotationMatrixRight / trace(rotationMatrixLeft @ densityMatrix @ rotationMatrixRight)
eigValueArray, eigVectors = np.linalg.eig(densityMatrix - modifiedDensityMatrix)
traceDistance = sum(np.absolute(eigValueArray)) / 2
""" Update Likelihood Function, and Compared with older one """
LikelihoodFunction = sum([nDataList[i] * np.log(nProbList[i]) for i in range(4 ** numberOfQubits) if nProbList[i] != 0])
probList = [trace(base @ modifiedDensityMatrix) for base in bases]
nProbList = probList / sum(probList)
modifiedLikelihoodFunction = sum([nDataList[i] * np.log(nProbList[i]) for i in range(4 ** numberOfQubits) if nProbList[i] != 0])
nowdiff = np.real(modifiedLikelihoodFunction - LikelihoodFunction)
""" Show Progress of Calculation """
progress = 100 * iter / maxNumberOfIteration
if progress % 5 == 0:
msg = "Progress of calculation: " + str(int(progress)) + "%"
print(msg)
""" Increment """
iter += 1
""" Check Increasing of Likelihood Function """
if nowdiff < 0:
epsilon = epsilon * 0.1
continue
else:
diff = nowdiff
""" Update Density Matrix """
densityMatrix = modifiedDensityMatrix.copy()
""" Check That Max Iteration Number was appropriate """
if iter >= maxNumberOfIteration:
print("----------------------------------------------")
print("Iteration time reached max iteration number.")
print("The number of max iteration times is too small.")
print("----------------------------------------------")
""" Show the total number of iteration """
endIterationTimes = iter
print("Iteration was '" + str(endIterationTimes) + "' times.")
return modifiedDensityMatrix, endIterationTimes
""" Calculate Fidelity """
def calculateFidelity(idealDensityMatrix, estimatedDensityMatrix):
"""
calculateFidelity(idealDensityMatrix, estimatedDensityMatrix):
"""
fidelity = np.real(trace(sqrtm(sqrtm(idealDensityMatrix) @ estimatedDensityMatrix @ sqrtm(idealDensityMatrix)))) ** 2
return fidelity
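if __name__ == "__main__":
    # Sanity sketch (not part of the original record): the fidelity of a
    # density matrix with itself is 1 up to floating-point error.
    plus = array([[0.5, 0.5], [0.5, 0.5]])  # the pure state |+><+|
    print(calculateFidelity(plus, plus))  # ~1.0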
""" Iterative Simulation """
def doIterativeSimulation(numberOfQubits, bases, pathOfExperimentalData, idealDensityMatrix, resultDirectoryName, MMatrix, baseNames):
"""
doIterativeSimulation(numberOfQubits, bases, pathOfExperimentalData, idealDensityMatrix, resultDirectoryName, MMatrix)
"""
""" Get Experimental Data"""
listOfExperimentalData = getExperimentalData(pathOfExperimentalData)
""" Calculate """
estimatedDensityMatrix, totalIterationTime = doIterativeAlgorithm(numberOfQubits, bases, listOfExperimentalData, MMatrix)
fidelity = calculateFidelity(idealDensityMatrix, estimatedDensityMatrix)
""" Make File Name of result """
l = 0
r = len(pathOfExperimentalData)-1
for i in range(len(pathOfExperimentalData)):
if pathOfExperimentalData[len(pathOfExperimentalData)-1-i] == ".":
r = len(pathOfExperimentalData)-1-i
if pathOfExperimentalData[len(pathOfExperimentalData)-1-i] == "/" or pathOfExperimentalData[len(pathOfExperimentalData)-1-i] == "\\":
l = len(pathOfExperimentalData)-i
break
resultFileName = pathOfExperimentalData[l:r]
resultFilePath = '.\\result\\qubit\\iterative\\' + resultDirectoryName + '\\' + 'result' + '.txt'
resultIterationTimeFilePath = '.\\result\\qubit\\iterative\\' + resultDirectoryName + '\\' + 'resultIterationTime' + '.txt'
""" Save Result """
with open(resultFilePath, mode='a') as f:
f.writelines(str(fidelity) + '\n')
with open(resultIterationTimeFilePath, mode='a') as f:
f.writelines(str(totalIterationTime) + '\n')
""" Make 3D Plot """
plotResult(numberOfQubits, estimatedDensityMatrix, baseNames)
""" Poisson Distributed Simulation """
def doPoissonDistributedSimulation(numberOfQubits, bases, pathOfExperimentalData, idealDensityMatrix, resultDirectoryName, MMatrix):
"""
doPoissonDistributedSimulation(numberOfQubits, bases, pathOfExperimentalData, idealDensityMatrix, resultDirectoryName, MMatrix)
"""
""" Get Experimental Data"""
listOfExperimentalData = getExperimentalData(pathOfExperimentalData)
""" Calculate """
estimatedDensityMatrix, totalIterationTime = doIterativeAlgorithm(numberOfQubits, bases, random.poisson(listOfExperimentalData), MMatrix)
fidelity = calculateFidelity(idealDensityMatrix, estimatedDensityMatrix)
""" Make File Name of result """
l = 0
r = len(pathOfExperimentalData)-1
for i in range(len(pathOfExperimentalData)):
if pathOfExperimentalData[len(pathOfExperimentalData)-1-i] == ".":
r = len(pathOfExperimentalData)-1-i
if pathOfExperimentalData[len(pathOfExperimentalData)-1-i] == "/" or pathOfExperimentalData[len(pathOfExperimentalData)-1-i] == "\\":
l = len(pathOfExperimentalData)-i
break
resultFileName = pathOfExperimentalData[l:r]
resultFilePath = '.\\result\\qubit\\poisson\\' + resultDirectoryName + "\\" + resultFileName + '_result' + '.txt'
""" Save Result """
with open(resultFilePath, mode='a') as f:
f.write(str(fidelity) + '\n')
def plotResult(numberOfQubits, densityMatrix, baseNames):
"""
plotResult(densityMatrix)
"""
""" Plot Setting """
xedges, yedges = np.arange(2**numberOfQubits), np.arange(2**numberOfQubits)
    xpos, ypos = np.meshgrid(xedges, yedges)
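    # The record is truncated here; a plausible continuation (assumption) that
    # renders the real part of the density matrix as a 3D bar chart:
    xpos = xpos.flatten()
    ypos = ypos.flatten()
    zpos = np.zeros_like(xpos)
    dx = dy = 0.8 * np.ones_like(zpos)
    dz = np.real(densityMatrix).flatten()
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.bar3d(xpos, ypos, zpos, dx, dy, dz)
    ax.set_title(' '.join(baseNames))
    plt.show()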
#!/usr/bin/python
#
# Inline, Crossline, True dip, Dip Azimuth using the vector filtered envelope weighted phase gradient
# Derivatives calulated using Kroon's 3 point filter
#
import sys,os
import numpy as np
from scipy.signal import hilbert
from numba import autojit
#
# Import the module with the I/O scaffolding of the External Attribute
#
sys.path.insert(0, os.path.join(sys.path[0], '..'))
import extattrib as xa
import extlib as xl
#
# These are the attribute parameters
#
xa.params = {
'Inputs': ['Input'],
'Output': ['Crl_dip', 'Inl_dip', 'True Dip', 'Dip Azimuth'],
'ZSampMargin' : {'Value':[-15,15], 'Minimum':[-2,2], 'Symmetric': True},
'StepOut' : {'Value': [2,2], 'Minimum': [2,2], 'Symmetric': True},
'Par_0' : {'Name': 'Vector Filter ZStepOut', 'Value': 1},
'Par_1' : {'Name': 'Band', 'Value': 0.9},
'Select': {'Name': 'Filter', 'Values': ['Mean Dip', 'Vector L1 Median Dip', 'Vector L2 Median Dip'], 'Selection': 0},
'Help': 'http://waynegm.github.io/OpendTect-Plugin-Docs/external_attributes/DipandAzimuth.html'
}
#
# Define the compute function
#
def doCompute():
xs = xa.SI['nrinl']
ys = xa.SI['nrcrl']
zs = xa.params['ZSampMargin']['Value'][1] - xa.params['ZSampMargin']['Value'][0] + 1
filt = xa.params['Select']['Selection']
filtFunc = autojit(xl.vecmean) if filt==0 else autojit(xl.vmf_l1) if filt==1 else autojit(xl.vmf_l2)
inlFactor = xa.SI['zstep']/xa.SI['inldist'] * xa.SI['dipFactor']
crlFactor = xa.SI['zstep']/xa.SI['crldist'] * xa.SI['dipFactor']
band = xa.params['Par_1']['Value']
zw = min(2*int(xa.params['Par_0']['Value'])+1,3)
N = xa.params['ZSampMargin']['Value'][1]
kernel = xl.hilbert_kernel(N, band)
while True:
xa.doInput()
indata = xa.Input['Input']
#
# Analytic Signal
#
ansig = np.apply_along_axis(np.convolve,-1, indata, kernel, mode="same")
sr = np.real(ansig)
si = np.imag(ansig)
#
# Compute partial derivatives
sx = xl.kroon3( sr, axis=0 )
sy = xl.kroon3( sr, axis=1 )
sz = xl.kroon3( sr, axis=2 )
shx = xl.kroon3( si, axis=0 )
shy = xl.kroon3( si, axis=1 )
shz = xl.kroon3( si, axis=2 )
px = sr[1:xs-1,1:ys-1,:] * shx[1:xs-1,1:ys-1,:] - si[1:xs-1,1:ys-1,:] * sx[1:xs-1,1:ys-1,:]
py = sr[1:xs-1,1:ys-1,:] * shy[1:xs-1,1:ys-1,:] - si[1:xs-1,1:ys-1,:] * sy[1:xs-1,1:ys-1,:]
pz = sr[1:xs-1,1:ys-1,:] * shz[1:xs-1,1:ys-1,:] - si[1:xs-1,1:ys-1,:] * sz[1:xs-1,1:ys-1,:]
#
# Normalise the gradients so Z component is positive
        p = np.sign(pz) / np.sqrt(px*px + py*py + pz*pz)
import itertools
import numpy as np
import pandas as pd
try:
from ortools.graph import pywrapgraph
except ModuleNotFoundError:
print('Could not import ortools')
# import networkx as nx
from .loading import subset2vec, vec2subset, compressSubsets
__all__ = ['DenseICSDist',
'pwICSDist',
'decomposeDist',
'getDecomposed']
"""Formulating polyfunctionality distance as a min cost flow problem"""
def pwICSDist(cdf, magCol='pctpos', cyCol='cytokine', indexCols=['ptid', 'visitday', 'tcellsub', 'antigen'], factor=100000):
"""Compute all pairwise ICS distances among samples indicated by index columns.
Parameters
----------
cdf : pd.DataFrame
Contains one row per cell population, many rows per sample.
magCol : str
Column containing the magnitude which should add up to 1 for all rows in a sample
cyCol : str
Column containing the marker combination for the row. E.g. IFNg+IL2-TNFa+
indexCols : list
List of columns that make each sample uniquely identifiable
factor : int
        Since cost-flow estimates are based on integers, it's effectively the number of
        decimal places to be accurate to. Default 1e5 means magCol is multiplied by 1e5 before rounding to int.
Returns
-------
dmatDf : pd.DataFrame
        Symmetric pairwise distance matrix with hierarchical columns/index of indexCols"""
cdf = cdf.set_index(indexCols + [cyCol])[magCol].unstack(indexCols).fillna(0)
n = cdf.shape[1]
dmat = np.zeros((n, n))
tab = []
for i in range(n):
for j in range(n):
if i <= j:
d = DenseICSDist(cdf.iloc[:,i], cdf.iloc[:,j], factor=factor)
dmat[i, j] = d
dmat[j, i] = d
dmatDf = pd.DataFrame(dmat, columns=cdf.columns, index=cdf.columns)
return dmatDf
def DenseICSDist(freq1, freq2, factor=100000, verbose=False, tabulate=False):
"""Compute a positive, symetric distance between two frequency distributions,
where each node of the distribution can be related to every other node based
on marker combination (e.g. IFNg+IL2-TNFa-). Uses a cost-flow optimization
approach to finding the minimum dist/cost to move probability density from
one node (marker combination) to another, to have the effect of turning freq1
into freq2.
Parameters
----------
freq1, freq2 : pd.Series
Frequency distribution that should sum to one, with identical indices
containing all marker combinations
factor : int
        Since cost-flow estimates are based on integers, it's effectively the number of
        decimal places to be accurate to. Default 1e5 means magCol is multiplied by 1e5 before rounding to int.
verbose : bool
Print all cost-flow arcs. Useful for debugging.
tabulate : bool
Optionally return a tabulation of all the cost-flows.
Returns
-------
cost : float
Total distance between distributions in probability units.
costtab : np.ndarray [narcs x nmarkers + 1]
        Tabulation of all the flows required to make freq1 == freq2.
        Each row is an arc. The first nmarker columns identify the arc (the difference
        between the two node vectors) and the last column is the cost-flow/distance along that arc."""
nodeLabels = freq1.index.tolist()
nodeVecs = [subset2vec(m) for m in nodeLabels]
markers = nodeLabels[0].replace('-', '+').split('+')[:-1]
nmarkers = len(markers)
# nodes = list(range(len(nodeLabels)))
if nmarkers == 1:
flow = freq1[markers[0] + '+'] - freq2[markers[0] + '+']
if tabulate:
return np.abs(flow), np.zeros((0,nmarkers+1))
else:
return np.abs(flow)
def _cost(n1, n2):
"""Hamming distance between two node labels"""
return int(np.sum(np.abs(np.array(nodeVecs[n1]) - np.array(nodeVecs[n2]))))
diffv = freq1/freq1.sum() - freq2/freq2.sum()
diffv = (diffv * factor).astype(int)
extra = diffv.sum()
if extra > 0:
for i in range(extra):
diffv[i] -= 1
elif extra < 0:
for i in range(-extra):
diffv[i] += 1
assert diffv.sum() == 0
posNodes = np.nonzero(diffv > 0)[0]
negNodes = np.nonzero(diffv < 0)[0]
if len(posNodes) == 0:
"""Returns None when freq1 - freq2 is 0 for every subset/row"""
if tabulate:
return 0, np.zeros((0,nmarkers+1))
else:
return 0
"""Creates a dense network connecting all sources and sinks with cost/distance specified by how many functions differ
TODO: Could this instead be a sparse network connecting function combinations that only differ by 1? Cells have to move
multiple times along the network then. This may minimize to the same solution??"""
tmp = np.array([o for o in itertools.product(posNodes, negNodes)])
startNodes = tmp[:,0].tolist()
endNodes = tmp[:,1].tolist()
"""Set capacity to max possible"""
capacities = diffv[startNodes].tolist()
costs = [_cost(n1,n2) for n1,n2 in zip(startNodes, endNodes)]
supplies = diffv.tolist()
"""Instantiate a SimpleMinCostFlow solver."""
min_cost_flow = pywrapgraph.SimpleMinCostFlow()
"""Add each arc."""
for i in range(len(startNodes)):
min_cost_flow.AddArcWithCapacityAndUnitCost(startNodes[i], endNodes[i],
capacities[i], costs[i])
"""Add node supplies."""
for i in range(len(supplies)):
min_cost_flow.SetNodeSupply(i, supplies[i])
"""Find the minimum cost flow"""
res = min_cost_flow.SolveMaxFlowWithMinCost()
if res != min_cost_flow.OPTIMAL:
if verbose:
print('No optimal solution found.')
if tabulate:
return np.nan, None
else:
return np.nan
if verbose:
print('Minimum cost:', min_cost_flow.OptimalCost())
print('')
print(' Arc Flow / Capacity Cost')
for i in range(min_cost_flow.NumArcs()):
cost = min_cost_flow.Flow(i) * min_cost_flow.UnitCost(i)
print('%1s -> %1s %3s / %3s %3s' % (
min_cost_flow.Tail(i),
min_cost_flow.Head(i),
min_cost_flow.Flow(i),
min_cost_flow.Capacity(i),
cost))
cost = min_cost_flow.OptimalCost()/factor
if tabulate:
        costtab = np.zeros((tmp.shape[0], nmarkers + 1))
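        # The record is truncated here; a plausible tabulation (assumption): one
        # row per arc, giving the node-difference vector and the flow moved.
        for i in range(min_cost_flow.NumArcs()):
            n1, n2 = startNodes[i], endNodes[i]
            costtab[i, :nmarkers] = np.array(nodeVecs[n1]) - np.array(nodeVecs[n2])
            costtab[i, nmarkers] = min_cost_flow.Flow(i) / factor
        return cost, costtab
    else:
        return cost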
'''
state variables: 7 phi values (0, 2pi), 7 dot phi values (real)
parameters: alpha (>= 0), gamma (0, 1)
calculated geometry: r_ij matrix, theta_ij matrix
calculate forces: examine https://wolfram74.github.io/worked_problems/spring_20/week_2019_12_30/view.html
set up flow:
calculate the geometry values
delta x/y -> rij+theta_ij
store those
generate force equations, include drag term
force equations will take in full state vector and return their contribution to change of state
2*7 in, 2*7 out
set initial phi values (0, and equi potentials)
time evolution outline
have state
calculate kernel 1 by summing forces
force equation could have as input self, state, and i,j, determine output from that, 1 function for all terms?
i==j -> drag, else torque
'''
import numpy
cos = numpy.cos
sin = numpy.sin
atan = numpy.arctan2
pi = numpy.pi
class System():
def __init__(self):
self.r_vals = numpy.zeros((7,7))
        self.theta_vals = numpy.zeros((7,7))
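    def compute_geometry(self, positions):
        # A sketch of the geometry pass described in the module notes above
        # (assumption; the original record is truncated): given hypothetical
        # (7, 2) x/y coordinates, store pairwise distances r_ij and bearings theta_ij.
        for i in range(7):
            for j in range(7):
                dx = positions[j, 0] - positions[i, 0]
                dy = positions[j, 1] - positions[i, 1]
                self.r_vals[i, j] = (dx * dx + dy * dy) ** 0.5
                self.theta_vals[i, j] = atan(dy, dx)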
import os
import h5py
import numpy as np
import pyqtgraph as pg
from pyqtgraph.Qt import QtCore
from .area_mask import MaskAssemble
from .find_center import find_center
from .pyqtgraph_mod import LineROI
from .file_reader import read_raw_file
pg.setConfigOptions(imageAxisOrder='row-major')
class SimpleMask(object):
def __init__(self, pg_hdl, infobar):
self.data_raw = None
self.shape = None
self.qmap = None
self.mask = None
self.mask_kernel = None
self.saxs_lin = None
self.saxs_log = None
self.saxs_log_min = None
self.plot_log = True
self.new_partition = None
self.meta = None
self.hdl = pg_hdl
self.infobar = infobar
self.extent = None
self.hdl.scene.sigMouseMoved.connect(self.show_location)
self.bad_pixel_set = set()
self.qrings = []
self.idx_map = {
0: "scattering",
1: "scattering * mask",
2: "mask",
3: "dqmap_partition",
4: "sqmap_partition",
5: "preview"
}
def is_ready(self):
if self.meta is None or self.data_raw is None:
return False
else:
return True
def find_center(self):
if self.saxs_lin is None:
return
mask = self.mask
# center = (self.meta['bcy'], self.meta['bcx'])
center = find_center(self.saxs_lin, mask=mask, center_guess=None,
scale='log')
return center
def mask_evaluate(self, target, **kwargs):
msg = self.mask_kernel.evaluate(target, **kwargs)
# preview the mask
mask = self.mask_kernel.get_one_mask(target)
self.data_raw[5][:, :] = mask
return msg
def mask_apply(self, target):
mask = self.mask_kernel.get_one_mask(target)
self.mask_kernel.enable(target)
self.mask = np.logical_and(self.mask, mask)
self.data_raw[1] = self.saxs_log * self.mask
self.data_raw[2] = self.mask
if target == "mask_qring":
self.qrings = self.mask_kernel.workers[target].get_qrings()
if self.plot_log:
log_min = np.min(self.saxs_log[self.mask > 0])
self.data_raw[1][np.logical_not(self.mask)] = log_min
else:
lin_min = np.min(self.saxs_lin[self.mask > 0])
self.data_raw[1][np.logical_not(self.mask)] = lin_min
def get_pts_with_similar_intensity(self, cen=None, radius=50,
variation=50):
vmin = max(0, int(cen[0] - radius))
vmax = min(self.shape[0], int(cen[0] + radius))
hmin = max(0, int(cen[1] - radius))
hmax = min(self.shape[1], int(cen[1] + radius))
crop = self.saxs_lin[vmin:vmax, hmin:hmax]
val = self.saxs_lin[cen]
idx = np.abs(crop - val) <= variation / 100.0 * val
pos = np.array(np.nonzero(idx))
pos[0] += vmin
pos[1] += hmin
pos = np.roll(pos, shift=1, axis=0)
return pos.T
def save_partition(self, save_fname):
# if no partition is computed yet
if self.new_partition is None:
return
with h5py.File(save_fname, 'w') as hf:
if '/data' in hf:
del hf['/data']
data = hf.create_group('data')
data.create_dataset('mask', data=self.mask)
for key, val in self.new_partition.items():
data.create_dataset(key, data=val)
# directories that remain the same
dt = h5py.vlen_dtype(np.dtype('int32'))
version = data.create_dataset('Version', (1,), dtype=dt)
version[0] = [5]
maps = data.create_group("Maps")
dt = h5py.special_dtype(vlen=str)
map1name = maps.create_dataset('map1name', (1,), dtype=dt)
map1name[0] = 'q'
map2name = maps.create_dataset('map2name', (1,), dtype=dt)
map2name[0] = 'phi'
empty_arr = np.array([])
maps.create_dataset('q', data=self.qmap['q'])
maps.create_dataset('phi', data=self.qmap['phi'])
maps.create_dataset('x', data=empty_arr)
maps.create_dataset('y', data=empty_arr)
for key, val in self.meta.items():
if key in ['datetime', 'energy', 'det_dist', 'pix_dim', 'bcx',
'bcy', 'saxs']:
continue
val = np.array(val)
if val.size == 1:
val = val.reshape(1, 1)
data.create_dataset(key, data=val)
print('partition map is saved')
# generate 2d saxs
def read_data(self, fname=None, **kwargs):
reader = read_raw_file(fname)
saxs = reader.get_scattering(**kwargs)
if saxs is None:
print('cannot read the scattering data from raw data file.')
return
self.meta = reader.load_meta()
# keep same
self.data_raw = np.zeros(shape=(6, *saxs.shape))
self.mask = np.ones(saxs.shape, dtype=np.bool)
saxs_nonzero = saxs[saxs > 0]
# use percentile instead of min to be robust
self.saxs_lin_min = np.percentile(saxs_nonzero, 1)
self.saxs_log_min = np.log10(self.saxs_lin_min)
self.saxs_lin = saxs.astype(np.float32)
self.min_val = np.min(saxs[saxs > 0])
self.saxs_log = np.log10(saxs + self.min_val)
self.shape = self.data_raw[0].shape
# reset the qrings after data loading
self.qrings = []
self.qmap = self.compute_qmap()
self.mask_kernel = MaskAssemble(self.shape, self.saxs_log)
self.mask_kernel.update_qmap(self.qmap)
self.extent = self.compute_extent()
# self.meta['saxs'] = saxs
self.data_raw[0] = self.saxs_log
self.data_raw[1] = self.saxs_log * self.mask
self.data_raw[2] = self.mask
def compute_qmap(self):
k0 = 2 * np.pi / (12.398 / self.meta['energy'])
v = np.arange(self.shape[0], dtype=np.uint32) - self.meta['bcy']
h = np.arange(self.shape[1], dtype=np.uint32) - self.meta['bcx']
vg, hg = np.meshgrid(v, h, indexing='ij')
r = np.sqrt(vg * vg + hg * hg) * self.meta['pix_dim']
# phi = np.arctan2(vg, hg)
# to be compatible with matlab xpcs-gui; phi = 0 starts at 6 clock
# and it goes clockwise;
phi = np.arctan2(hg, vg)
phi[phi < 0] = phi[phi < 0] + np.pi * 2.0
phi = np.max(phi) - phi # make it clockwise
alpha = np.arctan(r / self.meta['det_dist'])
        # q = 2*k0*sin(alpha/2) is the exact momentum transfer; the small-angle
        # form k0*sin(alpha) is superseded by the line below
        qr = 2 * np.sin(alpha / 2) * k0
qx = qr * np.cos(phi)
qy = qr * np.sin(phi)
phi = np.rad2deg(phi)
# keep phi and q as np.float64 to keep the precision.
qmap = {
'phi': phi,
'alpha': alpha.astype(np.float32),
'q': qr,
'qx': qx.astype(np.float32),
'qy': qy.astype(np.float32)
}
return qmap
def get_qp_value(self, x, y):
x = int(x)
y = int(y)
shape = self.qmap['q'].shape
if 0 <= x < shape[1] and 0 <= y < shape[0]:
return self.qmap['q'][y, x], self.qmap['phi'][y, x]
else:
return None, None
def compute_extent(self):
k0 = 2 * np.pi / (12.3980 / self.meta['energy'])
x_range = np.array([0, self.shape[1]]) - self.meta['bcx']
y_range = np.array([-self.shape[0], 0]) + self.meta['bcy']
x_range = x_range * self.meta['pix_dim'] / self.meta['det_dist'] * k0
y_range = y_range * self.meta['pix_dim'] / self.meta['det_dist'] * k0
# the extent for matplotlib imshow is:
# self._extent = xmin, xmax, ymin, ymax = extent
# convert to a tuple of 4 elements;
return (*x_range, *y_range)
def show_location(self, pos):
if not self.hdl.scene.itemsBoundingRect().contains(pos) or \
self.shape is None:
return
shape = self.shape
mouse_point = self.hdl.getView().mapSceneToView(pos)
col = int(mouse_point.x())
row = int(mouse_point.y())
if col < 0 or col >= shape[1]:
return
if row < 0 or row >= shape[0]:
return
qx = self.qmap['qx'][row, col]
qy = self.qmap['qy'][row, col]
phi = self.qmap['phi'][row, col]
val = self.data_raw[self.hdl.currentIndex][row, col]
# msg = f'{self.idx_map[self.hdl.currentIndex]}: ' + \
msg = f'[x={col:4d}, y={row:4d}, ' + \
f'qx={qx:.3e}Å⁻¹, qy={qy:.3e}Å⁻¹, phi={phi:.1f}deg], ' + \
f'val={val:.03e}'
self.infobar.clear()
self.infobar.setText(msg)
return None
def show_saxs(self, cmap='jet', log=True, invert=False,
plot_center=True, plot_index=0, **kwargs):
if self.meta is None or self.data_raw is None:
return
# self.hdl.reset_limits()
self.hdl.clear()
# self.data = np.copy(self.data_raw)
# print('show_saxs', np.min(self.data[1]))
center = (self.meta['bcx'], self.meta['bcy'])
self.plot_log = log
if not log:
self.data_raw[0] = self.saxs_lin
else:
self.data_raw[0] = self.saxs_log
# if invert:
# temp = np.max(self.data[0]) - self.data[0]
# self.data[0] = temp
self.hdl.setImage(self.data_raw)
self.hdl.adjust_viewbox()
self.hdl.set_colormap(cmap)
# plot center
if plot_center:
t = pg.ScatterPlotItem()
t.addPoints(x=[center[0]], y=[center[1]], symbol='+', size=15)
self.hdl.add_item(t, label='center')
self.hdl.setCurrentIndex(plot_index)
return
def apply_drawing(self):
if self.meta is None or self.data_raw is None:
return
ones = np.ones(self.data_raw[0].shape, dtype=np.bool)
mask_n = np.zeros_like(ones, dtype=np.bool)
mask_e = np.zeros_like(mask_n)
mask_i = np.zeros_like(mask_n)
for k, x in self.hdl.roi.items():
if not k.startswith('roi_'):
continue
mask_temp = np.zeros_like(ones, dtype=np.bool)
            # return slice and transform
sl, _ = x.getArraySlice(self.data_raw[1], self.hdl.imageItem)
y = x.getArrayRegion(ones, self.hdl.imageItem)
# sometimes the roi size returned from getArraySlice and
# getArrayRegion are different;
nz_idx = np.nonzero(y)
h_beg = np.min(nz_idx[1])
h_end = np.max(nz_idx[1])
v_beg = np.min(nz_idx[0])
v_end = np.max(nz_idx[0])
sl_v = slice(sl[0].start, sl[0].start + v_end - v_beg + 1)
sl_h = slice(sl[1].start, sl[1].start + h_end - h_beg + 1)
mask_temp[sl_v, sl_h] = y[v_beg:v_end + 1, h_beg: h_end + 1]
if x.sl_mode == 'exclusive':
mask_e[mask_temp] = 1
elif x.sl_mode == 'inclusive':
mask_i[mask_temp] = 1
self.hdl.remove_rois(filter_str='roi_')
if np.sum(mask_i) == 0:
mask_i = 1
mask_p = np.logical_not(mask_e) * mask_i
return mask_p
def add_drawing(self, num_edges=None, radius=60, color='r',
sl_type='Polygon', width=3, sl_mode='exclusive',
second_point=None, label=None, movable=True):
# label: label of roi; default is None, which is for roi-draw
if label is not None and label in self.hdl.roi:
self.hdl.remove_item(label)
# cen = (shape[1] // 2, shape[2] // 2)
cen = (self.meta['bcx'], self.meta['bcy'])
if sl_mode == 'inclusive':
pen = pg.mkPen(color=color, width=width, style=QtCore.Qt.DotLine)
else:
pen = pg.mkPen(color=color, width=width)
handle_pen = pg.mkPen(color=color, width=width)
kwargs = {
'pen': pen,
'removable': True,
'hoverPen': pen,
'handlePen': handle_pen,
'movable': movable
}
if sl_type == 'Ellipse':
new_roi = pg.EllipseROI(cen, [60, 80], **kwargs)
# add scale handle
new_roi.addScaleHandle([0.5, 0], [0.5, 1], )
new_roi.addScaleHandle([0.5, 1], [0.5, 0])
new_roi.addScaleHandle([0, 0.5], [1, 0.5])
new_roi.addScaleHandle([1, 0.5], [0, 0.5])
elif sl_type == 'Circle':
if second_point is not None:
radius = np.sqrt((second_point[1] - cen[1]) ** 2 +
(second_point[0] - cen[0]) ** 2)
new_roi = pg.CircleROI(pos=[cen[0] - radius, cen[1] - radius],
radius=radius,
**kwargs)
elif sl_type == 'Polygon':
if num_edges is None:
num_edges = np.random.random_integers(6, 10)
# add angle offset so that the new rois don't overlap with each
# other
offset = np.random.random_integers(0, 359)
theta = np.linspace(0, np.pi * 2, num_edges + 1) + offset
x = radius * np.cos(theta) + cen[0]
y = radius * np.sin(theta) + cen[1]
            pts = np.vstack([x, y])
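            # The record is truncated here; a plausible continuation (assumption):
            # close the generated polygon into a pyqtgraph ROI using the pen
            # settings assembled above.
            new_roi = pg.PolyLineROI(pts.T, closed=True, **kwargs)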
import os
import importlib
import random
from imp import reload
from mpl_toolkits.axes_grid1 import make_axes_locatable, axes_size
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import numpy as np
import matplotlib.pyplot as plt
from visualisation import network_visualisation as vis
from visualisation import plotting_funcs as plotting_funcs
from analysis import auditory_analysis as aan
from analysis import quantify_aud_strfs as quant
from analysis import sparse_analysis as san
reload(plotting_funcs)
reload(san)
reload(quant)
reload(aan)
reload(vis)
def plot_quant_aud_figure(weights, sparse_weights, rstrfs,
Y_mds, t_pow_real, t_pow_model,
t_pow_sparse, save_path=None, use_blobs=False):
fig = plt.figure(figsize=[10,20])
gs = plt.GridSpec(200,100)
pop_ax = np.empty((2,3), dtype=object)
hist_ax = np.empty((2,2), dtype=object)
mds_ax = fig.add_subplot(gs[:45,:45])
temp_ax = fig.add_subplot(gs[:45,64:])
pop_ax[0,0] = fig.add_subplot(gs[55:85,:30])
pop_ax[0,1] = fig.add_subplot(gs[55:85, 35:65])
pop_ax[0,2] = fig.add_subplot(gs[55:85, 70:])
hist_ax[0,0] = fig.add_subplot(gs[95:110, :45])
hist_ax[0,1] = fig.add_subplot(gs[95:110, 55:])
pop_ax[1,0] = fig.add_subplot(gs[120:150,:30])
pop_ax[1,1] = fig.add_subplot(gs[120:150, 35:65])
pop_ax[1,2] = fig.add_subplot(gs[120:150, 70:])
hist_ax[1,0] = fig.add_subplot(gs[160:175, :45])
hist_ax[1,1] = fig.add_subplot(gs[160:175, 55:])
# gs.update(wspace=0.005, hspace=0.05) # set the spacing between axes.
labels = ['A1', 'predict', 'sparse', 'noise']
azure = '#006FFF'
    clors = ['red', 'black', azure, 'y']
    # markers = ['o', '_', '|']
markers = ['8', 'o', '^']
# mds_ax.set_axis_off()
# mds_ax.margins = [0,0]
mds_axlim = 20
mds_axes = plotting_funcs.plot_2D_projection_with_hists(Y_mds,mds_ax,
labels =None,
clors = clors,
markers = markers)
mds_axes[0].set_xlim([-mds_axlim,mds_axlim])
mds_axes[0].set_ylim([-mds_axlim,mds_axlim])
for ax in mds_axes:
ax.set_xticks([])
ax.set_yticks([])
ax.set_axis_off()
ax = temp_ax
divider = make_axes_locatable(ax)
aspect = 4
pad_fraction = 0.05
width = axes_size.AxesY(ax, aspect=1/aspect)
pad = axes_size.Fraction(pad_fraction, width)
dummy_ax_x = divider.append_axes("top", size=width, pad=0.1, sharex=ax)
# dummy_ax_y = divider.append_axes("right", size=width, pad=0.1, sharey=ax)
dummy_ax_x.set_axis_off()
# dummy_ax_y.set_axis_off()
    if use_blobs:
        markers = ['blob', 'blob', 'blob']
    else:
        markers = ['o', 'o', 'o']
t_plot_kwargs = {}
t_plot_kwargs['color']=clors[0]
t_plot_kwargs['label']='A1'
t_plot_kwargs['linewidth']=3
# ax.plot(real_measures.t_pow.mean(axis=0)/sum(real_measures.t_pow.mean(axis=0)),'o', **t_plot_kwargs )
# t_plot_kwargs['label']=''
ax.plot(t_pow_real, **t_plot_kwargs)
t_plot_kwargs = {}
t_plot_kwargs['color']=clors[1]
t_plot_kwargs['label']='prediction'
t_plot_kwargs['linewidth']=3
# ax.plot(res_pd.iloc[0].t_pow.mean(axis=0)[2:]/sum(res_pd.iloc[0].t_pow.mean(axis=0)[2:]),'_', **t_plot_kwargs)
# t_plot_kwargs['label']=''
ax.plot(t_pow_model, **t_plot_kwargs)
t_plot_kwargs['color']=clors[2]
t_plot_kwargs['label']='sparse'
t_plot_kwargs['linewidth']=3
# ax.plot(sparse_pd.iloc[11].t_pow.mean(axis=0)[2:]/sum(sparse_pd.iloc[11].t_pow.mean(axis=0)[2:]),'|', **t_plot_kwargs)
# t_plot_kwargs['label']=''
ax.plot(t_pow_sparse, **t_plot_kwargs)
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles, labels)
temp_ax = ax
temp_ax.set_xticklabels(['-250', '-200','-150','-100','-50', '0'])
temp_ax.set_ylabel('Proportion of power')
temp_ax.set_xlabel('Time (msec)')
[mf_peak_pos, mf_peak_neg, mf_bw_pos, mf_bw_neg, mt_peak_pos, mt_peak_neg, mt_bw_pos, mt_bw_neg, m_pow] = quant.quantify_strfs(weights)
[sf_peak_pos, sf_peak_neg, sf_bw_pos, sf_bw_neg, st_peak_pos, st_peak_neg, st_bw_pos, st_bw_neg, s_pow] = quant.quantify_strfs(sparse_weights)
[rf_peak_pos, rf_peak_neg, rf_bw_pos, rf_bw_neg, rt_peak_pos, rt_peak_neg, rt_bw_pos, rt_bw_neg, r_pow] = quant.quantify_strfs(rstrfs, n_h=38)
mf_ix = mf_bw_neg > 0 # and mf_bw_pos > 0
rf_ix = rf_bw_neg > 0 # and rf_bw_pos > 0
mt_ix = mt_bw_neg > 0 # and mt_bw_pos > 0
rt_ix = rt_bw_neg > 0 # and rt_bw_pos > 0
sf_ix = sf_bw_neg > 0 # and sf_bw_pos > 0
st_ix = st_bw_neg > 0 # and st_bw_pos > 0
print('Excluding units with 0 bw')
print('model proportion included: ')
print(np.sum(mt_ix)/mt_ix.size)
print('total: %i' % mt_ix.size)
print('included: %i' % np.sum(mt_ix))
print('excluded: %i' % (mt_ix.size - np.sum(mt_ix)))
print('sparse proportion included: ')
print(np.sum(st_ix)/st_ix.size)
print('total: %i' % st_ix.size)
print('included: %i' % np.sum(st_ix))
print('excluded: %i' % (st_ix.size - np.sum(st_ix)))
print('real proportion included: ')
print(np.sum(rt_ix)/rt_ix.size)
print('total: %i' % rt_ix.size)
print('included: %i' % np.sum(rt_ix))
print('excluded: %i' % (rt_ix.size - np.sum(rt_ix)))
mf_bw_pos = mf_bw_pos[mf_ix]
rf_bw_pos = rf_bw_pos[rf_ix]
sf_bw_pos = sf_bw_pos[sf_ix]
mt_bw_pos = mt_bw_pos[mt_ix]
rt_bw_pos = rt_bw_pos[rt_ix]
st_bw_pos = st_bw_pos[st_ix]
mf_bw_neg = mf_bw_neg[mf_ix]
rf_bw_neg = rf_bw_neg[rf_ix]
sf_bw_neg = sf_bw_neg[sf_ix]
mt_bw_neg = mt_bw_neg[mt_ix]
rt_bw_neg = rt_bw_neg[rt_ix]
st_bw_neg = st_bw_neg[st_ix]
xs = np.empty((2,3), dtype=object)
ys = np.empty((2,3), dtype=object)
xs[0,:] = [rt_bw_pos, mt_bw_pos, st_bw_pos]
ys[0,:] = [rt_bw_neg, mt_bw_neg, st_bw_neg]
xs[1,:] = [rf_bw_pos, mf_bw_pos, sf_bw_pos]
ys[1,:] = [rf_bw_neg, mf_bw_neg, sf_bw_neg]
lims = [225,6]
#Make the scatter plots on the population axes
for ii in range(xs.shape[0]):
for jj,ax in enumerate(pop_ax[ii,:]):
x = xs[ii,jj]
y = ys[ii,jj]
clor = clors[jj] # '#444444'
if use_blobs:
pop_ax[ii,jj],_ = plotting_funcs.blobscatter(x,y, ax=ax, **{'facecolor':'none','edgecolor':clor})
else:
ax.scatter(x, y, color=clor)  # use the per-panel color computed above
plt_kwargs = {'color':'k', 'linestyle':'--', 'alpha':0.8}
pop_ax[ii,jj].plot(range(lims[ii]+1), **plt_kwargs)
pop_ax[ii,jj].set_xlim([0,lims[ii]])
pop_ax[ii,jj].set_ylim([0,lims[ii]])
if ii==0:
pop_ax[ii,jj].set_yticks([0, 50, 100, 150, 200])
pop_ax[ii,jj].set_xticks([0, 50, 100, 150, 200])
print('x or y>100', sum(np.logical_or(x>100, y>100)), 'out of ', len(x))
if ii==1:
pop_ax[ii,jj].set_yticks([0,2,4,6])
print('x or y>4', sum(np.logical_or(x>4, y>4)), 'out of ', len(x))
if jj>0:
# pop_ax[ii,jj].spines['left'].set_visible(False)
# pop_ax[ii,jj].set_yticks([])
pop_ax[ii,jj].set_yticklabels([])
inset_lims = [50,1.5]
inset_ticks = [[25,50], [0.75,1.5]]
inset_binss = [np.arange(0.1,50,5), np.arange(0.05,2,0.15)]
#Make insets to show details on first two scatter plots
for ii in range(xs.shape[0]):
for jj,ax in enumerate(pop_ax[ii,:2]):
x = xs[ii,jj]
y = ys[ii,jj]
clor = clors[jj] # '#444444'
inset_axes_kwargs = {'xlim':[0,inset_lims[ii]], 'ylim':[0,inset_lims[ii]],
'xticks':inset_ticks[ii], 'yticks':inset_ticks[ii]}
inset_ax = inset_axes(pop_ax[ii,jj],
width="40%", # width = 30% of parent_bbox
height="40%", # height : 1 inch
loc=1, axes_kwargs=inset_axes_kwargs)
if use_blobs:
inset_ax,_ = plotting_funcs.blobscatter(x,y, ax=inset_ax, **{'facecolor':'none','edgecolor':clor})
else:
inset_ax.scatter(x, y, color=clor)
# inset_ax.hist2d(x,y, bins=inset_binss[ii])
# plt_kwargs = {'color':'k', 'linestyle':'--', 'alpha':0.8}
# inset_ax.plot(range(lims[ii]+1), **plt_kwargs)
binss = [
|
np.arange(-2.5,200,5)
|
numpy.arange
|
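
The make_axes_locatable idiom used on temp_ax above is easy to misread in context; here is a hedged, stand-alone sketch of the same trick (axes sizes are illustrative): it reserves a strip above the axes, one quarter of the axes height, without drawing into it.

import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable, axes_size

fig, ax = plt.subplots()
ax.plot(np.linspace(0, 1, 50))
divider = make_axes_locatable(ax)
width = axes_size.AxesY(ax, aspect=1 / 4)  # strip height = 1/4 of the axes height
pad = axes_size.Fraction(0.05, width)      # pad proportional to the strip
dummy = divider.append_axes("top", size=width, pad=pad, sharex=ax)
dummy.set_axis_off()                       # reserve the space without drawing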
#!/usr/bin/env python
# Copyright (c) 2015. <NAME> <<EMAIL>>
#
# This work is licensed under the terms of the Apache Software License, Version 2.0. See the file LICENSE for details.
"""
Description here
"""
import numpy as np
def weighted_mode(a, w, axis=0):
"""Returns an array of the weighted modal (most common) value in a
If there is more than one such value, only the first is returned.
The bin-count for the modal bins is also returned.
This is an extension of the algorithm in scipy.stats.mode.
COPIED from sklearn.utils.extmath because the extmath functions are not exported outside scikit-learn itself.
Parameters
----------
a : array_like
n-dimensional array of which to find mode(s).
w : array_like
n-dimensional array of weights for each value
axis : int, optional
Axis along which to operate. Default is 0, i.e. the first axis.
Returns
-------
vals : ndarray
Array of modal values.
score : ndarray
Array of weighted counts for each mode.
Examples
--------
>>> from sklearn.utils.extmath import weighted_mode
>>> x = [4, 1, 4, 2, 4, 2]
>>> weights = [1, 1, 1, 1, 1, 1]
>>> weighted_mode(x, weights)
(array([ 4.]), array([ 3.]))
The value 4 appears three times: with uniform weights, the result is
simply the mode of the distribution.
>>> weights = [1, 3, 0.5, 1.5, 1, 2] # deweight the 4's
>>> weighted_mode(x, weights)
(array([ 2.]), array([ 3.5]))
The value 2 has the highest score: it appears twice with weights of
1.5 and 2: the sum of these is 3.
See Also
--------
scipy.stats.mode
"""
if axis is None:
a = np.ravel(a)
w = np.ravel(w)
axis = 0
else:
a = np.asarray(a)
w = np.asarray(w)
if a.shape != w.shape:
w = np.zeros(a.shape, dtype=w.dtype) + w
scores = np.unique(
|
np.ravel(a)
|
numpy.ravel
|
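
The weighted_mode body is cut off at the dataset boundary above; for reference, a hedged sketch of how the scipy.stats.mode-style accumulation its docstring describes typically continues (a sketch, not the verbatim sklearn source):

import numpy as np

def weighted_mode_sketch(a, w, axis=0):
    a = np.asarray(a)
    w = np.zeros(a.shape, dtype=float) + np.asarray(w, dtype=float)
    scores = np.unique(np.ravel(a))
    testshape = list(a.shape)
    testshape[axis] = 1
    oldmostfreq = np.zeros(testshape)
    oldcounts = np.zeros(testshape)
    for score in scores:
        template = np.zeros(a.shape)
        ind = (a == score)
        template[ind] = w[ind]  # accumulate weights instead of counts
        counts = np.expand_dims(np.sum(template, axis), axis)
        oldmostfreq = np.where(counts > oldcounts, score, oldmostfreq)
        oldcounts = np.maximum(counts, oldcounts)
    return oldmostfreq, oldcounts

vals, counts = weighted_mode_sketch([4, 1, 4, 2, 4, 2], [1, 3, 0.5, 1.5, 1, 2])
# vals -> array([2.]), counts -> array([3.5]), matching the docstring example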
"""
control method for one patient
to retrieve the fitness value from
the k number of features and pre-ictal period
"""
import os
import numpy as np
from Utils import getLabelsForSeizure
from Utils import removeConstantFeatures
from Utils import removeRedundantFeatures
from Utils import filterFeatureSelectionCorr
from Utils import filterFeatureSelectionAUC
from sklearn import preprocessing
from sklearn.feature_selection import RFE
from sklearn.svm import SVR
from sklearn.ensemble import RandomForestClassifier
from sklearn.datasets import make_classification
from sklearn.metrics import confusion_matrix
from Utils import specificity
from Utils import sensitivity
import time
# suppress warnings (e.g., those raised by np.delete)
import warnings
warnings.filterwarnings("ignore")
# where the data is
# go back to data folder
os.chdir("..")
os.chdir("..")
os.chdir("Data")
os.chdir("Processed_data")
path=os.getcwd()
# go to where the data is
os.chdir(path)
patient_id=1321803
pre_ictal=35
k_features=10
def calculateFitness(patient_id,k_features,pre_ictal):
t = time.process_time()
contador = 0  # counter
# load training seizures
seizure_1_data=np.load("pat"+str(patient_id)+"_seizure1_featureMatrix.npy");
seizure_2_data=np.load("pat"+str(patient_id)+"_seizure2_featureMatrix.npy");
seizure_3_data=np.load("pat"+str(patient_id)+"_seizure3_featureMatrix.npy");
# removing ictal data; the last row of each matrix is the class label:
# ictal is equal to 2
# inter-ictal is equal to 0
seizure_1_data=seizure_1_data[:,(np.where(seizure_1_data[-1,:]==0)[0])]
seizure_2_data=seizure_2_data[:,(np.where(seizure_2_data[-1,:]==0)[0])]
seizure_3_data=seizure_3_data[:,(np.where(seizure_3_data[-1,:]==0)[0])]
performance_values=0
# for this pre-ictal period value, we run a 3-fold (leave-one-seizure-out) cross-validation
for k in range(0,3):
#seizure_1 for testing, seizure_2 and seizure_3 for training
if k==0:
# removing the class label from the feature vector
testing_features=seizure_1_data[0:-1,:]
# retrieving the testing labels
testing_labels=getLabelsForSeizure(testing_features,pre_ictal)
# removing the class label from the feature vector, for one seizure
training_features_1=seizure_2_data[0:-1,:]
# retrieving the training labels for one seizure
training_labels_1=getLabelsForSeizure(training_features_1,pre_ictal)
# removing the class label from the feature vector, for one seizure
training_features_2=seizure_3_data[0:-1,:]
# retrieving the training labels for one seizure
training_labels_2=getLabelsForSeizure(training_features_2,pre_ictal)
# concatenate the training features and training labels
training_features=np.concatenate([training_features_1, training_features_2], axis=1)
training_labels=np.concatenate([training_labels_1, training_labels_2], axis=1)
del training_features_1
del training_features_2
del training_labels_1
del training_labels_2
#seizure_2 for testing, seizure_1 and seizure_3 for training
elif k==1:
# removing the class label from the feature vector
testing_features=seizure_2_data[0:-1,:]
# retrieving the testing labels
testing_labels=getLabelsForSeizure(testing_features,pre_ictal)
# removing the class label from the feature vector, for one seizure
training_features_1=seizure_1_data[0:-1,:]
# retrieving the training labels for one seizure
training_labels_1=getLabelsForSeizure(training_features_1,pre_ictal)
# removing the class label from the feature vector, for one seizure
training_features_2=seizure_3_data[0:-1,:]
# retrieving the training labels for one seizure
training_labels_2=getLabelsForSeizure(training_features_2,pre_ictal)
# concatenate the training features and training labels
training_features=
|
np.concatenate([training_features_1, training_features_2], axis=1)
|
numpy.concatenate
|
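
The three hand-written k-branches above implement leave-one-seizure-out cross-validation; a compact hedged equivalent (a hypothetical helper, reusing the getLabelsForSeizure import from this script):

import numpy as np

def leave_one_seizure_out(seizures, pre_ictal, get_labels):
    # seizures: list of feature matrices whose last row is the class label
    for k, test in enumerate(seizures):
        testing_features = test[0:-1, :]
        testing_labels = get_labels(testing_features, pre_ictal)
        train = [s for i, s in enumerate(seizures) if i != k]
        training_features = np.concatenate([s[0:-1, :] for s in train], axis=1)
        training_labels = np.concatenate(
            [get_labels(s[0:-1, :], pre_ictal) for s in train], axis=1)
        yield training_features, training_labels, testing_features, testing_labels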
import numpy as np
from diautils import help
from diautils.config import to_conf
from bisect import bisect_left
from scipy.stats import norm
def siglog(v):
return np.sign(v) * np.log(1.0 + np.abs(v))
def sigexp(v):
sv = np.sign(v)
return sv * (np.exp(sv * v) - 1.0)
def z_norm(v, mean=None, std=None):
if len(v.shape) == 1:
if mean is None:
mean = v.mean()
if std is None:
std = v.std()
std = np.where(std == 0, 1, std)  # guard against zero variance
return (v - mean) / std
else:
if mean is None:
mean = v.mean(axis=-2)
if std is None:
std = v.std(axis=-2)
std = np.where(std == 0, 1, std)  # guard against zero variance
return (v -
|
np.expand_dims(mean, -2)
|
numpy.expand_dims
|
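
A quick hedged check of the helpers above: siglog and sigexp are exact inverses, and z_norm standardizes each column of a 2-D array (assuming its truncated return completes as (v - np.expand_dims(mean, -2)) / np.expand_dims(std, -2)):

import numpy as np

v = np.array([-3.0, 0.0, 2.5])
assert np.allclose(sigexp(siglog(v)), v)  # sign-preserving log round-trips

X = np.random.default_rng(0).normal(2.0, 3.0, size=(1000, 4))
Z = z_norm(X)
assert np.allclose(Z.mean(axis=0), 0.0, atol=1e-12)  # column means ~ 0
assert np.allclose(Z.std(axis=0), 1.0)               # column stds ~ 1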
###############################################################################
# actionAngle: a Python module to calculate actions, angles, and frequencies
#
# class: actionAngleStaeckelGrid
#
# build grid in integrals of motion to quickly evaluate
# actionAngleStaeckel
#
# methods:
# __call__: returns (jr,lz,jz)
#
###############################################################################
import numpy
from scipy import interpolate, optimize, ndimage
from . import actionAngleStaeckel
from .actionAngle import actionAngle
from . import actionAngleStaeckel_c
from .actionAngleStaeckel_c import _ext_loaded as ext_loaded
from .. import potential
from ..potential.Potential import _evaluatePotentials
from ..potential.Potential import flatten as flatten_potential
from ..util import multi, coords, conversion
_PRINTOUTSIDEGRID= False
class actionAngleStaeckelGrid(actionAngle):
"""Action-angle formalism for axisymmetric potentials using Binney (2012)'s Staeckel approximation, grid-based interpolation"""
def __init__(self,pot=None,delta=None,Rmax=5.,
nE=25,npsi=25,nLz=30,numcores=1,
interpecc=False,
**kwargs):
"""
NAME:
__init__
PURPOSE:
initialize an actionAngleStaeckelGrid object
INPUT:
pot= potential or list of potentials
delta= focus of prolate confocal coordinate system (can be Quantity)
Rmax = Rmax for building grids (natural units)
nE=, npsi=, nLz= grid size
interpecc= (False) if True, also interpolate the approximate eccentricity, zmax, rperi, and rapo
numcores= number of cpus to use to parallelize
ro= distance from vantage point to GC (kpc; can be Quantity)
vo= circular velocity at ro (km/s; can be Quantity)
OUTPUT:
instance
HISTORY:
2012-11-29 - Written - Bovy (IAS)
2017-12-15 - Written - Bovy (UofT)
"""
actionAngle.__init__(self,
ro=kwargs.get('ro',None),vo=kwargs.get('vo',None))
if pot is None:
raise IOError("Must specify pot= for actionAngleStaeckelGrid")
self._pot= flatten_potential(pot)
if delta is None:
raise IOError("Must specify delta= for actionAngleStaeckelGrid")
if ext_loaded and 'c' in kwargs and kwargs['c']:
self._c= True
else:
self._c= False
self._delta= conversion.parse_length(delta,ro=self._ro)
self._Rmax= Rmax
self._Rmin= 0.01
#Set up the actionAngleStaeckel object that we will use to interpolate
self._aA= actionAngleStaeckel.actionAngleStaeckel(pot=self._pot,delta=self._delta,c=self._c)
#Build grid
self._Lzmin= 0.01
self._Lzs= numpy.linspace(self._Lzmin,
self._Rmax\
*potential.vcirc(self._pot,self._Rmax),
nLz)
self._Lzmax= self._Lzs[-1]
self._nLz= nLz
#Calculate E_c(R=RL), energy of circular orbit
self._RL= numpy.array([potential.rl(self._pot,l) for l in self._Lzs])
self._RLInterp= interpolate.InterpolatedUnivariateSpline(self._Lzs,
self._RL,k=3)
self._ERL= _evaluatePotentials(self._pot,self._RL,
numpy.zeros(self._nLz))\
+self._Lzs**2./2./self._RL**2.
self._ERLmax= numpy.amax(self._ERL)+1.
self._ERLInterp= interpolate.InterpolatedUnivariateSpline(self._Lzs,
numpy.log(-(self._ERL-self._ERLmax)),k=3)
self._Ramax= 200./8.
self._ERa= _evaluatePotentials(self._pot,self._Ramax,0.) +self._Lzs**2./2./self._Ramax**2.
#self._EEsc= numpy.array([self._ERL[ii]+potential.vesc(self._pot,self._RL[ii])**2./4. for ii in range(nLz)])
self._ERamax= numpy.amax(self._ERa)+1.
self._ERaInterp= interpolate.InterpolatedUnivariateSpline(self._Lzs,
numpy.log(-(self._ERa-self._ERamax)),k=3)
y= numpy.linspace(0.,1.,nE)
self._nE= nE
psis= numpy.linspace(0.,1.,npsi)*numpy.pi/2.
self._npsi= npsi
jr= numpy.zeros((nLz,nE,npsi))
jz= numpy.zeros((nLz,nE,npsi))
u0= numpy.zeros((nLz,nE))
jrLzE= numpy.zeros((nLz))
jzLzE= numpy.zeros((nLz))
#First calculate u0
thisLzs= (numpy.tile(self._Lzs,(nE,1)).T).flatten()
thisERL= (numpy.tile(self._ERL,(nE,1)).T).flatten()
thisERa= (numpy.tile(self._ERa,(nE,1)).T).flatten()
thisy= (numpy.tile(y,(nLz,1))).flatten()
thisE= _invEfunc(_Efunc(thisERa,thisERL)+thisy*(_Efunc(thisERL,thisERL)-_Efunc(thisERa,thisERL)),thisERL)
if isinstance(self._pot,potential.interpRZPotential) and hasattr(self._pot,'_origPot'):
u0pot= self._pot._origPot
else:
u0pot= self._pot
if self._c:
mu0= actionAngleStaeckel_c.actionAngleStaeckel_calcu0(thisE,thisLzs,
u0pot,
self._delta)[0]
else:
if numcores > 1:
mu0= multi.parallel_map((lambda x: self.calcu0(thisE[x],
thisLzs[x])),
range(nE*nLz),
numcores=numcores)
else:
mu0= list(map((lambda x: self.calcu0(thisE[x],
thisLzs[x])),
range(nE*nLz)))
u0= numpy.reshape(mu0,(nLz,nE))
thisR= self._delta*numpy.sinh(u0)
thisv= numpy.reshape(self.vatu0(thisE.flatten(),thisLzs.flatten(),
u0.flatten(),
thisR.flatten()),(nLz,nE))
self.thisv= thisv
#reshape
thisLzs= numpy.reshape(thisLzs,(nLz,nE))
thispsi= numpy.tile(psis,(nLz,nE,1)).flatten()
thisLzs= numpy.tile(thisLzs.T,(npsi,1,1)).T.flatten()
thisR= numpy.tile(thisR.T,(npsi,1,1)).T.flatten()
thisv= numpy.tile(thisv.T,(npsi,1,1)).T.flatten()
mjr, mlz, mjz= self._aA(thisR, #R
thisv*numpy.cos(thispsi), #vR
thisLzs/thisR, #vT
numpy.zeros(len(thisR)), #z
thisv*numpy.sin(thispsi), #vz
fixed_quad=True)
if interpecc:
mecc, mzmax, mrperi, mrap=\
self._aA.EccZmaxRperiRap(thisR, #R
thisv*numpy.cos(thispsi), #vR
thisLzs/thisR, #vT
numpy.zeros(len(thisR)), #z
thisv*numpy.sin(thispsi)) #vz
if isinstance(self._pot,potential.interpRZPotential) and hasattr(self._pot,'_origPot'):
#Interpolated potentials have problems with extreme orbits
indx= (mjr == 9999.99)
indx+= (mjz == 9999.99)
#Re-calculate these using the original potential, hopefully not too slow
tmpaA= actionAngleStaeckel.actionAngleStaeckel(pot=self._pot._origPot,delta=self._delta,c=self._c)
mjr[indx], dum, mjz[indx]= tmpaA(thisR[indx], #R
thisv[indx]*numpy.cos(thispsi[indx]), #vR
thisLzs[indx]/thisR[indx], #vT
numpy.zeros(numpy.sum(indx)), #z
thisv[indx]*numpy.sin(thispsi[indx]), #vz
fixed_quad=True)
if interpecc:
mecc[indx], mzmax[indx], mrperi[indx], mrap[indx]=\
self._aA.EccZmaxRperiRap(thisR[indx], #R
thisv[indx]*numpy.cos(thispsi[indx]), #vR
thisLzs[indx]/thisR[indx], #vT
numpy.zeros(numpy.sum(indx)), #z
thisv[indx]*numpy.sin(thispsi[indx])) #vz
jr= numpy.reshape(mjr,(nLz,nE,npsi))
jz= numpy.reshape(mjz,(nLz,nE,npsi))
if interpecc:
ecc= numpy.reshape(mecc,(nLz,nE,npsi))
zmax= numpy.reshape(mzmax,(nLz,nE,npsi))
rperi= numpy.reshape(mrperi,(nLz,nE,npsi))
rap= numpy.reshape(mrap,(nLz,nE,npsi))
zmaxLzE= numpy.zeros((nLz))
rperiLzE= numpy.zeros((nLz))
rapLzE= numpy.zeros((nLz))
for ii in range(nLz):
jrLzE[ii]= numpy.nanmax(jr[ii,(jr[ii,:,:] != 9999.99)])
jzLzE[ii]= numpy.nanmax(jz[ii,(jz[ii,:,:] != 9999.99)])
if interpecc:
zmaxLzE[ii]= numpy.amax(zmax[ii,numpy.isfinite(zmax[ii])])
rperiLzE[ii]= numpy.amax(rperi[ii,numpy.isfinite(rperi[ii])])
rapLzE[ii]= numpy.amax(rap[ii,numpy.isfinite(rap[ii])])
jrLzE[(jrLzE == 0.)]= numpy.nanmin(jrLzE[(jrLzE > 0.)])
jzLzE[(jzLzE == 0.)]= numpy.nanmin(jzLzE[(jzLzE > 0.)])
if interpecc:
zmaxLzE[(zmaxLzE == 0.)]= numpy.nanmin(zmaxLzE[(zmaxLzE > 0.)])
rperiLzE[(rperiLzE == 0.)]= numpy.nanmin(rperiLzE[(rperiLzE > 0.)])
rapLzE[(rapLzE == 0.)]= numpy.nanmin(rapLzE[(rapLzE > 0.)])
for ii in range(nLz):
jr[ii,:,:]/= jrLzE[ii]
jz[ii,:,:]/= jzLzE[ii]
if interpecc:
zmax[ii,:,:]/= zmaxLzE[ii]
rperi[ii,:,:]/= rperiLzE[ii]
rap[ii,:,:]/= rapLzE[ii]
#Deal w/ 9999.99
jr[(jr > 1.)]= 1.
jz[(jz > 1.)]= 1.
#Deal w/ NaN
jr[numpy.isnan(jr)]= 0.
jz[numpy.isnan(jz)]= 0.
if interpecc:
ecc[(ecc < 0.)]= 0.
ecc[(ecc > 1.)]= 1.
ecc[numpy.isnan(ecc)]= 0.
ecc[numpy.isinf(ecc)]= 1.
zmax[(zmax > 1.)]= 1.
zmax[numpy.isnan(zmax)]= 0.
zmax[numpy.isinf(zmax)]= 1.
rperi[(rperi > 1.)]= 1.
rperi[numpy.isnan(rperi)]= 0.
rperi[numpy.isinf(rperi)]= 0. # typically orbits that can reach 0
rap[(rap > 1.)]= 1.
rap[numpy.isnan(rap)]= 0.
rap[numpy.isinf(rap)]= 1.
#First interpolate the maxima
self._jr= jr
self._jz= jz
self._u0= u0
self._jrLzE= jrLzE
self._jzLzE= jzLzE
self._jrLzInterp= interpolate.InterpolatedUnivariateSpline(self._Lzs,
numpy.log(jrLzE+10.**-5.),k=3)
self._jzLzInterp= interpolate.InterpolatedUnivariateSpline(self._Lzs,
numpy.log(jzLzE+10.**-5.),k=3)
if interpecc:
self._ecc= ecc
self._zmax= zmax
self._rperi= rperi
self._rap= rap
self._zmaxLzE= zmaxLzE
self._rperiLzE= rperiLzE
self._rapLzE= rapLzE
self._zmaxLzInterp=\
interpolate.InterpolatedUnivariateSpline(self._Lzs,
numpy.log(zmaxLzE+10.**-5.),k=3)
self._rperiLzInterp=\
interpolate.InterpolatedUnivariateSpline(self._Lzs,
numpy.log(rperiLzE+10.**-5.),k=3)
self._rapLzInterp=\
interpolate.InterpolatedUnivariateSpline(self._Lzs,
numpy.log(rapLzE+10.**-5.),k=3)
#Interpolate u0
self._logu0Interp= interpolate.RectBivariateSpline(self._Lzs,
y,
numpy.log(u0),
kx=3,ky=3,s=0.)
#spline filter jr and jz, such that they can be used with ndimage.map_coordinates
self._jrFiltered= ndimage.spline_filter(numpy.log(self._jr+10.**-10.),order=3)
self._jzFiltered= ndimage.spline_filter(numpy.log(self._jz+10.**-10.),order=3)
if interpecc:
self._eccFiltered= ndimage.spline_filter(numpy.log(self._ecc+10.**-10.),order=3)
self._zmaxFiltered= ndimage.spline_filter(numpy.log(self._zmax+10.**-10.),order=3)
self._rperiFiltered= ndimage.spline_filter(numpy.log(self._rperi+10.**-10.),order=3)
self._rapFiltered= ndimage.spline_filter(numpy.log(self._rap+10.**-10.),order=3)
# Check the units
self._check_consistent_units()
return None
def _evaluate(self,*args,**kwargs):
"""
NAME:
__call__ (_evaluate)
PURPOSE:
evaluate the actions (jr,lz,jz)
INPUT:
Either:
a) R,vR,vT,z,vz[,phi]:
1) floats: phase-space value for single object (phi is optional) (each can be a Quantity)
2) numpy.ndarray: [N] phase-space values for N objects (each can be a Quantity)
b) Orbit instance: initial condition used if that's it, orbit(t) if there is a time given as well as the second argument
Keywords for actionAngleStaeckel.__call__ for off-the-grid evaluations
OUTPUT:
(jr,lz,jz)
HISTORY:
2012-11-29 - Written - Bovy (IAS)
"""
if len(args) == 5: #R, vR, vT, z, vz
R,vR,vT, z, vz= args
elif len(args) == 6: #R, vR, vT, z, vz, phi
R,vR,vT, z, vz, phi= args
else:
self._parse_eval_args(*args)
R= self._eval_R
vR= self._eval_vR
vT= self._eval_vT
z= self._eval_z
vz= self._eval_vz
Lz= R*vT
Phi= _evaluatePotentials(self._pot,R,z)
E= Phi+vR**2./2.+vT**2./2.+vz**2./2.
thisERL= -numpy.exp(self._ERLInterp(Lz))+self._ERLmax
thisERa= -numpy.exp(self._ERaInterp(Lz))+self._ERamax
if isinstance(R,numpy.ndarray):
indx= ((E-thisERa)/(thisERL-thisERa) > 1.)\
*(((E-thisERa)/(thisERL-thisERa)-1.) < 10.**-2.)
E[indx]= thisERL[indx]
indx= ((E-thisERa)/(thisERL-thisERa) < 0.)\
*((E-thisERa)/(thisERL-thisERa) > -10.**-2.)
E[indx]= thisERa[indx]
indx= (Lz < self._Lzmin)
indx+= (Lz > self._Lzmax)
indx+= ((E-thisERa)/(thisERL-thisERa) > 1.)
indx+= ((E-thisERa)/(thisERL-thisERa) < 0.)
indxc= True^indx
jr= numpy.empty(R.shape)
jz= numpy.empty(R.shape)
if numpy.sum(indxc) > 0:
u0= numpy.exp(self._logu0Interp.ev(Lz[indxc],
(_Efunc(E[indxc],thisERL[indxc])-_Efunc(thisERa[indxc],thisERL[indxc]))/(_Efunc(thisERL[indxc],thisERL[indxc])-_Efunc(thisERa[indxc],thisERL[indxc]))))
sinh2u0= numpy.sinh(u0)**2.
thisEr= self.Er(R[indxc],z[indxc],vR[indxc],vz[indxc],
E[indxc],Lz[indxc],sinh2u0,u0)
thisEz= self.Ez(R[indxc],z[indxc],vR[indxc],vz[indxc],
E[indxc],Lz[indxc],sinh2u0,u0)
thisv2= self.vatu0(E[indxc],Lz[indxc],u0,self._delta*numpy.sinh(u0),retv2=True)
cos2psi= 2.*thisEr/thisv2/(1.+sinh2u0) #latter is cosh2u0
cos2psi[(cos2psi > 1.)*(cos2psi < 1.+10.**-5.)]= 1.
indxCos2psi= (cos2psi > 1.)
indxCos2psi+= (cos2psi < 0.)
indxc[indxc]= True^indxCos2psi#Handle these two cases as off-grid
indx= True^indxc
psi= numpy.arccos(numpy.sqrt(cos2psi[True^indxCos2psi]))
coords= numpy.empty((3,numpy.sum(indxc)))
coords[0,:]= (Lz[indxc]-self._Lzmin)/(self._Lzmax-self._Lzmin)*(self._nLz-1.)
y= (_Efunc(E[indxc],thisERL[indxc])-_Efunc(thisERa[indxc],thisERL[indxc]))/(_Efunc(thisERL[indxc],thisERL[indxc])-_Efunc(thisERa[indxc],thisERL[indxc]))
coords[1,:]= y*(self._nE-1.)
coords[2,:]= psi/numpy.pi*2.*(self._npsi-1.)
jr[indxc]= (numpy.exp(ndimage.interpolation.map_coordinates(self._jrFiltered,
coords,
order=3,
prefilter=False))-10.**-10.)*(numpy.exp(self._jrLzInterp(Lz[indxc]))-10.**-5.)
#Switch to Ez-calculated psi
sin2psi= 2.*thisEz[True^indxCos2psi]/thisv2[True^indxCos2psi]/(1.+sinh2u0[True^indxCos2psi]) #latter is cosh2u0
sin2psi[(sin2psi > 1.)*(sin2psi < 1.+10.**-5.)]= 1.
indxSin2psi= (sin2psi > 1.)
indxSin2psi+= (sin2psi < 0.)
indxc[indxc]= True^indxSin2psi#Handle these two cases as off-grid
indx= True^indxc
psiz= numpy.arcsin(numpy.sqrt(sin2psi[True^indxSin2psi]))
newcoords= numpy.empty((3,numpy.sum(indxc)))
newcoords[0:2,:]= coords[0:2,True^indxSin2psi]
newcoords[2,:]= psiz/numpy.pi*2.*(self._npsi-1.)
jz[indxc]= (numpy.exp(ndimage.interpolation.map_coordinates(self._jzFiltered,
newcoords,
order=3,
prefilter=False))-10.**-10.)*(numpy.exp(self._jzLzInterp(Lz[indxc]))-10.**-5.)
if numpy.sum(indx) > 0:
jrindiv, lzindiv, jzindiv= self._aA(R[indx],
vR[indx],
vT[indx],
z[indx],
vz[indx],
**kwargs)
jr[indx]= jrindiv
jz[indx]= jzindiv
"""
jrindiv= numpy.empty(numpy.sum(indx))
jzindiv= numpy.empty(numpy.sum(indx))
for ii in range(numpy.sum(indx)):
try:
thisaA= actionAngleStaeckel.actionAngleStaeckelSingle(\
R[indx][ii], #R
vR[indx][ii], #vR
vT[indx][ii], #vT
z[indx][ii], #z
vz[indx][ii], #vz
pot=self._pot,delta=self._delta)
jrindiv[ii]= thisaA.JR(fixed_quad=True)[0]
jzindiv[ii]= thisaA.Jz(fixed_quad=True)[0]
except (UnboundError,OverflowError):
jrindiv[ii]= numpy.nan
jzindiv[ii]= numpy.nan
jr[indx]= jrindiv
jz[indx]= jzindiv
"""
else:
jr,Lz, jz= self(numpy.array([R]),
numpy.array([vR]),
numpy.array([vT]),
numpy.array([z]),
numpy.array([vz]),
**kwargs)
return (jr[0],Lz[0],jz[0])
jr[jr < 0.]= 0.
jz[jz < 0.]= 0.
return (jr,R*vT,jz)
def Jz(self,*args,**kwargs):
"""
NAME:
Jz
PURPOSE:
evaluate the action jz
INPUT:
Either:
a) R,vR,vT,z,vz
b) Orbit instance: initial condition used if that's it, orbit(t)
if there is a time given as well
scipy.integrate.quadrature keywords
OUTPUT:
jz
HISTORY:
2012-07-30 - Written - Bovy (IAS@MPIA)
"""
return self(*args,**kwargs)[2]
def JR(self,*args,**kwargs):
"""
NAME:
JR
PURPOSE:
evaluate the action jr
INPUT:
Either:
a) R,vR,vT,z,vz
b) Orbit instance: initial condition used if that's it, orbit(t)
if there is a time given as well
scipy.integrate.quadrature keywords
OUTPUT:
jr
HISTORY:
2012-07-30 - Written - Bovy (IAS@MPIA)
"""
return self(*args,**kwargs)[0]
def _EccZmaxRperiRap(self,*args,**kwargs):
"""
NAME:
EccZmaxRperiRap (_EccZmaxRperiRap)
PURPOSE:
evaluate the eccentricity, maximum height above the plane, peri- and apocenter in the Staeckel approximation
INPUT:
Either:
a) R,vR,vT,z,vz[,phi]:
1) floats: phase-space value for single object (phi is optional) (each can be a Quantity)
2) numpy.ndarray: [N] phase-space values for N objects (each can be a Quantity)
b) Orbit instance: initial condition used if that's it, orbit(t) if there is a time given as well as the second argument
OUTPUT:
(e,zmax,rperi,rap)
HISTORY:
2017-12-15 - Written - Bovy (UofT)
"""
if len(args) == 5: #R, vR, vT, z, vz
R,vR,vT, z, vz= args
elif len(args) == 6: #R, vR, vT, z, vz, phi
R,vR,vT, z, vz, phi= args
else:
self._parse_eval_args(*args)
R= self._eval_R
vR= self._eval_vR
vT= self._eval_vT
z= self._eval_z
vz= self._eval_vz
Lz= R*vT
Phi= _evaluatePotentials(self._pot,R,z)
E= Phi+vR**2./2.+vT**2./2.+vz**2./2.
thisERL= -numpy.exp(self._ERLInterp(Lz))+self._ERLmax
thisERa= -numpy.exp(self._ERaInterp(Lz))+self._ERamax
if isinstance(R,numpy.ndarray):
indx= ((E-thisERa)/(thisERL-thisERa) > 1.)\
*(((E-thisERa)/(thisERL-thisERa)-1.) < 10.**-2.)
E[indx]= thisERL[indx]
indx= ((E-thisERa)/(thisERL-thisERa) < 0.)\
*((E-thisERa)/(thisERL-thisERa) > -10.**-2.)
E[indx]= thisERa[indx]
indx= (Lz < self._Lzmin)
indx+= (Lz > self._Lzmax)
indx+= ((E-thisERa)/(thisERL-thisERa) > 1.)
indx+= ((E-thisERa)/(thisERL-thisERa) < 0.)
indxc= True^indx
ecc= numpy.empty(R.shape)
zmax= numpy.empty(R.shape)
rperi= numpy.empty(R.shape)
rap= numpy.empty(R.shape)
if numpy.sum(indxc) > 0:
u0= numpy.exp(self._logu0Interp.ev(Lz[indxc],
(_Efunc(E[indxc],thisERL[indxc])-_Efunc(thisERa[indxc],thisERL[indxc]))/(_Efunc(thisERL[indxc],thisERL[indxc])-_Efunc(thisERa[indxc],thisERL[indxc]))))
sinh2u0= numpy.sinh(u0)**2.
thisEr= self.Er(R[indxc],z[indxc],vR[indxc],vz[indxc],
E[indxc],Lz[indxc],sinh2u0,u0)
thisEz= self.Ez(R[indxc],z[indxc],vR[indxc],vz[indxc],
E[indxc],Lz[indxc],sinh2u0,u0)
thisv2= self.vatu0(E[indxc],Lz[indxc],u0,self._delta*numpy.sinh(u0),retv2=True)
cos2psi= 2.*thisEr/thisv2/(1.+sinh2u0) #latter is cosh2u0
cos2psi[(cos2psi > 1.)*(cos2psi < 1.+10.**-5.)]= 1.
indxCos2psi= (cos2psi > 1.)
indxCos2psi+= (cos2psi < 0.)
indxc[indxc]= True^indxCos2psi#Handle these two cases as off-grid
indx= True^indxc
psi= numpy.arccos(numpy.sqrt(cos2psi[True^indxCos2psi]))
coords= numpy.empty((3,numpy.sum(indxc)))
coords[0,:]= (Lz[indxc]-self._Lzmin)/(self._Lzmax-self._Lzmin)*(self._nLz-1.)
y= (_Efunc(E[indxc],thisERL[indxc])-_Efunc(thisERa[indxc],thisERL[indxc]))/(_Efunc(thisERL[indxc],thisERL[indxc])-_Efunc(thisERa[indxc],thisERL[indxc]))
coords[1,:]= y*(self._nE-1.)
coords[2,:]= psi/numpy.pi*2.*(self._npsi-1.)
ecc[indxc]= (numpy.exp(ndimage.interpolation.map_coordinates(self._eccFiltered,
coords,
order=3,
prefilter=False))-10.**-10.)
rperi[indxc]= (numpy.exp(ndimage.interpolation.map_coordinates(self._rperiFiltered,
coords,
order=3,
prefilter=False))-10.**-10.)*(numpy.exp(self._rperiLzInterp(Lz[indxc]))-10.**-5.)
# We do rap below with zmax
#Switch to Ez-calculated psi
sin2psi= 2.*thisEz[True^indxCos2psi]/thisv2[True^indxCos2psi]/(1.+sinh2u0[True^indxCos2psi]) #latter is cosh2u0
sin2psi[(sin2psi > 1.)*(sin2psi < 1.+10.**-5.)]= 1.
indxSin2psi= (sin2psi > 1.)
indxSin2psi+= (sin2psi < 0.)
indxc[indxc]= True^indxSin2psi#Handle these two cases as off-grid
indx= True^indxc
psiz= numpy.arcsin(numpy.sqrt(sin2psi[True^indxSin2psi]))
newcoords= numpy.empty((3,numpy.sum(indxc)))
newcoords[0:2,:]= coords[0:2,True^indxSin2psi]
newcoords[2,:]= psiz/numpy.pi*2.*(self._npsi-1.)
zmax[indxc]= (numpy.exp(ndimage.interpolation.map_coordinates(self._zmaxFiltered,
newcoords,
order=3,
prefilter=False))-10.**-10.)*(numpy.exp(self._zmaxLzInterp(Lz[indxc]))-10.**-5.)
rap[indxc]= (numpy.exp(ndimage.interpolation.map_coordinates(self._rapFiltered,
newcoords,
order=3,
prefilter=False))-10.**-10.)*(numpy.exp(self._rapLzInterp(Lz[indxc]))-10.**-5.)
if numpy.sum(indx) > 0:
eccindiv, zmaxindiv, rperiindiv, rapindiv=\
self._aA.EccZmaxRperiRap(R[indx],
vR[indx],
vT[indx],
z[indx],
vz[indx],
**kwargs)
ecc[indx]= eccindiv
zmax[indx]= zmaxindiv
rperi[indx]= rperiindiv
rap[indx]= rapindiv
else:
ecc,zmax,rperi,rap= self.EccZmaxRperiRap(numpy.array([R]),
numpy.array([vR]),
numpy.array([vT]),
numpy.array([z]),
numpy.array([vz]),
**kwargs)
return (ecc[0],zmax[0],rperi[0],rap[0])
ecc[ecc < 0.]= 0.
zmax[zmax < 0.]= 0.
rperi[rperi < 0.]= 0.
rap[rap < 0.]= 0.
return (ecc,zmax,rperi,rap)
def vatu0(self,E,Lz,u0,R,retv2=False):
"""
NAME:
vatu0
PURPOSE:
calculate the velocity at u0
INPUT:
E - energy
Lz - angular momentum
u0 - u0
R - radius corresponding to u0,pi/2.
retv2= (False), if True return v^2
OUTPUT:
velocity
HISTORY:
2012-11-29 - Written - Bovy (IAS)
"""
v2= (2.*(E-actionAngleStaeckel.potentialStaeckel(u0,numpy.pi/2.,
self._pot,
self._delta))
-Lz**2./R**2.)
if retv2: return v2
v2[(v2 < 0.)*(v2 > -10.**-7.)]= 0.
return numpy.sqrt(v2)
def calcu0(self,E,Lz):
"""
NAME:
calcu0
PURPOSE:
calculate the minimum of the u potential
INPUT:
E - energy
Lz - angular momentum
OUTPUT:
u0
HISTORY:
2012-11-29 - Written - Bovy (IAS)
"""
logu0= optimize.brent(_u0Eq,
args=(self._delta,self._pot,
E,Lz**2./2.))
return numpy.exp(logu0)
def Er(self,R,z,vR,vz,E,Lz,sinh2u0,u0):
"""
NAME:
Er
PURPOSE:
calculate the 'radial energy'
INPUT:
R, z, vR, vz - coordinates
E - energy
Lz - angular momentum
sinh2u0, u0 - sinh^2 and u0
OUTPUT:
Er
HISTORY:
2012-11-29 - Written - Bovy (IAS)
"""
u,v= coords.Rz_to_uv(R,z,self._delta)
pu= (vR*numpy.cosh(u)*numpy.sin(v)
+vz*numpy.sinh(u)*numpy.cos(v)) #no delta, bc we will divide it out
out= (pu**2./2.+Lz**2./2./self._delta**2.*(1./numpy.sinh(u)**2.-1./sinh2u0)
-E*(numpy.sinh(u)**2.-sinh2u0)
+(numpy.sinh(u)**2.+1.)*actionAngleStaeckel.potentialStaeckel(u,numpy.pi/2.,self._pot,self._delta)
-(sinh2u0+1.)*actionAngleStaeckel.potentialStaeckel(u0,numpy.pi/2.,self._pot,self._delta))
# +(numpy.sinh(u)**2.+numpy.sin(v)**2.)*actionAngleStaeckel.potentialStaeckel(u,v,self._pot,self._delta)
# -(sinh2u0+numpy.sin(v)**2.)*actionAngleStaeckel.potentialStaeckel(u0,v,self._pot,self._delta))
return out
def Ez(self,R,z,vR,vz,E,Lz,sinh2u0,u0):
"""
NAME:
Ez
PURPOSE:
calculate the 'vertical energy'
INPUT:
R, z, vR, vz - coordinates
E - energy
Lz - angular momentum
sinh2u0, u0 - sinh^2 and u0
OUTPUT:
Ez
HISTORY:
2012-12-23 - Written - Bovy (IAS)
"""
u,v= coords.Rz_to_uv(R,z,self._delta)
pv= (vR*numpy.sinh(u)*numpy.cos(v)
-vz*numpy.cosh(u)*numpy.sin(v)) #no delta, bc we will divide it out
out= (pv**2./2.+Lz**2./2./self._delta**2.*(1./numpy.sin(v)**2.-1.)
-E*(numpy.sin(v)**2.-1.)
-(sinh2u0+1.)*actionAngleStaeckel.potentialStaeckel(u0,numpy.pi/2.,self._pot,self._delta)
+(sinh2u0+numpy.sin(v)**2.)*actionAngleStaeckel.potentialStaeckel(u0,v,self._pot,self._delta))
return out
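# Hedged usage sketch for the class above (assumes galpy is installed;
# the potential and delta values are illustrative, taken from typical
# galpy examples rather than from this file):
# >>> from galpy import potential
# >>> from galpy.actionAngle import actionAngleStaeckelGrid
# >>> aAS = actionAngleStaeckelGrid(pot=potential.MWPotential2014, delta=0.45)
# >>> jr, lz, jz = aAS(1., 0.1, 1.1, 0.05, 0.02)  # positional (R, vR, vT, z, vz)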
def _u0Eq(logu,delta,pot,E,Lz22):
"""The equation that needs to be minimized to find u0"""
u= numpy.exp(logu)
sinh2u= numpy.sinh(u)**2.
cosh2u= numpy.cosh(u)**2.
dU= cosh2u*actionAngleStaeckel.potentialStaeckel(u,numpy.pi/2.,pot,delta)
return -(E*sinh2u-dU-Lz22/delta**2./sinh2u)
def _Efunc(E,*args):
"""Function to apply to the energy in building the grid (e.g., if this is a log, then the grid will be logarithmic"""
# return ((E-args[0]))**0.5
return numpy.log((E-args[0]+10.**-10.))
def _invEfunc(Ef,*args):
"""Inverse of Efunc"""
# return Ef**2.+args[0]
return
|
numpy.exp(Ef)
|
numpy.exp
|
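
A hedged round-trip check of the grid-energy transform pair that closes the file above (assuming _invEfunc's truncated return completes as numpy.exp(Ef) + args[0] - 10.**-10., the inverse of _Efunc):

import numpy

def Efunc_demo(E, Eref):
    return numpy.log(E - Eref + 10.**-10.)

def invEfunc_demo(Ef, Eref):
    return numpy.exp(Ef) + Eref - 10.**-10.

E, Eref = -0.5, -1.2
assert numpy.isclose(invEfunc_demo(Efunc_demo(E, Eref), Eref), E)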